Dataset schema (each record below lists its fields in this order):

  patch   string   length 17 to 31.2k     diff hunk under review
  y       int64    min 1, max 1
  oldf    string   length 0 to 2.21M      full pre-change file contents
  idx     int64    min 1, max 1
  id      int64    min 4.29k, max 68.4k
  msg     string   length 8 to 843        reviewer comment
  proj    string   212 distinct values    source project
  lang    string   9 distinct values      programming language
patch:
@@ -10,11 +10,11 @@ import (
 	"context"
 	"crypto/sha256"
 	"encoding/hex"
+	"github.com/facebookgo/clock"
 	"math/big"
 	"strings"
 	"testing"
 
-	"github.com/facebookgo/clock"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/crypto/blake2b"

y: 1
oldf:
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package blockchain import ( "context" "crypto/sha256" "encoding/hex" "math/big" "strings" "testing" "github.com/facebookgo/clock" "github.com/pkg/errors" "github.com/stretchr/testify/require" "golang.org/x/crypto/blake2b" "github.com/iotexproject/iotex-core/address" "github.com/iotexproject/iotex-core/blockchain/action" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/crypto" "github.com/iotexproject/iotex-core/iotxaddress" "github.com/iotexproject/iotex-core/pkg/enc" "github.com/iotexproject/iotex-core/pkg/hash" "github.com/iotexproject/iotex-core/pkg/keypair" "github.com/iotexproject/iotex-core/pkg/version" "github.com/iotexproject/iotex-core/proto" "github.com/iotexproject/iotex-core/state" ta "github.com/iotexproject/iotex-core/test/testaddress" "github.com/iotexproject/iotex-core/testutil" ) func TestBasicHash(t *testing.T) { require := require.New(t) // basic hash test input := []byte("hello") hash := sha256.Sum256(input) hash = sha256.Sum256(hash[:]) hello, _ := hex.DecodeString("9595c9df90075148eb06860365df33584b75bff782a510c6cd4883a419833d50") require.Equal(hello, hash[:]) t.Logf("sha256(sha256(\"hello\") = %x", hash) hash = blake2b.Sum256(input) hash = blake2b.Sum256(hash[:]) hello, _ = hex.DecodeString("901c60ffffd77f743729f8fea0233c0b00223428b5192c2015f853562b45ce59") require.Equal(hello, hash[:]) t.Logf("blake2b(blake2b(\"hello\") = %x", hash) } func TestMerkle(t *testing.T) { require := require.New(t) amount := uint64(50 << 22) // create testing transactions cbtsf0 := action.NewCoinBaseTransfer(big.NewInt(int64(amount)), ta.Addrinfo["producer"].RawAddress) require.NotNil(cbtsf0) cbtsf1 := action.NewCoinBaseTransfer(big.NewInt(int64(amount)), ta.Addrinfo["alfa"].RawAddress) require.NotNil(cbtsf1) cbtsf2 := action.NewCoinBaseTransfer(big.NewInt(int64(amount)), ta.Addrinfo["bravo"].RawAddress) require.NotNil(cbtsf2) cbtsf3 := action.NewCoinBaseTransfer(big.NewInt(int64(amount)), ta.Addrinfo["charlie"].RawAddress) require.NotNil(cbtsf3) cbtsf4 := action.NewCoinBaseTransfer(big.NewInt(int64(amount)), ta.Addrinfo["echo"].RawAddress) require.NotNil(cbtsf4) // verify tx hash hash0, _ := hex.DecodeString("c42f754fdf676a6ac4cdccba96f2dc1055c41c25effc72ac9477e120712e5634") actual := cbtsf0.Hash() require.Equal(hash0, actual[:]) t.Logf("actual hash = %x", actual[:]) hash1, _ := hex.DecodeString("2c4bcfb59297b3e472f7c15ff31a3ed080b749a952c18bb585ef517542c8381d") actual = cbtsf1.Hash() require.Equal(hash1, actual[:]) t.Logf("actual hash = %x", actual[:]) hash2, _ := hex.DecodeString("46e07d8753a07d66f9b76797a0e3257fd2b70b019722dfb3394ba51db2b21b62") actual = cbtsf2.Hash() require.Equal(hash2, actual[:]) t.Logf("actual hash = %x", actual[:]) hash3, _ := hex.DecodeString("d300718263371fb0218a2616f8822866547dade0f0b1dbe3d326950c4488f6de") actual = cbtsf3.Hash() require.Equal(hash3, actual[:]) t.Logf("actual hash = %x", actual[:]) hash4, _ := hex.DecodeString("75b315ef2baaa13af4579876d018db0f512e132d3c4b41b5ebe9d0b75e9cf054") actual = cbtsf4.Hash() require.Equal(hash4, actual[:]) t.Logf("actual hash = %x", actual[:]) // manually compute 
merkle root cat := append(hash0, hash1...) hash01 := blake2b.Sum256(cat) t.Logf("hash01 = %x", hash01) cat = append(hash2, hash3...) hash23 := blake2b.Sum256(cat) t.Logf("hash23 = %x", hash23) cat = append(hash4, hash4...) hash45 := blake2b.Sum256(cat) t.Logf("hash45 = %x", hash45) cat = append(hash01[:], hash23[:]...) hash03 := blake2b.Sum256(cat) t.Logf("hash03 = %x", hash03) cat = append(hash45[:], hash45[:]...) hash47 := blake2b.Sum256(cat) t.Logf("hash47 = %x", hash47) cat = append(hash03[:], hash47[:]...) hash07 := blake2b.Sum256(cat) t.Logf("hash07 = %x", hash07) // create block using above 5 tx and verify merkle block := NewBlock( 0, 0, hash.ZeroHash32B, clock.New(), []*action.Transfer{cbtsf0, cbtsf1, cbtsf2, cbtsf3, cbtsf4}, nil, nil, ) hash := block.TxRoot() require.Equal(hash07[:], hash[:]) t.Log("Merkle root match pass\n") } func TestConvertFromBlockPb(t *testing.T) { blk := Block{} blk.ConvertFromBlockPb(&iproto.BlockPb{ Header: &iproto.BlockHeaderPb{ Version: version.ProtocolVersion, Height: 123456789, }, Actions: []*iproto.ActionPb{ {Action: &iproto.ActionPb_Transfer{ Transfer: &iproto.TransferPb{}, }, Version: version.ProtocolVersion, Nonce: 101, }, {Action: &iproto.ActionPb_Transfer{ Transfer: &iproto.TransferPb{}, }, Version: version.ProtocolVersion, Nonce: 102, }, {Action: &iproto.ActionPb_Vote{ Vote: &iproto.VotePb{}, }, Version: version.ProtocolVersion, Nonce: 103, }, {Action: &iproto.ActionPb_Vote{ Vote: &iproto.VotePb{}, }, Version: version.ProtocolVersion, Nonce: 104, }, }, }) blk.Header.txRoot = blk.TxRoot() raw, err := blk.Serialize() require.Nil(t, err) var newblk Block err = newblk.Deserialize(raw) require.Nil(t, err) blockBytes := blk.ByteStream() require.True(t, len(blockBytes) > 0) require.Equal(t, uint64(123456789), newblk.Header.height) require.Equal(t, uint64(101), newblk.Transfers[0].Nonce()) require.Equal(t, uint64(102), newblk.Transfers[1].Nonce()) require.Equal(t, uint64(103), newblk.Votes[0].Nonce()) require.Equal(t, uint64(104), newblk.Votes[1].Nonce()) } func TestWrongRootHash(t *testing.T) { require := require.New(t) val := validator{nil, ""} tsf1, err := action.NewTransfer(1, big.NewInt(20), ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["alfa"].RawAddress, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(tsf1, ta.Addrinfo["producer"].PrivateKey)) tsf2, err := action.NewTransfer(1, big.NewInt(30), ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["bravo"].RawAddress, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(tsf2, ta.Addrinfo["producer"].PrivateKey)) hash := tsf1.Hash() blk := NewBlock(1, 1, hash, clock.New(), []*action.Transfer{tsf1, tsf2}, nil, nil) blk.Header.Pubkey = ta.Addrinfo["producer"].PublicKey blkHash := blk.HashBlock() blk.Header.blockSig = crypto.EC283.Sign(ta.Addrinfo["producer"].PrivateKey, blkHash[:]) require.Nil(val.Validate(blk, 0, hash, true)) blk.Transfers[0], blk.Transfers[1] = blk.Transfers[1], blk.Transfers[0] require.NotNil(val.Validate(blk, 0, hash, true)) } func TestSignBlock(t *testing.T) { require := require.New(t) val := validator{nil, ""} tsf1, err := action.NewTransfer(1, big.NewInt(20), ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["alfa"].RawAddress, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(tsf1, ta.Addrinfo["producer"].PrivateKey)) tsf2, err := action.NewTransfer(1, big.NewInt(30), ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["bravo"].RawAddress, []byte{}, uint64(100000), 
big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(tsf2, ta.Addrinfo["producer"].PrivateKey)) hash := tsf1.Hash() blk := NewBlock(1, 3, hash, clock.New(), []*action.Transfer{tsf1, tsf2}, nil, nil) err = blk.SignBlock(ta.Addrinfo["producer"]) require.Nil(err) require.Nil(val.Validate(blk, 2, hash, true)) } func TestWrongNonce(t *testing.T) { cfg := &config.Default testutil.CleanupPath(t, cfg.Chain.TrieDBPath) defer testutil.CleanupPath(t, cfg.Chain.TrieDBPath) testutil.CleanupPath(t, cfg.Chain.ChainDBPath) defer testutil.CleanupPath(t, cfg.Chain.ChainDBPath) require := require.New(t) sf, err := state.NewFactory(cfg, state.DefaultTrieOption()) require.NoError(err) require.NoError(sf.Start(context.Background())) _, err = sf.LoadOrCreateState(ta.Addrinfo["producer"].RawAddress, Gen.TotalSupply) require.NoError(err) val := validator{sf, ""} _, err = sf.RunActions(0, nil, nil, nil) require.Nil(err) require.Nil(sf.Commit()) chainID := enc.MachineEndian.Uint32(iotxaddress.ChainID) // correct nonce coinbaseTsf := action.NewCoinBaseTransfer(big.NewInt(int64(Gen.BlockReward)), ta.Addrinfo["producer"].RawAddress) tsf1, err := action.NewTransfer(1, big.NewInt(20), ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["alfa"].RawAddress, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(tsf1, ta.Addrinfo["producer"].PrivateKey)) hash := tsf1.Hash() blk := NewBlock(chainID, 3, hash, clock.New(), []*action.Transfer{coinbaseTsf, tsf1}, nil, nil) err = blk.SignBlock(ta.Addrinfo["producer"]) require.NoError(err) require.Nil(val.Validate(blk, 2, hash, true)) _, err = sf.RunActions(1, []*action.Transfer{tsf1}, nil, nil) require.NoError(err) require.Nil(sf.Commit()) // low nonce tsf2, err := action.NewTransfer(1, big.NewInt(30), ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["bravo"].RawAddress, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(tsf2, ta.Addrinfo["producer"].PrivateKey)) hash = tsf1.Hash() blk = NewBlock(chainID, 3, hash, clock.New(), []*action.Transfer{coinbaseTsf, tsf1, tsf2}, nil, nil) err = blk.SignBlock(ta.Addrinfo["producer"]) require.NoError(err) err = val.Validate(blk, 2, hash, true) require.Equal(ErrActionNonce, errors.Cause(err)) vote, err := action.NewVote(1, ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["bravo"].RawAddress, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(vote, ta.Addrinfo["producer"].PrivateKey)) hash = tsf1.Hash() blk = NewBlock(chainID, 3, hash, clock.New(), []*action.Transfer{coinbaseTsf}, []*action.Vote{vote}, nil) err = blk.SignBlock(ta.Addrinfo["producer"]) require.NoError(err) err = val.Validate(blk, 2, hash, true) require.Error(err) require.Equal(ErrActionNonce, errors.Cause(err)) // duplicate nonce tsf3, err := action.NewTransfer(2, big.NewInt(30), ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["bravo"].RawAddress, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(tsf3, ta.Addrinfo["producer"].PrivateKey)) tsf4, err := action.NewTransfer(2, big.NewInt(30), ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["bravo"].RawAddress, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(tsf4, ta.Addrinfo["producer"].PrivateKey)) hash = tsf1.Hash() blk = NewBlock(chainID, 3, hash, clock.New(), []*action.Transfer{coinbaseTsf, tsf3, tsf4}, nil, nil) err = blk.SignBlock(ta.Addrinfo["producer"]) require.NoError(err) err = val.Validate(blk, 2, hash, true) 
require.Error(err) require.Equal(ErrActionNonce, errors.Cause(err)) vote2, err := action.NewVote(2, ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["bravo"].RawAddress, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(vote2, ta.Addrinfo["producer"].PrivateKey)) vote3, err := action.NewVote(2, ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["charlie"].RawAddress, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(vote3, ta.Addrinfo["producer"].PrivateKey)) hash = tsf1.Hash() blk = NewBlock(chainID, 3, hash, clock.New(), []*action.Transfer{coinbaseTsf}, []*action.Vote{vote2, vote3}, nil) err = blk.SignBlock(ta.Addrinfo["producer"]) require.NoError(err) err = val.Validate(blk, 2, hash, true) require.Error(err) require.Equal(ErrActionNonce, errors.Cause(err)) // non consecutive nonce tsf5, err := action.NewTransfer(2, big.NewInt(30), ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["bravo"].RawAddress, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(tsf5, ta.Addrinfo["producer"].PrivateKey)) tsf6, err := action.NewTransfer(4, big.NewInt(30), ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["bravo"].RawAddress, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(tsf6, ta.Addrinfo["producer"].PrivateKey)) hash = tsf1.Hash() blk = NewBlock(chainID, 3, hash, clock.New(), []*action.Transfer{coinbaseTsf, tsf5, tsf6}, nil, nil) err = blk.SignBlock(ta.Addrinfo["producer"]) require.NoError(err) err = val.Validate(blk, 2, hash, true) require.Error(err) require.Equal(ErrActionNonce, errors.Cause(err)) vote4, err := action.NewVote(2, ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["bravo"].RawAddress, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(vote4, ta.Addrinfo["producer"].PrivateKey)) vote5, err := action.NewVote(4, ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["charlie"].RawAddress, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(vote5, ta.Addrinfo["producer"].PrivateKey)) hash = tsf1.Hash() blk = NewBlock(chainID, 3, hash, clock.New(), []*action.Transfer{coinbaseTsf}, []*action.Vote{vote4, vote5}, nil) err = blk.SignBlock(ta.Addrinfo["producer"]) require.NoError(err) err = val.Validate(blk, 2, hash, true) require.Error(err) require.Equal(ErrActionNonce, errors.Cause(err)) } func TestWrongCoinbaseTsf(t *testing.T) { cfg := &config.Default testutil.CleanupPath(t, cfg.Chain.TrieDBPath) defer testutil.CleanupPath(t, cfg.Chain.TrieDBPath) testutil.CleanupPath(t, cfg.Chain.ChainDBPath) defer testutil.CleanupPath(t, cfg.Chain.ChainDBPath) require := require.New(t) sf, err := state.NewFactory(cfg, state.DefaultTrieOption()) require.NoError(err) require.NoError(sf.Start(context.Background())) _, err = sf.LoadOrCreateState(ta.Addrinfo["producer"].RawAddress, Gen.TotalSupply) require.Nil(err) val := validator{sf, ""} _, err = sf.RunActions(0, nil, nil, nil) require.Nil(err) // no coinbase tsf coinbaseTsf := action.NewCoinBaseTransfer(big.NewInt(int64(Gen.BlockReward)), ta.Addrinfo["producer"].RawAddress) tsf1, err := action.NewTransfer(1, big.NewInt(20), ta.Addrinfo["producer"].RawAddress, ta.Addrinfo["alfa"].RawAddress, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(err) require.NoError(action.Sign(tsf1, ta.Addrinfo["producer"].PrivateKey)) hash := tsf1.Hash() blk := NewBlock(1, 3, hash, clock.New(), []*action.Transfer{tsf1}, nil, nil) err = blk.SignBlock(ta.Addrinfo["producer"]) 
require.NoError(err) err = val.Validate(blk, 2, hash, true) require.Error(err) require.True( strings.Contains(err.Error(), "wrong number of coinbase transfers"), ) // extra coinbase transfer blk = NewBlock(1, 3, hash, clock.New(), []*action.Transfer{coinbaseTsf, coinbaseTsf, tsf1}, nil, nil) err = blk.SignBlock(ta.Addrinfo["producer"]) require.NoError(err) err = val.Validate(blk, 2, hash, true) require.Error(err) require.True( strings.Contains(err.Error(), "wrong number of coinbase transfers"), ) // no transfer blk = NewBlock(1, 3, hash, clock.New(), []*action.Transfer{}, nil, nil) err = blk.SignBlock(ta.Addrinfo["producer"]) require.NoError(err) err = val.Validate(blk, 2, hash, true) require.Error(err) require.True( strings.Contains(err.Error(), "wrong number of coinbase transfers"), ) } func TestWrongAddress(t *testing.T) { val := validator{} invalidRecipient := "io1qyqsyqcyq5narhapakcsrhksfajfcpl24us3xp38zwvsep" tsf, err := action.NewTransfer(1, big.NewInt(1), ta.Addrinfo["producer"].RawAddress, invalidRecipient, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(t, err) blk1 := NewBlock(1, 3, hash.ZeroHash32B, clock.New(), []*action.Transfer{tsf}, nil, nil) err = val.verifyActions(blk1, true) require.Error(t, err) require.True(t, strings.Contains(err.Error(), "failed to validate transfer recipient's address")) invalidVotee := "ioaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" vote, err := action.NewVote(1, ta.Addrinfo["producer"].RawAddress, invalidVotee, uint64(100000), big.NewInt(10)) require.NoError(t, err) blk2 := NewBlock(1, 3, hash.ZeroHash32B, clock.New(), nil, []*action.Vote{vote}, nil) err = val.verifyActions(blk2, true) require.Error(t, err) require.True(t, strings.Contains(err.Error(), "failed to validate votee's address")) invalidContract := "123" execution, err := action.NewExecution(ta.Addrinfo["producer"].RawAddress, invalidContract, 1, big.NewInt(1), uint64(100000), big.NewInt(10), []byte{}) require.NoError(t, err) blk3 := NewBlock(1, 3, hash.ZeroHash32B, clock.New(), nil, nil, []*action.Execution{execution}) err = val.verifyActions(blk3, true) require.Error(t, err) require.True(t, strings.Contains(err.Error(), "failed to validate contract's address")) } func TestCoinbaseTransferValidation(t *testing.T) { t.Skip("It is skipped because testnet_actions.yaml doesn't match the chain ID") ctx := context.Background() cfg := config.Default cfg.Chain.ID = 1 chain := NewBlockchain(&cfg, InMemStateFactoryOption(), InMemDaoOption()) require.NotNil(t, chain) require.NoError(t, chain.Start(ctx)) defer require.NoError(t, chain.Stop(ctx)) pk, err := keypair.DecodePublicKey( "1d1727028b1e9dac0cafa693edd8496297f5c3281924ec578c0526e7340f7180bfa5af059084c8b90954bf2802a0060e145bece9580f9021352eb112340186e68dc9bea4f7711707") require.NoError(t, err) sk, err := keypair.DecodePrivateKey( "29cf385adfc5b1a84bd7e778ea2c056b85c977771005d545e54100266e224fc276ed7101") require.NoError(t, err) pkHash := keypair.HashPubKey(pk) addr := address.New(cfg.Chain.ID, pkHash[:]) iotxAddr := iotxaddress.Address{ PublicKey: pk, PrivateKey: sk, RawAddress: addr.IotxAddress(), } blk, err := chain.MintNewBlock(nil, nil, nil, &iotxAddr, "") require.NoError(t, err) validator := validator{} require.NoError(t, validator.verifyActions(blk, true)) } func TestValidateSecretBlock(t *testing.T) { cfg := &config.Default testutil.CleanupPath(t, cfg.Chain.TrieDBPath) defer testutil.CleanupPath(t, cfg.Chain.TrieDBPath) testutil.CleanupPath(t, cfg.Chain.ChainDBPath) defer testutil.CleanupPath(t, cfg.Chain.ChainDBPath) 
require := require.New(t) sf, err := state.NewFactory(cfg, state.DefaultTrieOption()) require.NoError(err) require.NoError(sf.Start(context.Background())) _, err = sf.LoadOrCreateState(ta.Addrinfo["producer"].RawAddress, Gen.TotalSupply) require.Nil(err) _, err = sf.RunActions(0, nil, nil, nil) require.Nil(err) require.Nil(sf.Commit()) idList := make([][]uint8, 0) delegates := []string{ta.Addrinfo["producer"].RawAddress} for i := 0; i < 20; i++ { addr, _ := iotxaddress.NewAddress(iotxaddress.IsTestnet, iotxaddress.ChainID) delegates = append(delegates, addr.RawAddress) } for _, delegate := range delegates { idList = append(idList, iotxaddress.CreateID(delegate)) } producerSK := crypto.DKG.SkGeneration() _, shares, witness, err := crypto.DKG.Init(producerSK, idList) require.NoError(err) secretProposals := make([]*action.SecretProposal, 0) for i, share := range shares { secretProposal, err := action.NewSecretProposal(uint64(i+1), delegates[0], delegates[i], share) require.NoError(err) secretProposals = append(secretProposals, secretProposal) } secretWitness, err := action.NewSecretWitness(uint64(22), delegates[0], witness) require.NoError(err) hash := secretProposals[0].Hash() blk := NewSecretBlock(1, 3, hash, clock.New(), secretProposals, secretWitness) err = blk.SignBlock(ta.Addrinfo["producer"]) require.NoError(err) val := validator{sf, delegates[1]} require.NoError(val.Validate(blk, 2, hash, false)) // Falsify secret proposal dummySecretProposal, err := action.NewSecretProposal(2, delegates[0], delegates[1], []uint32{1, 2, 3, 4, 5}) require.NoError(err) secretProposals[1] = dummySecretProposal blk = NewSecretBlock(1, 3, hash, clock.New(), secretProposals, secretWitness) err = blk.SignBlock(ta.Addrinfo["producer"]) require.NoError(err) err = val.Validate(blk, 2, hash, false) require.Error(err) require.Equal(ErrDKGSecretProposal, errors.Cause(err)) }
idx: 1
id: 12,244
msg: File is not `goimports`-ed
proj: iotexproject-iotex-core
lang: go
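The `goimports` failure above is about import grouping: the tool keeps standard-library imports in a first block, separated by a blank line from third-party imports, and the patch inserts `github.com/facebookgo/clock` into the standard-library block. A minimal runnable sketch of the layout the tool expects (a hypothetical file, not from the repository, reusing two of the same import paths):

```go
package main

import (
	// Standard-library imports come first, sorted alphabetically.
	"fmt"
	"strings"

	// Third-party imports follow after a blank line; `goimports` would move
	// "github.com/facebookgo/clock" down into a block like this one.
	"github.com/pkg/errors"
)

func main() {
	// Trivial usage so the example compiles with both import groups present.
	err := errors.New(strings.ToUpper("not goimports-ed"))
	fmt.Println(err)
}
```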
patch:
@@ -227,9 +227,6 @@ func invokeErrorToYARPCError(err error, responseMD metadata.MD) error {
 
 // CallStream implements transport.StreamOutbound#CallStream.
 func (o *Outbound) CallStream(ctx context.Context, request *transport.StreamRequest) (*transport.ClientStream, error) {
-	if _, ok := ctx.Deadline(); !ok {
-		return nil, yarpcerrors.InvalidArgumentErrorf("stream requests require a connection establishment timeout on the passed in context")
-	}
 	if err := o.once.WaitUntilRunning(ctx); err != nil {
 		return nil, err
 	}

y: 1
oldf:
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package grpc import ( "bytes" "context" "io/ioutil" "strings" "sync" "time" "github.com/opentracing/opentracing-go" "go.uber.org/yarpc" "go.uber.org/yarpc/api/peer" "go.uber.org/yarpc/api/transport" intyarpcerrors "go.uber.org/yarpc/internal/yarpcerrors" peerchooser "go.uber.org/yarpc/peer" "go.uber.org/yarpc/peer/hostport" "go.uber.org/yarpc/pkg/lifecycle" "go.uber.org/yarpc/yarpcerrors" "google.golang.org/grpc" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) // UserAgent is the User-Agent that will be set for requests. // http://www.grpc.io/docs/guides/wire.html#user-agents const UserAgent = "yarpc-go/" + yarpc.Version var _ transport.UnaryOutbound = (*Outbound)(nil) // Outbound is a transport.UnaryOutbound. type Outbound struct { once *lifecycle.Once lock sync.Mutex t *Transport peerChooser peer.Chooser options *outboundOptions } func newSingleOutbound(t *Transport, address string, options ...OutboundOption) *Outbound { return newOutbound(t, peerchooser.NewSingle(hostport.PeerIdentifier(address), t), options...) } func newOutbound(t *Transport, peerChooser peer.Chooser, options ...OutboundOption) *Outbound { return &Outbound{ once: lifecycle.NewOnce(), t: t, peerChooser: peerChooser, options: newOutboundOptions(options), } } // Start implements transport.Lifecycle#Start. func (o *Outbound) Start() error { return o.once.Start(o.peerChooser.Start) } // Stop implements transport.Lifecycle#Stop. func (o *Outbound) Stop() error { return o.once.Stop(o.peerChooser.Stop) } // IsRunning implements transport.Lifecycle#IsRunning. func (o *Outbound) IsRunning() bool { return o.once.IsRunning() } // Transports implements transport.Inbound#Transports. func (o *Outbound) Transports() []transport.Transport { return []transport.Transport{o.t} } // Chooser returns the peer.Chooser associated with this Outbound. func (o *Outbound) Chooser() peer.Chooser { return o.peerChooser } // Call implements transport.UnaryOutbound#Call. 
func (o *Outbound) Call(ctx context.Context, request *transport.Request) (*transport.Response, error) { if request == nil { return nil, yarpcerrors.InvalidArgumentErrorf("request for grpc outbound was nil") } if err := o.once.WaitUntilRunning(ctx); err != nil { return nil, intyarpcerrors.AnnotateWithInfo(yarpcerrors.FromError(err), "error waiting for grpc outbound to start for service: %s", request.Service) } start := time.Now() var responseBody []byte var responseMD metadata.MD invokeErr := o.invoke(ctx, request, &responseBody, &responseMD, start) responseHeaders, err := getApplicationHeaders(responseMD) if err != nil { return nil, err } return &transport.Response{ Body: ioutil.NopCloser(bytes.NewBuffer(responseBody)), Headers: responseHeaders, ApplicationError: metadataToIsApplicationError(responseMD), }, invokeErrorToYARPCError(invokeErr, responseMD) } func (o *Outbound) invoke( ctx context.Context, request *transport.Request, responseBody *[]byte, responseMD *metadata.MD, start time.Time, ) (retErr error) { md, err := transportRequestToMetadata(request) if err != nil { return err } bytes, err := ioutil.ReadAll(request.Body) if err != nil { return err } fullMethod, err := procedureNameToFullMethod(request.Procedure) if err != nil { return err } var callOptions []grpc.CallOption if responseMD != nil { callOptions = []grpc.CallOption{grpc.Trailer(responseMD)} } apiPeer, onFinish, err := o.peerChooser.Choose(ctx, request) if err != nil { return err } defer func() { onFinish(retErr) }() grpcPeer, ok := apiPeer.(*grpcPeer) if !ok { return peer.ErrInvalidPeerConversion{ Peer: apiPeer, ExpectedType: "*grpcPeer", } } tracer := o.t.options.tracer createOpenTracingSpan := &transport.CreateOpenTracingSpan{ Tracer: tracer, TransportName: transportName, StartTime: start, ExtraTags: yarpc.OpentracingTags, } ctx, span := createOpenTracingSpan.Do(ctx, request) defer span.Finish() if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, mdReadWriter(md)); err != nil { return err } return transport.UpdateSpanWithErr( span, grpcPeer.clientConn.Invoke( metadata.NewOutgoingContext(ctx, md), fullMethod, bytes, responseBody, callOptions..., ), ) } func metadataToIsApplicationError(responseMD metadata.MD) bool { if responseMD == nil { return false } value, ok := responseMD[ApplicationErrorHeader] return ok && len(value) > 0 && len(value[0]) > 0 } func invokeErrorToYARPCError(err error, responseMD metadata.MD) error { if err == nil { return nil } if yarpcerrors.IsStatus(err) { return err } status, ok := status.FromError(err) // if not a yarpc error or grpc error, just return a wrapped error if !ok { return yarpcerrors.FromError(err) } code, ok := _grpcCodeToCode[status.Code()] if !ok { code = yarpcerrors.CodeUnknown } var name string if responseMD != nil { value, ok := responseMD[ErrorNameHeader] // TODO: what to do if the length is > 1? if ok && len(value) == 1 { name = value[0] } } message := status.Message() // we put the name as a prefix for grpc compatibility // if there was no message, the message will be the name, so we leave it as the message if name != "" && message != "" && message != name { message = strings.TrimPrefix(message, name+": ") } else if name != "" && message == name { message = "" } return intyarpcerrors.NewWithNamef(code, name, message) } // CallStream implements transport.StreamOutbound#CallStream. 
func (o *Outbound) CallStream(ctx context.Context, request *transport.StreamRequest) (*transport.ClientStream, error) { if _, ok := ctx.Deadline(); !ok { return nil, yarpcerrors.InvalidArgumentErrorf("stream requests require a connection establishment timeout on the passed in context") } if err := o.once.WaitUntilRunning(ctx); err != nil { return nil, err } return o.stream(ctx, request, time.Now()) } func (o *Outbound) stream( ctx context.Context, req *transport.StreamRequest, start time.Time, ) (_ *transport.ClientStream, err error) { if req.Meta == nil { return nil, yarpcerrors.InvalidArgumentErrorf("stream request requires a request metadata") } treq := req.Meta.ToRequest() md, err := transportRequestToMetadata(treq) if err != nil { return nil, err } fullMethod, err := procedureNameToFullMethod(req.Meta.Procedure) if err != nil { return nil, err } apiPeer, onFinish, err := o.peerChooser.Choose(ctx, treq) if err != nil { return nil, err } defer func() { onFinish(err) }() grpcPeer, ok := apiPeer.(*grpcPeer) if !ok { return nil, peer.ErrInvalidPeerConversion{ Peer: apiPeer, ExpectedType: "*grpcPeer", } } tracer := o.t.options.tracer createOpenTracingSpan := &transport.CreateOpenTracingSpan{ Tracer: tracer, TransportName: transportName, StartTime: start, ExtraTags: yarpc.OpentracingTags, } _, span := createOpenTracingSpan.Do(ctx, treq) if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, mdReadWriter(md)); err != nil { span.Finish() return nil, err } // We use a different context for the lifetime of the stream and the time it // took to establish a connection with the peer, setting the stream's // context to context.Background() means the lifetime of this stream has no // timeout. streamCtx := metadata.NewOutgoingContext(context.Background(), md) clientStream, err := grpcPeer.clientConn.NewStream( streamCtx, &grpc.StreamDesc{ ClientStreams: true, ServerStreams: true, }, fullMethod, ) if err != nil { span.Finish() return nil, err } stream := newClientStream(streamCtx, req, clientStream, span) tClientStream, err := transport.NewClientStream(stream) if err != nil { span.Finish() return nil, err } return tClientStream, nil }
idx: 1
id: 16,391
msg: Do we want to still validate that the context had a nonzero TTL? I suppose it's fine to let clients determine whether or not their streams will time out (though we still don't have the means to cancel them from the server other than EOF), and they can still cancel their own contexts, so maybe it's not a huge deal, but I thought it was worth bringing up.
proj: yarpc-yarpc-go
lang: go
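The removed check and the reviewer's question both hinge on `context.Context.Deadline()`, which reports whether any deadline is set at all. A minimal sketch (hypothetical helper name, not part of the yarpc API) of what the old validation did and how a client can still bound connection establishment on its side:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// requireDeadline mirrors the validation the patch removes: reject contexts
// that can never time out on their own.
func requireDeadline(ctx context.Context) error {
	if _, ok := ctx.Deadline(); !ok {
		return fmt.Errorf("stream requests require a deadline on the passed in context")
	}
	return nil
}

func main() {
	// Without the check, a background context is now accepted; the stream
	// simply has no TTL unless the client cancels it.
	fmt.Println(requireDeadline(context.Background())) // error

	// Clients that still want bounded connection establishment can attach
	// their own timeout before calling.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	fmt.Println(requireDeadline(ctx)) // <nil>
}
```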
patch:
@@ -72,4 +72,9 @@ public class JavaNameFormatter implements NameFormatter {
   public String classFileNameBase(Name name) {
     return name.toOriginal();
   }
+
+  @Override
+  public String humanName(Name name) {
+    throw new UnsupportedOperationException();
+  }
 }

y: 1
oldf:
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.util.java; import com.google.api.codegen.util.Name; import com.google.api.codegen.util.NameFormatter; import com.google.api.codegen.util.NamePath; /** * The NameFormatter for Java. */ public class JavaNameFormatter implements NameFormatter { @Override public String className(Name name) { return name.toUpperCamel(); } @Override public String varName(Name name) { return name.toLowerCamel(); } @Override public String varReference(Name name) { return varName(name); } @Override public String methodName(Name name) { return name.toLowerCamel(); } @Override public String staticFunctionName(Name name) { return name.toLowerCamel(); } @Override public String inittedConstantName(Name name) { return name.toUpperUnderscore(); } @Override public String keyName(Name name) { return name.toLowerCamel(); } @Override public String qualifiedName(NamePath namePath) { return namePath.toDotted(); } @Override public String packageFilePathPiece(Name name) { return name.toOriginal(); } @Override public String classFileNameBase(Name name) { return name.toOriginal(); } }
idx: 1
id: 17,330
msg: Go ahead and provide the same implementation for all of the languages - I'm not sure there's a reason for it to be different.
proj: googleapis-gapic-generator
lang: java
patch:
@@ -253,6 +253,15 @@ class TestApp(tornado.testing.AsyncHTTPTestCase):
         assert self.put_json("/settings", {"anticache": True}).code == 200
         assert self.put_json("/settings", {"wtf": True}).code == 400
 
+    def test_options(self):
+        j = json(self.fetch("/options"))
+        assert type(j) == list
+        assert type(j[0]) == dict
+
+    def test_option_update(self):
+        assert self.put_json("/options", {"anticache": True}).code == 200
+        assert self.put_json("/options", {"wtf": True}).code == 400
+
     def test_err(self):
         with mock.patch("mitmproxy.tools.web.app.IndexHandler.get") as f:
             f.side_effect = RuntimeError

y: 1
oldf:
import json as _json from unittest import mock import os import tornado.testing from tornado import httpclient from tornado import websocket from mitmproxy import exceptions from mitmproxy import proxy from mitmproxy import options from mitmproxy.test import tflow from mitmproxy.tools.web import app from mitmproxy.tools.web import master as webmaster def json(resp: httpclient.HTTPResponse): return _json.loads(resp.body.decode()) class TestApp(tornado.testing.AsyncHTTPTestCase): def get_app(self): o = options.Options(http2=False) m = webmaster.WebMaster(o, proxy.DummyServer(), with_termlog=False) f = tflow.tflow(resp=True) f.id = "42" m.view.add([f]) m.view.add([tflow.tflow(err=True)]) m.add_log("test log", "info") self.master = m self.view = m.view self.events = m.events webapp = app.Application(m, None) webapp.settings["xsrf_cookies"] = False return webapp def fetch(self, *args, **kwargs) -> httpclient.HTTPResponse: # tornado disallows POST without content by default. return super().fetch(*args, **kwargs, allow_nonstandard_methods=True) def put_json(self, url, data: dict) -> httpclient.HTTPResponse: return self.fetch( url, method="PUT", body=_json.dumps(data), headers={"Content-Type": "application/json"}, ) def test_index(self): assert self.fetch("/").code == 200 def test_filter_help(self): assert self.fetch("/filter-help").code == 200 def test_flows(self): resp = self.fetch("/flows") assert resp.code == 200 assert json(resp)[0]["request"]["contentHash"] assert json(resp)[1]["error"] def test_flows_dump(self): resp = self.fetch("/flows/dump") assert b"address" in resp.body self.view.clear() assert not len(self.view) assert self.fetch("/flows/dump", method="POST", body=resp.body).code == 200 assert len(self.view) def test_clear(self): events = self.events.data.copy() flows = list(self.view) assert self.fetch("/clear", method="POST").code == 200 assert not len(self.view) assert not len(self.events.data) # restore for f in flows: self.view.add([f]) self.events.data = events def test_resume(self): for f in self.view: f.intercept() assert self.fetch( "/flows/42/resume", method="POST").code == 200 assert sum(f.intercepted for f in self.view) == 1 assert self.fetch("/flows/resume", method="POST").code == 200 assert all(not f.intercepted for f in self.view) def test_kill(self): for f in self.view: f.backup() f.intercept() assert self.fetch("/flows/42/kill", method="POST").code == 200 assert sum(f.killable for f in self.view) == 1 assert self.fetch("/flows/kill", method="POST").code == 200 assert all(not f.killable for f in self.view) for f in self.view: f.revert() def test_flow_delete(self): f = self.view.get_by_id("42") assert f assert self.fetch("/flows/42", method="DELETE").code == 200 assert not self.view.get_by_id("42") self.view.add([f]) assert self.fetch("/flows/1234", method="DELETE").code == 404 def test_flow_update(self): f = self.view.get_by_id("42") assert f.request.method == "GET" f.backup() upd = { "request": { "method": "PATCH", "port": 123, "headers": [("foo", "bar")], "content": "req", }, "response": { "msg": "Non-Authorisé", "code": 404, "headers": [("bar", "baz")], "content": "resp", } } assert self.put_json("/flows/42", upd).code == 200 assert f.request.method == "PATCH" assert f.request.port == 123 assert f.request.headers["foo"] == "bar" assert f.request.text == "req" assert f.response.msg == "Non-Authorisé" assert f.response.status_code == 404 assert f.response.headers["bar"] == "baz" assert f.response.text == "resp" f.revert() assert self.put_json("/flows/42", {"foo": 
42}).code == 400 assert self.put_json("/flows/42", {"request": {"foo": 42}}).code == 400 assert self.put_json("/flows/42", {"response": {"foo": 42}}).code == 400 assert self.fetch("/flows/42", method="PUT", body="{}").code == 400 assert self.fetch( "/flows/42", method="PUT", headers={"Content-Type": "application/json"}, body="!!" ).code == 400 def test_flow_duplicate(self): resp = self.fetch("/flows/42/duplicate", method="POST") assert resp.code == 200 f = self.view.get_by_id(resp.body.decode()) assert f assert f.id != "42" self.view.remove([f]) def test_flow_revert(self): f = self.view.get_by_id("42") f.backup() f.request.method = "PATCH" self.fetch("/flows/42/revert", method="POST") assert not f._backup def test_flow_replay(self): with mock.patch("mitmproxy.master.Master.replay_request") as replay_request: assert self.fetch("/flows/42/replay", method="POST").code == 200 assert replay_request.called replay_request.side_effect = exceptions.ReplayException( "out of replays" ) assert self.fetch("/flows/42/replay", method="POST").code == 400 def test_flow_content(self): f = self.view.get_by_id("42") f.backup() f.response.headers["Content-Encoding"] = "ran\x00dom" f.response.headers["Content-Disposition"] = 'inline; filename="filename.jpg"' r = self.fetch("/flows/42/response/content") assert r.body == b"message" assert r.headers["Content-Encoding"] == "random" assert r.headers["Content-Disposition"] == 'attachment; filename="filename.jpg"' del f.response.headers["Content-Disposition"] f.request.path = "/foo/bar.jpg" assert self.fetch( "/flows/42/response/content" ).headers["Content-Disposition"] == 'attachment; filename=bar.jpg' f.response.content = b"" assert self.fetch("/flows/42/response/content").code == 400 f.revert() def test_update_flow_content(self): assert self.fetch( "/flows/42/request/content", method="POST", body="new" ).code == 200 f = self.view.get_by_id("42") assert f.request.content == b"new" assert f.modified() f.revert() def test_update_flow_content_multipart(self): body = ( b'--somefancyboundary\r\n' b'Content-Disposition: form-data; name="a"; filename="a.txt"\r\n' b'\r\n' b'such multipart. very wow.\r\n' b'--somefancyboundary--\r\n' ) assert self.fetch( "/flows/42/request/content", method="POST", headers={"Content-Type": 'multipart/form-data; boundary="somefancyboundary"'}, body=body ).code == 200 f = self.view.get_by_id("42") assert f.request.content == b"such multipart. very wow." 
assert f.modified() f.revert() def test_flow_content_view(self): assert json(self.fetch("/flows/42/request/content/raw")) == { "lines": [ [["text", "content"]] ], "description": "Raw" } def test_events(self): resp = self.fetch("/events") assert resp.code == 200 assert json(resp)[0]["level"] == "info" def test_settings(self): assert json(self.fetch("/settings"))["mode"] == "regular" def test_settings_update(self): assert self.put_json("/settings", {"anticache": True}).code == 200 assert self.put_json("/settings", {"wtf": True}).code == 400 def test_err(self): with mock.patch("mitmproxy.tools.web.app.IndexHandler.get") as f: f.side_effect = RuntimeError assert self.fetch("/").code == 500 @tornado.testing.gen_test def test_websocket(self): ws_url = "ws://localhost:{}/updates".format(self.get_http_port()) ws_client = yield websocket.websocket_connect(ws_url) self.master.options.anticomp = True response = yield ws_client.read_message() assert _json.loads(response) == { "resource": "settings", "cmd": "update", "data": {"anticomp": True}, } ws_client.close() # trigger on_close by opening a second connection. ws_client2 = yield websocket.websocket_connect(ws_url) ws_client2.close() def test_generate_tflow_js(self): _tflow = app.flow_to_json(tflow.tflow(resp=True, err=True)) # Set some value as constant, so that _tflow.js would not change every time. _tflow['client_conn']['id'] = "4a18d1a0-50a1-48dd-9aa6-d45d74282939" _tflow['id'] = "d91165be-ca1f-4612-88a9-c0f8696f3e29" _tflow['error']['timestamp'] = 1495370312.4814785 _tflow['response']['timestamp_end'] = 1495370312.4814625 _tflow['response']['timestamp_start'] = 1495370312.481462 _tflow['server_conn']['id'] = "f087e7b2-6d0a-41a8-a8f0-e1a4761395f8" tflow_json = _json.dumps(_tflow, indent=4, sort_keys=True) here = os.path.abspath(os.path.dirname(__file__)) web_root = os.path.join(here, os.pardir, os.pardir, os.pardir, os.pardir, 'web') tflow_path = os.path.join(web_root, 'src/js/__tests__/ducks/_tflow.js') content = """export default function(){{\n return {tflow_json}\n}}""".format(tflow_json=tflow_json) with open(tflow_path, 'w') as f: f.write(content)
idx: 1
id: 13,335
msg: We should also test type confusion here (e.g. `{"anticache": "foo"}`)
proj: mitmproxy-mitmproxy
lang: py
patch:
@@ -236,6 +236,10 @@ MESSAGE
         end
 
         sh.cmd 'unformatted=`dartfmt -n .`'
+        # If `dartfmt` fails for some reason
+        sh.if '$? -ne 0' do
+          sh.failure ""
+        end
         sh.if '! -z "$unformatted"' do
           sh.echo "Files are unformatted:", ansi: :red
           sh.echo "$unformatted"

y: 1
oldf:
module Travis module Build class Script class Dart < Script DEFAULTS = { dart: 'stable', with_content_shell: false, install_dartium: false, xvfb: true } attr_reader :task def initialize(*args) super @task = config[:dart_task] || {} @task = {task.to_sym => true} if task.is_a?(String) @task[:install_dartium] = config[:install_dartium] unless task.include?(:install_dartium) @task[:xvfb] = config[:xvfb] unless task.include?(:xvfb) @task[:dart] ||= config[:dart] # Run "pub run test" by default if no other tasks are specified. @task[:test] ||= true if !@task[:dartanalyzer] && !@task[:dartfmt] end def configure super if config[:with_content_shell] if config.include?(:dart_task) sh.failure "with_content_shell can't be used with dart_task." return elsif config[:install_dartium] sh.failure "with_content_shell can't be used with install_dartium." return elsif !config[:xvfb] sh.failure "with_content_shell can't be used with xvfb." return end sh.fold "deprecated.with_content_shell" do sh.deprecate <<MESSAGE DEPRECATED: with_content_shell is deprecated. Instead use: dart_task: - test: --platform vm - test: --platform firefox - test: --platform dartium install_dartium: true MESSAGE end sh.fold 'content_shell_dependencies_install' do sh.echo 'Installing Content Shell dependencies', ansi: :yellow # Enable Multiverse Packages: sh.cmd "sudo sh -c 'echo \"deb http://gce_debian_mirror.storage.googleapis.com precise contrib non-free\" >> /etc/apt/sources.list'" sh.cmd "sudo sh -c 'echo \"deb http://gce_debian_mirror.storage.googleapis.com precise-updates contrib non-free\" >> /etc/apt/sources.list'" sh.cmd "sudo sh -c 'apt-get update'" # Pre-accepts MSFT Fonts EULA: sh.cmd "sudo sh -c 'echo ttf-mscorefonts-installer msttcorefonts/accepted-mscorefonts-eula select true | debconf-set-selections'" # Install all dependencies: sh.cmd "sudo sh -c 'apt-get install --no-install-recommends -y -q chromium-browser libudev0 ttf-kochi-gothic ttf-kochi-mincho ttf-mscorefonts-installer ttf-indic-fonts ttf-dejavu-core ttf-indic-fonts-core fonts-thai-tlwg msttcorefonts xvfb'" end end end def export super sh.export 'TRAVIS_DART_VERSION', task[:dart], echo: false end def setup super sh.echo 'Dart for Travis-CI is not officially supported, ' \ 'but is community maintained.', ansi: :green sh.echo 'Please file any issues using the following link', ansi: :green sh.echo ' https://github.com/travis-ci/travis-ci/issues' \ '/new?labels=community:dart', ansi: :green sh.echo 'and mention \`@nex3\` and \`@a14n\`' \ ' in the issue', ansi: :green sh.export 'PUB_ENVIRONMENT', 'travis' sh.fold 'dart_install' do sh.echo 'Installing Dart', ansi: :yellow sh.cmd "curl #{archive_url}/sdk/dartsdk-#{os}-x64-release.zip > $HOME/dartsdk.zip" sh.cmd "unzip $HOME/dartsdk.zip -d $HOME > /dev/null" sh.cmd "rm $HOME/dartsdk.zip" sh.cmd 'export DART_SDK="$HOME/dart-sdk"' sh.cmd 'export PATH="$DART_SDK/bin:$PATH"' sh.cmd 'export PATH="$HOME/.pub-cache/bin:$PATH"' end if task[:install_dartium] sh.fold 'dartium_install' do sh.echo 'Installing Dartium', anis: :yellow sh.cmd "mkdir $HOME/dartium" sh.cmd "cd $HOME/dartium" sh.cmd "curl #{archive_url}/dartium/dartium-#{os}-x64-release.zip > dartium.zip" sh.cmd "unzip dartium.zip > /dev/null" sh.cmd "rm dartium.zip" sh.cmd 'dartium_dir="${PWD%/}/$(ls)"' # The executable has to be named "dartium" in order for the test # runner to find it. 
if os == 'macos' sh.cmd 'ln -s "$dartium_dir/Chromium.app/Contents/MacOS/Chromium" dartium' else sh.cmd 'ln -s "$dartium_dir/chrome" dartium' end sh.cmd 'export PATH="$PWD:$PATH"' sh.cmd "cd -" end end if config[:with_content_shell] if config[:os] != 'linux' sh.failure 'Content shell only supported on Linux' end sh.fold 'content_shell_install' do sh.echo 'Installing Content Shell', ansi: :yellow # Download and install Content Shell sh.cmd "mkdir $HOME/content_shell" sh.cmd "cd $HOME/content_shell" sh.cmd "curl #{archive_url}/dartium/content_shell-linux-x64-release.zip > content_shell.zip" sh.cmd "unzip content_shell.zip > /dev/null" sh.cmd "rm content_shell.zip" sh.cmd 'export PATH="${PWD%/}/$(ls):$PATH"' sh.cmd "cd -" end end end def announce super sh.cmd 'dart --version' sh.echo '' end def install sh.if '-f pubspec.yaml' do sh.fold 'pub_get' do sh.cmd "pub get" end end end def script # tests with test package sh.if package_installed?('test'), raw: true do if config[:with_content_shell] sh.export 'DISPLAY', ':99.0' sh.cmd 'sh -e /etc/init.d/xvfb start' # give xvfb some time to start sh.cmd 't=0; until (xdpyinfo -display :99 &> /dev/null || test $t -gt 10); do sleep 1; let t=$t+1; done' sh.cmd 'pub run test -p vm -p content-shell -p firefox' else pub_run_test end end # tests with test_runner for old tests written with unittest package sh.elif package_installed?('unittest'), raw: true do sh.fold "deprecated.unittest" do sh.deprecate <<MESSAGE DEPRECATED: The unittest package is deprecated. Please upgrade to the test package. See https://github.com/dart-lang/test#readme. MESSAGE end sh.fold 'test_runner_install' do sh.echo 'Installing Test Runner', ansi: :yellow sh.cmd 'pub global activate test_runner' end if config[:with_content_shell] sh.cmd 'xvfb-run -s "-screen 0 1024x768x24" pub global run test_runner --disable-ansi' else sh.cmd 'pub global run test_runner --disable-ansi --skip-browser-tests' end end dartanalyzer if run_dartanalyzer? dartfmt if run_dartfmt? end private def run_dartanalyzer? !!task[:dartanalyzer] end def run_dartfmt? !!task[:dartfmt] end def pub_run_test args = task[:test] unless args return sh.raw ':' end args = args.is_a?(String) ? " #{args}" : "" # Mac OS doesn't need or support xvfb-run. xvfb_run = 'xvfb-run -s "-screen 0 1024x768x24" ' xvfb_run = '' if task[:xvfb] == false || os == "macos" sh.cmd "#{xvfb_run}pub run test#{args}" end def dartanalyzer args = task[:dartanalyzer] args = '.' unless args.is_a?(String) sh.cmd "dartanalyzer #{args}" end def dartfmt args = task[:dartfmt] if args.is_a?(String) sh.echo "dartfmt arguments aren't supported.", ansi: :red end sh.if package_installed?('dart_style'), raw: true do sh.raw 'function dartfmt() { pub run dart_style:format "$@"; }' end sh.cmd 'unformatted=`dartfmt -n .`' sh.if '! -z "$unformatted"' do sh.echo "Files are unformatted:", ansi: :red sh.echo "$unformatted" sh.failure "" end end def package_installed?(package) "[[ -d packages/#{package} ]] || grep -q ^#{package}: .packages 2> /dev/null" end def os config[:os] == 'osx' ? 
'macos' : 'linux' end def archive_url url_end = '' # support of "dev" or "stable" if ["stable", "dev"].include?(task[:dart]) url_end = "#{task[:dart]}/release/latest" # support of "stable/release/1.15.0" or "be/raw/110749" elsif task[:dart].include?("/") url_end = task[:dart] # support of dev versions like "1.16.0-dev.2.0" or "1.16.0-dev.2.0" elsif task[:dart].include?("-dev") url_end = "dev/release/#{task[:dart]}" # support of stable versions like "1.14.0" or "1.14.1" else url_end = "stable/release/#{task[:dart]}" end "https://storage.googleapis.com/dart-archive/channels/#{url_end}" end end end end end
idx: 1
id: 15,244
msg: I'd love to trim the content of `unformatted` too. Any ideas?
proj: travis-ci-travis-build
lang: rb
patch:
@@ -17,6 +17,7 @@
 """
 Utilities to deal with types. This is mostly focused on python3.
 """
+
 import typing
 from inspect import getfullargspec
 from functools import wraps

y: 1
oldf:
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Utilities to deal with types. This is mostly focused on python3. """ import typing from inspect import getfullargspec from functools import wraps import numpy as np import pandas as pd from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype import pyarrow as pa from pyspark.sql import Column from pyspark.sql.functions import pandas_udf import pyspark.sql.types as types from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. __all__ = ['Col', 'pandas_wraps', 'as_spark_type', 'as_python_type', 'infer_pd_series_spark_type'] T = typing.TypeVar("T") class Col(typing.Generic[T]): def is_col(self): return self # A column of data, with the data type. class _Column(object): def __init__(self, inner): self.inner = inner # type: types.DataType def __repr__(self): return "_ColumnType[{}]".format(self.inner) class _DataFrame(object): def __repr__(self): return "_DataFrameType" # The type is a scalar type that is furthermore understood by Spark. class _Scalar(object): def __init__(self, tpe): self.type = tpe # type: types.DataType def __repr__(self): return "_ScalarType[{}]".format(self.type) # The type is left unspecified or we do not know about this type. class _Unknown(object): def __init__(self, tpe): self.type = tpe def __repr__(self): return "_UnknownType[{}]".format(self.type) X = typing.Union[_Column, _DataFrame, _Scalar, _Unknown] def _is_col(tpe): return hasattr(tpe, "is_col") def _get_col_inner(tpe): return tpe.__args__[0] def _to_stype(tpe) -> X: if _is_col(tpe): inner = as_spark_type(_get_col_inner(tpe)) return _Column(inner) inner = as_spark_type(tpe) if inner is None: return _Unknown(tpe) else: return _Scalar(inner) # First element of the list is the python base type _base = { types.StringType(): [str, 'str', 'string'], types.ByteType(): [np.int8, 'int8', 'byte'], types.ShortType(): [np.int16, 'int16', 'short'], types.IntegerType(): [int, 'int', np.int], types.LongType(): [np.int64, 'int64', 'long', 'bigint'], types.FloatType(): [float, 'float', np.float], types.DoubleType(): [np.float64, 'float64', 'double'], types.TimestampType(): [np.datetime64], types.BooleanType(): [bool, 'boolean', 'bool', np.bool], } def _build_type_dict(): return dict([(other_type, spark_type) for (spark_type, l) in _base.items() for other_type in l] + [(spark_type, spark_type) for (spark_type, _) in _base.items()]) def _build_py_type_dict(): return dict([(spark_type, l[0]) for (spark_type, l) in _base.items()]) _known_types = _build_type_dict() _py_conversions = _build_py_type_dict() def as_spark_type(tpe) -> types.DataType: """ Given a python type, returns the equivalent spark type. 
    Accepts:
    - the built-in types in python
    - the built-in types in numpy
    - list of pairs of (field_name, type)
    - dictionaries of field_name -> type
    - python3's typing system
    :param tpe:
    :return:
    """
    return _known_types.get(tpe, None)


def as_python_type(spark_tpe):
    return _py_conversions.get(spark_tpe, None)


def infer_pd_series_spark_type(s: pd.Series) -> types.DataType:
    """Infer Spark DataType from pandas Series dtype.

    :param s: :class:`pandas.Series` to be inferred
    :return: the inferred Spark data type
    """
    dt = s.dtype
    if dt == np.dtype('object'):
        if len(s) == 0 or s.isnull().all():
            raise ValueError("can not infer schema from empty or null dataset")
        return types.from_arrow_type(pa.Array.from_pandas(s).type)
    elif is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
        return types.TimestampType()
    else:
        return types.from_arrow_type(pa.from_numpy_dtype(dt))


def _make_fun(f: typing.Callable, return_type: types.DataType, *args, **kwargs) -> 'ks.Series':
    """
    This function calls the function f while taking into account some of the
    limitations of the pandas UDF support:
    - support for keyword arguments
    - support for scalar values (as long as they are picklable)
    - support for type hints and input checks.
    :param f: the function to call. It is expected to have field annotations (see below).
    :param return_type: the return type
    :param args: the arguments of the function
    :param kwargs: the kwargs to pass to the function
    :return: the value of executing the function: f(*args, **kwargs)

    The way this function executes depends on what is provided as arguments:
    - if one of the arguments is a koalas series or dataframe:
        - the function is wrapped as a Spark UDF
        - the series arguments are checked to be coming from the same original anchor
        - the non-series arguments are serialized into the spark UDF.

    The function is expected to have the following arguments:
    """
    from databricks.koalas.series import Series
    # All the arguments.
    # None for columns or the value for non-columns
    frozen_args = []  # type: typing.List[typing.Any]
    # ks.Series for columns or None for the non-columns
    col_args = []  # type: typing.List[typing.Optional[Series]]
    for arg in args:
        if isinstance(arg, Series):
            frozen_args.append(None)
            col_args.append(arg)
        elif isinstance(arg, Column):
            raise ValueError('A pyspark column was passed as an argument.'
                             ' Pass a koalas series instead')
        else:
            frozen_args.append(arg)
            col_args.append(None)

    # Value is none for kwargs that are columns, and the value otherwise
    frozen_kwargs = []  # type: typing.List[typing.Tuple[str, typing.Any]]
    # Value is a spark col for kwarg that is column, and None otherwise
    col_kwargs = []  # type: typing.List[typing.Tuple[str, Series]]
    for (key, arg) in kwargs.items():
        if isinstance(arg, Series):
            col_kwargs.append((key, arg))
        elif isinstance(arg, Column):
            raise ValueError('A pyspark column was passed as an argument.'
                             ' Pass a koalas series instead')
        else:
            frozen_kwargs.append((key, arg))

    col_args_idxs = [idx for (idx, c) in enumerate(col_args) if c is not None]
    all_indexes = (col_args_idxs + [key for (key, _) in col_kwargs])  # type: ignore
    if not all_indexes:
        # No argument is related to spark
        # The function is just called through without other considerations.
        return f(*args, **kwargs)

    # We detected some columns. They need to be wrapped in a UDF to spark.
    (index_map, kdf) = _get_metadata(args, kwargs)

    def clean_fun(*args2):
        assert len(args2) == len(all_indexes), \
            "Missing some inputs:{}!={}".format(all_indexes, [str(c) for c in args2])
        full_args = list(frozen_args)
        full_kwargs = dict(frozen_kwargs)
        for (arg, idx) in zip(args2, all_indexes):
            if isinstance(idx, int):
                full_args[idx] = arg
            else:
                assert isinstance(idx, str), str(idx)
                full_kwargs[idx] = arg
        return f(*full_args, **full_kwargs)

    wrapped_udf = pandas_udf(clean_fun, returnType=return_type)
    name_tokens = []
    spark_col_args = []
    for col in col_args:
        if col is not None:
            spark_col_args.append(col._scol)
            name_tokens.append(col.name)
    kw_name_tokens = []
    for (key, col) in col_kwargs:
        spark_col_args.append(col._scol)
        kw_name_tokens.append("{}={}".format(key, col.name))
    col = wrapped_udf(*spark_col_args)
    series = Series(data=col, index=index_map, anchor=kdf)
    all_name_tokens = name_tokens + sorted(kw_name_tokens)
    name = "{}({})".format(f.__name__, ", ".join(all_name_tokens))
    series = series.astype(return_type).alias(name)
    return series


def _get_metadata(args, kwargs):
    from databricks.koalas.series import Series
    all_cols = ([arg for arg in args if isinstance(arg, Series)]
                + [arg for arg in kwargs.values() if isinstance(arg, Series)])
    assert all_cols
    # TODO: check all the anchors
    s = all_cols[0]
    return (s._index_map, s._kdf)


def pandas_wraps(function=None, return_col=None, return_scalar=None):
    """ This annotation makes a function available for Koalas.

    Spark requires more information about the return types than pandas, and sometimes more
    information is required. This annotation allows you to seamlessly write functions that
    work for both pandas and koalas.

    Examples
    --------

    Wrapping a function with python 3's type annotations:

    >>> from databricks.koalas import pandas_wraps, Col
    >>> pdf = pd.DataFrame({"col1": [1, 2], "col2": [10, 20]}, dtype=np.int64)
    >>> df = ks.DataFrame(pdf)

    Consider a simple function that operates on pandas series of integers

    >>> def fun(col1):
    ...     return col1.apply(lambda x: x * 2)  # Arbitrary pandas code.
    >>> fun(pdf.col1)
    0    2
    1    4
    Name: col1, dtype: int64

    Koalas needs to know the return type in order to make this function accessible to Spark.
    The following function uses python built-in typing hint system to hint that this function
    returns a Series of integers:

    >>> @pandas_wraps
    ... def fun(col1) -> Col[np.int64]:
    ...     return col1.apply(lambda x: x * 2)  # Arbitrary pandas code.

    This function works as before on pandas Series:

    >>> fun(pdf.col1)
    0    2
    1    4
    Name: col1, dtype: int64

    Now it also works on Koalas series:

    >>> fun(df.col1)
    0    2
    1    4
    Name: fun(col1), dtype: int64

    Alternatively, the type hint can be provided as an argument to the `pandas_wraps`
    decorator:

    >>> @pandas_wraps(return_col=np.int64)
    ... def fun(col1):
    ...     return col1.apply(lambda x: x * 2)  # Arbitrary pandas code.

    >>> fun(df.col1)
    0    2
    1    4
    Name: fun(col1), dtype: int64

    Unlike PySpark user-defined functions, the decorator supports all of python's styles of
    arguments (named arguments, optional arguments, list and keyword arguments). It will
    automatically distribute argument values that are not Koalas series.

    Here is an example of function with optional series arguments and non-series arguments:

    >>> @pandas_wraps(return_col=float)
    ... def fun(col1, col2 = None, arg1="x", **kwargs):
    ...     return 2.0 * col1 if arg1 == "x" else 3.0 * col1 * col2 * kwargs['col3']

    >>> fun(df.col1)
    0    2.0
    1    4.0
    Name: fun(col1), dtype: float32

    >>> fun(df.col1, col2=df.col2, arg1="y", col3=df.col2)
    0     300.0
    1    2400.0
    Name: fun(col1, col2=col2, col3=col2), dtype: float32

    Notes
    -----
    The arguments provided to the function must be picklable, or an error will be raised by
    Spark:

    >>> import sys
    >>> # fun(df.col1, arg1=sys.stdout)  # Will fail!
    """
    def function_wrapper(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            # Extract the signature arguments from this function.
            sig_return = _infer_return_type(f, return_col, return_scalar)
            if not isinstance(sig_return, _Column):
                raise ValueError("Expected the return type of this function to be of type column,"
                                 " but found type {}".format(sig_return))
            spark_return_type = sig_return.inner
            return _make_fun(f, spark_return_type, *args, **kwargs)
        return wrapper

    if return_col is not None or return_scalar is not None:
        return function_wrapper
    return function_wrapper(function)


def _infer_return_type(f, return_col_hint=None, return_scalar_hint=None) -> X:
    spec = getfullargspec(f)
    return_sig = spec.annotations.get("return", None)
    return _get_return_type(return_sig, return_col_hint, return_scalar_hint)


def _get_return_type(return_sig, return_col, return_scalar) -> X:
    """ Resolves the return type.
    :return: X
    """
    if not (return_col or return_sig or return_scalar):
        raise ValueError(
            "Missing type information. It should either be provided as an argument to "
            "pandas_wraps, or as a python typing hint")
    if return_col is not None:
        if isinstance(return_col, Col):
            return _to_stype(return_col)
        inner = as_spark_type(return_col)
        return _Column(inner)
    if return_scalar is not None:
        if isinstance(return_scalar, Col):
            raise ValueError("Column return type {}, you should use 'return_col' to specify"
                             " it.".format(return_scalar))
        inner = as_spark_type(return_scalar)
        return _Scalar(inner)
    if return_sig is not None:
        return _to_stype(return_sig)
    assert False
1
8,612
can you remove this unrelated change?
databricks-koalas
py
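The row above documents how `pandas_wraps` lets one annotated pandas function run on both pandas and koalas input. As a quick reference, here is the same usage pattern condensed from the row's own doctests into a single sketch (the function name `double` is ours; the rest assumes a working Spark and databricks.koalas installation):

import numpy as np
import pandas as pd
import databricks.koalas as ks
from databricks.koalas import pandas_wraps, Col

pdf = pd.DataFrame({"col1": [1, 2], "col2": [10, 20]}, dtype=np.int64)
kdf = ks.DataFrame(pdf)

@pandas_wraps
def double(col1) -> Col[np.int64]:
    return col1.apply(lambda x: x * 2)  # arbitrary pandas code

double(pdf.col1)  # plain pandas input: returns a pandas Series
double(kdf.col1)  # koalas input: executed as a Spark pandas UDF

On koalas input the decorator wraps the function as a Spark pandas UDF and uses the `Col[np.int64]` hint to pick the Spark return type, which is why the return type must be declared up front.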
@@ -145,6 +145,17 @@ class MainWindow(QWidget): _private: Whether the window is in private browsing mode. """ + # Application wide stylesheets + STYLESHEET = """ + QLabel#hint { + background-color: {{ conf.colors.hints.bg }}; + color: {{ conf.colors.hints.fg }}; + font: {{ conf.fonts.hints }}; + border: {{ conf.hints.border }}; + padding-left: -3px; + } + """ + def __init__(self, *, private, geometry=None, parent=None): """Create a new main window.
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:

# Copyright 2014-2019 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser.  If not, see <http://www.gnu.org/licenses/>.

"""The main window of qutebrowser."""

import binascii
import base64
import itertools
import functools

from PyQt5.QtCore import (pyqtSlot, QRect, QPoint, QTimer, Qt,
                          QCoreApplication, QEventLoop)
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QApplication, QSizePolicy

from qutebrowser.commands import runners
from qutebrowser.api import cmdutils
from qutebrowser.config import config, configfiles
from qutebrowser.utils import (message, log, usertypes, qtutils, objreg, utils,
                               jinja, debug)
from qutebrowser.mainwindow import messageview, prompt
from qutebrowser.completion import completionwidget, completer
from qutebrowser.keyinput import modeman
from qutebrowser.browser import commands, downloadview, hints, downloads
from qutebrowser.misc import crashsignal, keyhintwidget


win_id_gen = itertools.count(0)


def get_window(via_ipc, force_window=False, force_tab=False,
               force_target=None, no_raise=False):
    """Helper function for app.py to get a window id.

    Args:
        via_ipc: Whether the request was made via IPC.
        force_window: Whether to force opening in a window.
        force_tab: Whether to force opening in a tab.
        force_target: Override the new_instance_open_target config
        no_raise: suppress target window raising

    Return:
        ID of a window that was used to open URL
    """
    if force_window and force_tab:
        raise ValueError("force_window and force_tab are mutually exclusive!")

    if not via_ipc:
        # Initial main window
        return 0

    open_target = config.val.new_instance_open_target

    # Apply any target overrides, ordered by precedence
    if force_target is not None:
        open_target = force_target
    if force_window:
        open_target = 'window'
    if force_tab and open_target == 'window':
        # Command sent via IPC
        open_target = 'tab-silent'

    window = None
    should_raise = False

    # Try to find the existing tab target if opening in a tab
    if open_target != 'window':
        window = get_target_window()
        should_raise = open_target not in ['tab-silent', 'tab-bg-silent']

    # Otherwise, or if no window was found, create a new one
    if window is None:
        window = MainWindow(private=None)
        window.show()
        should_raise = True

    if should_raise and not no_raise:
        raise_window(window)

    return window.win_id


def raise_window(window, alert=True):
    """Raise the given MainWindow object."""
    window.setWindowState(window.windowState() & ~Qt.WindowMinimized)
    window.setWindowState(window.windowState() | Qt.WindowActive)
    window.raise_()
    # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-69568
    QCoreApplication.processEvents(
        QEventLoop.ExcludeUserInputEvents | QEventLoop.ExcludeSocketNotifiers)
    window.activateWindow()

    if alert:
        QApplication.instance().alert(window)


def get_target_window():
    """Get the target window for new tabs, or None if none exist."""
    try:
        win_mode = config.val.new_instance_open_target_window
        if win_mode == 'last-focused':
            return objreg.last_focused_window()
        elif win_mode == 'first-opened':
            return objreg.window_by_index(0)
        elif win_mode == 'last-opened':
            return objreg.window_by_index(-1)
        elif win_mode == 'last-visible':
            return objreg.last_visible_window()
        else:
            raise ValueError("Invalid win_mode {}".format(win_mode))
    except objreg.NoWindow:
        return None


class MainWindow(QWidget):

    """The main window of qutebrowser.

    Adds all needed components to a vbox, initializes sub-widgets and connects
    signals.

    Attributes:
        status: The StatusBar widget.
        tabbed_browser: The TabbedBrowser widget.
        state_before_fullscreen: window state before activation of fullscreen.
        _downloadview: The DownloadView widget.
        _vbox: The main QVBoxLayout.
        _commandrunner: The main CommandRunner instance.
        _overlays: Widgets shown as overlay for the current webpage.
        _private: Whether the window is in private browsing mode.
    """

    def __init__(self, *, private, geometry=None, parent=None):
        """Create a new main window.

        Args:
            geometry: The geometry to load, as a bytes-object (or None).
            private: Whether the window is in private browsing mode.
            parent: The parent the window should get.
        """
        super().__init__(parent)
        # Late import to avoid a circular dependency
        # - browsertab -> hints -> webelem -> mainwindow -> bar -> browsertab
        from qutebrowser.mainwindow import tabbedbrowser
        from qutebrowser.mainwindow.statusbar import bar

        self.setAttribute(Qt.WA_DeleteOnClose)
        self._commandrunner = None
        self._overlays = []
        self.win_id = next(win_id_gen)
        self.registry = objreg.ObjectRegistry()
        objreg.window_registry[self.win_id] = self
        objreg.register('main-window', self, scope='window',
                        window=self.win_id)
        tab_registry = objreg.ObjectRegistry()
        objreg.register('tab-registry', tab_registry, scope='window',
                        window=self.win_id)
        message_bridge = message.MessageBridge(self)
        objreg.register('message-bridge', message_bridge, scope='window',
                        window=self.win_id)

        self.setWindowTitle('qutebrowser')
        self._vbox = QVBoxLayout(self)
        self._vbox.setContentsMargins(0, 0, 0, 0)
        self._vbox.setSpacing(0)

        self._init_downloadmanager()
        self._downloadview = downloadview.DownloadView(self.win_id)

        if config.val.content.private_browsing:
            # This setting always trumps what's passed in.
            private = True
        else:
            private = bool(private)
        self._private = private
        self.tabbed_browser = tabbedbrowser.TabbedBrowser(win_id=self.win_id,
                                                          private=private,
                                                          parent=self)
        objreg.register('tabbed-browser', self.tabbed_browser, scope='window',
                        window=self.win_id)
        self._init_command_dispatcher()

        # We need to set an explicit parent for StatusBar because it does some
        # show/hide magic immediately which would mean it'd show up as a
        # window.
        self.status = bar.StatusBar(win_id=self.win_id, private=private,
                                    parent=self)

        self._add_widgets()
        self._downloadview.show()

        self._init_completion()

        log.init.debug("Initializing modes...")
        modeman.init(self.win_id, self)

        self._commandrunner = runners.CommandRunner(self.win_id,
                                                    partial_match=True)

        self._keyhint = keyhintwidget.KeyHintView(self.win_id, self)
        self._add_overlay(self._keyhint, self._keyhint.update_geometry)

        self._prompt_container = prompt.PromptContainer(self.win_id, self)
        self._add_overlay(self._prompt_container,
                          self._prompt_container.update_geometry,
                          centered=True, padding=10)
        objreg.register('prompt-container', self._prompt_container,
                        scope='window', window=self.win_id)
        self._prompt_container.hide()

        self._messageview = messageview.MessageView(parent=self)
        self._add_overlay(self._messageview, self._messageview.update_geometry)

        self._init_geometry(geometry)
        self._connect_signals()

        # When we're here the statusbar might not even really exist yet, so
        # resizing will fail. Therefore, we use singleShot QTimers to make sure
        # we defer this until everything else is initialized.
        QTimer.singleShot(0, self._connect_overlay_signals)
        config.instance.changed.connect(self._on_config_changed)

        objreg.get("app").new_window.emit(self)
        self._set_decoration(config.val.window.hide_decoration)

        self.state_before_fullscreen = self.windowState()

    def _init_geometry(self, geometry):
        """Initialize the window geometry or load it from disk."""
        if geometry is not None:
            self._load_geometry(geometry)
        elif self.win_id == 0:
            self._load_state_geometry()
        else:
            self._set_default_geometry()
        log.init.debug("Initial main window geometry: {}".format(
            self.geometry()))

    def _add_overlay(self, widget, signal, *, centered=False, padding=0):
        self._overlays.append((widget, signal, centered, padding))

    def _update_overlay_geometries(self):
        """Update the size/position of all overlays."""
        for w, _signal, centered, padding in self._overlays:
            self._update_overlay_geometry(w, centered, padding)

    def _update_overlay_geometry(self, widget, centered, padding):
        """Reposition/resize the given overlay."""
        if not widget.isVisible():
            return

        size_hint = widget.sizeHint()
        if widget.sizePolicy().horizontalPolicy() == QSizePolicy.Expanding:
            width = self.width() - 2 * padding
            left = padding
        else:
            width = min(size_hint.width(), self.width() - 2 * padding)
            left = (self.width() - width) / 2 if centered else 0

        height_padding = 20
        status_position = config.val.statusbar.position
        if status_position == 'bottom':
            if self.status.isVisible():
                status_height = self.status.height()
                bottom = self.status.geometry().top()
            else:
                status_height = 0
                bottom = self.height()
            top = self.height() - status_height - size_hint.height()
            top = qtutils.check_overflow(top, 'int', fatal=False)
            topleft = QPoint(left, max(height_padding, top))
            bottomright = QPoint(left + width, bottom)
        elif status_position == 'top':
            if self.status.isVisible():
                status_height = self.status.height()
                top = self.status.geometry().bottom()
            else:
                status_height = 0
                top = 0
            topleft = QPoint(left, top)
            bottom = status_height + size_hint.height()
            bottom = qtutils.check_overflow(bottom, 'int', fatal=False)
            bottomright = QPoint(left + width,
                                 min(self.height() - height_padding, bottom))
        else:
            raise ValueError("Invalid position {}!".format(status_position))

        rect = QRect(topleft, bottomright)
        log.misc.debug('new geometry for {!r}: {}'.format(widget, rect))
        if rect.isValid():
            widget.setGeometry(rect)

    def _init_downloadmanager(self):
        log.init.debug("Initializing downloads...")
        qtnetwork_download_manager = objreg.get('qtnetwork-download-manager')

        try:
            webengine_download_manager = objreg.get(
                'webengine-download-manager')
        except KeyError:
            webengine_download_manager = None

        download_model = downloads.DownloadModel(qtnetwork_download_manager,
                                                 webengine_download_manager)
        objreg.register('download-model', download_model, scope='window',
                        window=self.win_id)

    def _init_completion(self):
        self._completion = completionwidget.CompletionView(self.win_id, self)
        cmd = objreg.get('status-command', scope='window', window=self.win_id)
        completer_obj = completer.Completer(cmd=cmd, win_id=self.win_id,
                                            parent=self._completion)
        self._completion.selection_changed.connect(
            completer_obj.on_selection_changed)
        objreg.register('completion', self._completion, scope='window',
                        window=self.win_id)
        self._add_overlay(self._completion, self._completion.update_geometry)

    def _init_command_dispatcher(self):
        dispatcher = commands.CommandDispatcher(self.win_id,
                                                self.tabbed_browser)
        objreg.register('command-dispatcher', dispatcher, scope='window',
                        window=self.win_id)
        self.tabbed_browser.widget.destroyed.connect(
            functools.partial(objreg.delete, 'command-dispatcher',
                              scope='window', window=self.win_id))

    def __repr__(self):
        return utils.get_repr(self)

    @pyqtSlot(str)
    def _on_config_changed(self, option):
        """Resize the completion if related config options changed."""
        if option == 'statusbar.padding':
            self._update_overlay_geometries()
        elif option == 'downloads.position':
            self._add_widgets()
        elif option == 'statusbar.position':
            self._add_widgets()
            self._update_overlay_geometries()
        elif option == 'window.hide_decoration':
            self._set_decoration(config.val.window.hide_decoration)

    def _add_widgets(self):
        """Add or readd all widgets to the VBox."""
        self._vbox.removeWidget(self.tabbed_browser.widget)
        self._vbox.removeWidget(self._downloadview)
        self._vbox.removeWidget(self.status)
        widgets = [self.tabbed_browser.widget]

        downloads_position = config.val.downloads.position
        if downloads_position == 'top':
            widgets.insert(0, self._downloadview)
        elif downloads_position == 'bottom':
            widgets.append(self._downloadview)
        else:
            raise ValueError("Invalid position {}!".format(downloads_position))

        status_position = config.val.statusbar.position
        if status_position == 'top':
            widgets.insert(0, self.status)
        elif status_position == 'bottom':
            widgets.append(self.status)
        else:
            raise ValueError("Invalid position {}!".format(status_position))

        for widget in widgets:
            self._vbox.addWidget(widget)

    def _load_state_geometry(self):
        """Load the geometry from the state file."""
        try:
            data = configfiles.state['geometry']['mainwindow']
            geom = base64.b64decode(data, validate=True)
        except KeyError:
            # First start
            self._set_default_geometry()
        except binascii.Error:
            log.init.exception("Error while reading geometry")
            self._set_default_geometry()
        else:
            self._load_geometry(geom)

    def _save_geometry(self):
        """Save the window geometry to the state config."""
        data = bytes(self.saveGeometry())
        geom = base64.b64encode(data).decode('ASCII')
        configfiles.state['geometry']['mainwindow'] = geom

    def _load_geometry(self, geom):
        """Load geometry from a bytes object.

        If loading fails, loads default geometry.
        """
        log.init.debug("Loading mainwindow from {!r}".format(geom))
        ok = self.restoreGeometry(geom)
        if not ok:
            log.init.warning("Error while loading geometry.")
            self._set_default_geometry()

    def _connect_overlay_signals(self):
        """Connect the resize signal and resize everything once."""
        for widget, signal, centered, padding in self._overlays:
            signal.connect(
                functools.partial(self._update_overlay_geometry, widget,
                                  centered, padding))
            self._update_overlay_geometry(widget, centered, padding)

    def _set_default_geometry(self):
        """Set some sensible default geometry."""
        self.setGeometry(QRect(50, 50, 800, 600))

    def _get_object(self, name):
        """Get an object for this window in the object registry."""
        return objreg.get(name, scope='window', window=self.win_id)

    def _connect_signals(self):
        """Connect all mainwindow signals."""
        status = self._get_object('statusbar')
        keyparsers = self._get_object('keyparsers')
        completion_obj = self._get_object('completion')
        cmd = self._get_object('status-command')
        message_bridge = self._get_object('message-bridge')
        mode_manager = self._get_object('mode-manager')

        # misc
        self.tabbed_browser.close_window.connect(self.close)
        mode_manager.entered.connect(hints.on_mode_entered)

        # status bar
        mode_manager.entered.connect(status.on_mode_entered)
        mode_manager.left.connect(status.on_mode_left)
        mode_manager.left.connect(cmd.on_mode_left)
        mode_manager.left.connect(message.global_bridge.mode_left)

        # commands
        keyparsers[usertypes.KeyMode.normal].keystring_updated.connect(
            status.keystring.setText)
        cmd.got_cmd[str].connect(self._commandrunner.run_safely)
        cmd.got_cmd[str, int].connect(self._commandrunner.run_safely)
        cmd.returnPressed.connect(self.tabbed_browser.on_cmd_return_pressed)

        # key hint popup
        for mode, parser in keyparsers.items():
            parser.keystring_updated.connect(functools.partial(
                self._keyhint.update_keyhint, mode.name))

        # messages
        message.global_bridge.show_message.connect(
            self._messageview.show_message)
        message.global_bridge.flush()
        message.global_bridge.clear_messages.connect(
            self._messageview.clear_messages)
        message_bridge.s_set_text.connect(status.set_text)
        message_bridge.s_maybe_reset_text.connect(status.txt.maybe_reset_text)

        # statusbar
        self.tabbed_browser.current_tab_changed.connect(status.on_tab_changed)
        self.tabbed_browser.cur_progress.connect(status.prog.setValue)
        self.tabbed_browser.cur_load_finished.connect(status.prog.hide)
        self.tabbed_browser.cur_load_started.connect(
            status.prog.on_load_started)
        self.tabbed_browser.cur_scroll_perc_changed.connect(
            status.percentage.set_perc)
        self.tabbed_browser.widget.tab_index_changed.connect(
            status.tabindex.on_tab_index_changed)
        self.tabbed_browser.cur_url_changed.connect(status.url.set_url)
        self.tabbed_browser.cur_url_changed.connect(functools.partial(
            status.backforward.on_tab_cur_url_changed,
            tabs=self.tabbed_browser))
        self.tabbed_browser.cur_link_hovered.connect(status.url.set_hover_url)
        self.tabbed_browser.cur_load_status_changed.connect(
            status.url.on_load_status_changed)
        self.tabbed_browser.cur_caret_selection_toggled.connect(
            status.on_caret_selection_toggled)
        self.tabbed_browser.cur_fullscreen_requested.connect(
            self._on_fullscreen_requested)
        self.tabbed_browser.cur_fullscreen_requested.connect(status.maybe_hide)

        # command input / completion
        mode_manager.entered.connect(self.tabbed_browser.on_mode_entered)
        mode_manager.left.connect(self.tabbed_browser.on_mode_left)
        cmd.clear_completion_selection.connect(
            completion_obj.on_clear_completion_selection)
        cmd.hide_completion.connect(completion_obj.hide)

    def _set_decoration(self, hidden):
        """Set the visibility of the window decoration via Qt."""
        window_flags = Qt.Window
        refresh_window = self.isVisible()
        if hidden:
            window_flags |= Qt.CustomizeWindowHint | Qt.NoDropShadowWindowHint
        self.setWindowFlags(window_flags)
        if refresh_window:
            self.show()

    @pyqtSlot(bool)
    def _on_fullscreen_requested(self, on):
        if not config.val.content.windowed_fullscreen:
            if on:
                self.state_before_fullscreen = self.windowState()
                self.setWindowState(
                    Qt.WindowFullScreen | self.state_before_fullscreen)
            elif self.isFullScreen():
                self.setWindowState(self.state_before_fullscreen)
        log.misc.debug('on: {}, state before fullscreen: {}'.format(
            on, debug.qflags_key(Qt, self.state_before_fullscreen)))

    @cmdutils.register(instance='main-window', scope='window')
    @pyqtSlot()
    def close(self):
        """Close the current window.

        //

        Extend close() so we can register it as a command.
        """
        super().close()

    def resizeEvent(self, e):
        """Extend resizewindow's resizeEvent to adjust completion.

        Args:
            e: The QResizeEvent
        """
        super().resizeEvent(e)
        self._update_overlay_geometries()
        self._downloadview.updateGeometry()
        self.tabbed_browser.widget.tabBar().refresh()

    def showEvent(self, e):
        """Extend showEvent to register us as the last-visible-main-window.

        Args:
            e: The QShowEvent
        """
        super().showEvent(e)
        objreg.register('last-visible-main-window', self, update=True)

    def _do_close(self):
        """Helper function for closeEvent."""
        try:
            last_visible = objreg.get('last-visible-main-window')
            if self is last_visible:
                objreg.delete('last-visible-main-window')
        except KeyError:
            pass
        objreg.get('session-manager').save_last_window_session()
        self._save_geometry()
        log.destroy.debug("Closing window {}".format(self.win_id))
        self.tabbed_browser.shutdown()

    def closeEvent(self, e):
        """Override closeEvent to display a confirmation if needed."""
        if crashsignal.is_crashing:
            e.accept()
            return
        tab_count = self.tabbed_browser.widget.count()
        download_model = objreg.get('download-model', scope='window',
                                    window=self.win_id)
        download_count = download_model.running_downloads()
        quit_texts = []
        # Ask if multiple-tabs are open
        if 'multiple-tabs' in config.val.confirm_quit and tab_count > 1:
            quit_texts.append("{} {} open.".format(
                tab_count, "tab is" if tab_count == 1 else "tabs are"))
        # Ask if multiple downloads running
        if 'downloads' in config.val.confirm_quit and download_count > 0:
            quit_texts.append("{} {} running.".format(
                download_count,
                "download is" if download_count == 1 else "downloads are"))
        # Process all quit messages that user must confirm
        if quit_texts or 'always' in config.val.confirm_quit:
            msg = jinja.environment.from_string("""
                <ul>
                {% for text in quit_texts %}
                   <li>{{text}}</li>
                {% endfor %}
                </ul>
            """.strip()).render(quit_texts=quit_texts)
            confirmed = message.ask('Really quit?', msg,
                                    mode=usertypes.PromptMode.yesno,
                                    default=True)

            # Stop asking if the user cancels
            if not confirmed:
                log.destroy.debug("Cancelling closing of window {}".format(
                    self.win_id))
                e.ignore()
                return

        e.accept()
        self._do_close()
1
23,207
Would using `HintLabel` here instead (without `setObjectName`) work?
qutebrowser-qutebrowser
py
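The reviewer's `HintLabel` question comes down to how Qt style sheet selectors match widgets: `QLabel#hint` in the patch above matches any QLabel whose objectName is "hint", while a bare class-name selector matches a widget subclass directly, with no `setObjectName` call needed. Here is a minimal PyQt5 sketch of both selector styles; `HintLabel` is the reviewer's hypothetical subclass, not an existing qutebrowser widget:

from PyQt5.QtWidgets import QApplication, QLabel

app = QApplication([])

# Variant 1: select by objectName, as the patch above does with QLabel#hint.
label = QLabel("follow me")
label.setObjectName("hint")
label.setStyleSheet("QLabel#hint { background-color: yellow; }")

# Variant 2: select by (sub)class name, as the reviewer suggests.
class HintLabel(QLabel):
    """Hypothetical QLabel subclass; styled by its class name alone."""

hint = HintLabel("follow me")
hint.setStyleSheet("HintLabel { background-color: yellow; }")

Either selector should style the widget the same way; the subclass variant just moves the widget's identity from a string property into the type system.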
@@ -51,7 +51,7 @@ type solver interface { // It is useful for mocking out a given provider since an alternate set of // constructors may be set. type dnsProviderConstructors struct { - cloudDNS func(project string, serviceAccount []byte, dns01Nameservers []string) (*clouddns.DNSProvider, error) + cloudDNS func(project string, serviceAccountFile string, serviceAccount []byte, dns01Nameservers []string, ambient bool) (*clouddns.DNSProvider, error) cloudFlare func(email, apikey string, dns01Nameservers []string) (*cloudflare.DNSProvider, error) route53 func(accessKey, secretKey, hostedZoneID, region string, ambient bool, dns01Nameservers []string) (*route53.DNSProvider, error) azureDNS func(clientID, clientSecret, subscriptionID, tenentID, resourceGroupName, hostedZoneName string, dns01Nameservers []string) (*azuredns.DNSProvider, error)
1
/*
Copyright 2018 The Jetstack cert-manager contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dns

import (
    "context"
    "fmt"
    "strings"
    "time"

    "github.com/golang/glog"
    "github.com/pkg/errors"
    corev1listers "k8s.io/client-go/listers/core/v1"

    "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1"
    "github.com/jetstack/cert-manager/pkg/controller"
    "github.com/jetstack/cert-manager/pkg/issuer/acme/dns/acmedns"
    "github.com/jetstack/cert-manager/pkg/issuer/acme/dns/akamai"
    "github.com/jetstack/cert-manager/pkg/issuer/acme/dns/azuredns"
    "github.com/jetstack/cert-manager/pkg/issuer/acme/dns/clouddns"
    "github.com/jetstack/cert-manager/pkg/issuer/acme/dns/cloudflare"
    "github.com/jetstack/cert-manager/pkg/issuer/acme/dns/route53"
    "github.com/jetstack/cert-manager/pkg/issuer/acme/dns/util"
)

const (
    cloudDNSServiceAccountKey = "service-account.json"
)

type solver interface {
    Present(domain, token, key string) error
    CleanUp(domain, token, key string) error
    Timeout() (timeout, interval time.Duration)
}

// dnsProviderConstructors defines how each provider may be constructed.
// It is useful for mocking out a given provider since an alternate set of
// constructors may be set.
type dnsProviderConstructors struct {
    cloudDNS   func(project string, serviceAccount []byte, dns01Nameservers []string) (*clouddns.DNSProvider, error)
    cloudFlare func(email, apikey string, dns01Nameservers []string) (*cloudflare.DNSProvider, error)
    route53    func(accessKey, secretKey, hostedZoneID, region string, ambient bool, dns01Nameservers []string) (*route53.DNSProvider, error)
    azureDNS   func(clientID, clientSecret, subscriptionID, tenentID, resourceGroupName, hostedZoneName string, dns01Nameservers []string) (*azuredns.DNSProvider, error)
    acmeDNS    func(host string, accountJson []byte, dns01Nameservers []string) (*acmedns.DNSProvider, error)
}

// Solver is a solver for the acme dns01 challenge.
// Given a Certificate object, it determines the correct DNS provider based on
// the certificate, and configures it based on the referenced issuer.
type Solver struct {
    *controller.Context
    secretLister            corev1listers.SecretLister
    dnsProviderConstructors dnsProviderConstructors
}

func (s *Solver) Present(ctx context.Context, issuer v1alpha1.GenericIssuer, _ *v1alpha1.Certificate, ch v1alpha1.ACMEOrderChallenge) error {
    if ch.SolverConfig.DNS01 == nil {
        return fmt.Errorf("challenge dns config must be specified")
    }
    providerName := ch.SolverConfig.DNS01.Provider
    if providerName == "" {
        return fmt.Errorf("dns01 challenge provider name must be set")
    }

    slv, err := s.solverForIssuerProvider(issuer, providerName)
    if err != nil {
        return err
    }

    glog.Infof("Presenting DNS01 challenge for domain %q", ch.Domain)
    return slv.Present(ch.Domain, ch.Token, ch.Key)
}

func (s *Solver) Check(ch v1alpha1.ACMEOrderChallenge) (bool, error) {
    fqdn, value, ttl, err := util.DNS01Record(ch.Domain, ch.Key, s.DNS01Nameservers)
    if err != nil {
        return false, err
    }

    glog.Infof("Checking DNS propagation for %q using name servers: %v", ch.Domain, s.DNS01Nameservers)

    ok, err := util.PreCheckDNS(fqdn, value, s.DNS01Nameservers)
    if err != nil {
        return false, err
    }
    if !ok {
        glog.Infof("DNS record for %q not yet propagated", ch.Domain)
        return false, nil
    }

    glog.Infof("Waiting DNS record TTL (%ds) to allow propagation of DNS record for domain %q", ttl, fqdn)
    time.Sleep(time.Second * time.Duration(ttl))
    glog.Infof("ACME DNS01 validation record propagated for %q", fqdn)

    return true, nil
}

func (s *Solver) CleanUp(ctx context.Context, issuer v1alpha1.GenericIssuer, _ *v1alpha1.Certificate, ch v1alpha1.ACMEOrderChallenge) error {
    if ch.SolverConfig.DNS01 == nil {
        return fmt.Errorf("challenge dns config must be specified")
    }
    providerName := ch.SolverConfig.DNS01.Provider
    if providerName == "" {
        return fmt.Errorf("dns01 challenge provider name must be set")
    }

    slv, err := s.solverForIssuerProvider(issuer, providerName)
    if err != nil {
        return err
    }

    return slv.CleanUp(ch.Domain, ch.Token, ch.Key)
}

// solverForIssuerProvider returns a Solver for the given providerName.
// The providerName is the name of an ACME DNS-01 challenge provider as
// specified on the Issuer resource for the Solver.
//
// This method is exported so that only the provider name is required in order
// to obtain an instance of a Solver. This is useful when cleaning up old
// challenges after the ACME challenge configuration on the Certificate has
// been removed by the user.
func (s *Solver) solverForIssuerProvider(issuer v1alpha1.GenericIssuer, providerName string) (solver, error) {
    resourceNamespace := s.ResourceNamespace(issuer)

    providerConfig, err := issuer.GetSpec().ACME.DNS01.Provider(providerName)
    if err != nil {
        return nil, err
    }

    var impl solver
    switch {
    case providerConfig.Akamai != nil:
        clientToken, err := s.loadSecretData(&providerConfig.Akamai.ClientToken, resourceNamespace)
        if err != nil {
            return nil, errors.Wrap(err, "error getting akamai client token")
        }

        clientSecret, err := s.loadSecretData(&providerConfig.Akamai.ClientSecret, resourceNamespace)
        if err != nil {
            return nil, errors.Wrap(err, "error getting akamai client secret")
        }

        accessToken, err := s.loadSecretData(&providerConfig.Akamai.AccessToken, resourceNamespace)
        if err != nil {
            return nil, errors.Wrap(err, "error getting akamai access token")
        }

        impl, err = akamai.NewDNSProvider(
            providerConfig.Akamai.ServiceConsumerDomain,
            string(clientToken),
            string(clientSecret),
            string(accessToken),
            s.DNS01Nameservers)
        if err != nil {
            return nil, errors.Wrap(err, "error instantiating akamai challenge solver")
        }
    case providerConfig.CloudDNS != nil:
        saSecret, err := s.secretLister.Secrets(resourceNamespace).Get(providerConfig.CloudDNS.ServiceAccount.Name)
        if err != nil {
            return nil, fmt.Errorf("error getting clouddns service account: %s", err)
        }
        saKey := providerConfig.CloudDNS.ServiceAccount.Key
        saBytes := saSecret.Data[saKey]
        if len(saBytes) == 0 {
            return nil, fmt.Errorf("specified key %q not found in secret %s/%s", saKey, saSecret.Namespace, saSecret.Name)
        }

        impl, err = s.dnsProviderConstructors.cloudDNS(providerConfig.CloudDNS.Project, saBytes, s.DNS01Nameservers)
        if err != nil {
            return nil, fmt.Errorf("error instantiating google clouddns challenge solver: %s", err)
        }
    case providerConfig.Cloudflare != nil:
        apiKeySecret, err := s.secretLister.Secrets(resourceNamespace).Get(providerConfig.Cloudflare.APIKey.Name)
        if err != nil {
            return nil, fmt.Errorf("error getting cloudflare service account: %s", err)
        }

        email := providerConfig.Cloudflare.Email
        apiKey := string(apiKeySecret.Data[providerConfig.Cloudflare.APIKey.Key])

        impl, err = s.dnsProviderConstructors.cloudFlare(email, apiKey, s.DNS01Nameservers)
        if err != nil {
            return nil, fmt.Errorf("error instantiating cloudflare challenge solver: %s", err)
        }
    case providerConfig.Route53 != nil:
        secretAccessKey := ""
        if providerConfig.Route53.SecretAccessKey.Name != "" {
            secretAccessKeySecret, err := s.secretLister.Secrets(resourceNamespace).Get(providerConfig.Route53.SecretAccessKey.Name)
            if err != nil {
                return nil, fmt.Errorf("error getting route53 secret access key: %s", err)
            }

            secretAccessKeyBytes, ok := secretAccessKeySecret.Data[providerConfig.Route53.SecretAccessKey.Key]
            if !ok {
                return nil, fmt.Errorf("error getting route53 secret access key: key '%s' not found in secret", providerConfig.Route53.SecretAccessKey.Key)
            }
            secretAccessKey = string(secretAccessKeyBytes)
        }

        impl, err = s.dnsProviderConstructors.route53(
            strings.TrimSpace(providerConfig.Route53.AccessKeyID),
            strings.TrimSpace(secretAccessKey),
            providerConfig.Route53.HostedZoneID,
            providerConfig.Route53.Region,
            s.CanUseAmbientCredentials(issuer),
            s.DNS01Nameservers,
        )
        if err != nil {
            return nil, fmt.Errorf("error instantiating route53 challenge solver: %s", err)
        }
    case providerConfig.AzureDNS != nil:
        clientSecret, err := s.secretLister.Secrets(resourceNamespace).Get(providerConfig.AzureDNS.ClientSecret.Name)
        if err != nil {
            return nil, fmt.Errorf("error getting azuredns client secret: %s", err)
        }

        clientSecretBytes, ok := clientSecret.Data[providerConfig.AzureDNS.ClientSecret.Key]
        if !ok {
            return nil, fmt.Errorf("error getting azure dns client secret: key '%s' not found in secret", providerConfig.AzureDNS.ClientSecret.Key)
        }

        impl, err = s.dnsProviderConstructors.azureDNS(
            providerConfig.AzureDNS.ClientID,
            string(clientSecretBytes),
            providerConfig.AzureDNS.SubscriptionID,
            providerConfig.AzureDNS.TenantID,
            providerConfig.AzureDNS.ResourceGroupName,
            providerConfig.AzureDNS.HostedZoneName,
            s.DNS01Nameservers,
        )
    case providerConfig.AcmeDNS != nil:
        accountSecret, err := s.secretLister.Secrets(resourceNamespace).Get(providerConfig.AcmeDNS.AccountSecret.Name)
        if err != nil {
            return nil, fmt.Errorf("error getting acmedns accounts secret: %s", err)
        }

        accountSecretBytes, ok := accountSecret.Data[providerConfig.AcmeDNS.AccountSecret.Key]
        if !ok {
            return nil, fmt.Errorf("error getting acmedns accounts secret: key '%s' not found in secret", providerConfig.AcmeDNS.AccountSecret.Key)
        }

        impl, err = s.dnsProviderConstructors.acmeDNS(
            providerConfig.AcmeDNS.Host,
            accountSecretBytes,
            s.DNS01Nameservers,
        )
    default:
        return nil, fmt.Errorf("no dns provider config specified for provider %q", providerName)
    }

    return impl, nil
}

func NewSolver(ctx *controller.Context) *Solver {
    return &Solver{
        ctx,
        ctx.KubeSharedInformerFactory.Core().V1().Secrets().Lister(),
        dnsProviderConstructors{
            clouddns.NewDNSProviderServiceAccountBytes,
            cloudflare.NewDNSProviderCredentials,
            route53.NewDNSProvider,
            azuredns.NewDNSProviderCredentials,
            acmedns.NewDNSProviderHostBytes,
        },
    }
}

func (s *Solver) loadSecretData(selector *v1alpha1.SecretKeySelector, ns string) ([]byte, error) {
    secret, err := s.secretLister.Secrets(ns).Get(selector.Name)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to load secret %q", ns+"/"+selector.Name)
    }

    if data, ok := secret.Data[selector.Key]; ok {
        return data, nil
    }

    return nil, errors.Errorf("no key %q in secret %q", selector.Key, ns+"/"+selector.Name)
}
1
12,842
From what I can tell, the `serviceAccountFile` is never set to anything except ""? It'd be best to remove this altogether, so we don't mislead future developers
jetstack-cert-manager
go
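The reviewer's objection is an API-design point that is easy to state in miniature: a constructor parameter that every call site passes as the empty string is dead weight, and it implies behavior that never actually happens. A Python-only sketch of the smell and of the trimmed signature the reviewer is asking for (hypothetical names, not the cert-manager code):

# A parameter no caller ever sets leaves a permanently dead branch behind:
def new_cloud_dns_provider(project, service_account_file, service_account,
                           nameservers, ambient):
    if service_account_file:  # dead if every caller passes ""
        with open(service_account_file, "rb") as f:
            service_account = f.read()
    ...  # construct the provider from service_account

# The reviewer's suggestion amounts to deleting the unused parameter:
def new_cloud_dns_provider_trimmed(project, service_account, nameservers,
                                   ambient):
    ...  # construct the provider from service_account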
@@ -18,11 +18,6 @@ </li> </ul> </nav> - <nav> - <ul> - <li class="account"><%= link_to 'Sign in', sign_in_path %></li> - </ul> - </nav> </div> </section>
1
<!DOCTYPE html>
<html lang="en">
  <head prefix="og: http://ogp.me/ns#">
    <%= render 'application/head_contents' %>
  </head>
  <body class="<%= body_class %> <%= yield(:additional_body_classes) %>">
    <section id="header-wrapper">
      <div class="header-container">
        <h1 class="small-logo">
          <%= image_tag('upcase/upcase-ralph-small.png') %>
          <span>Upcase</span>
        </h1>
        <nav class="left">
          <ul>
            <li class="back">
              <%= yield(:landing_page_back_link) %>
            </li>
          </ul>
        </nav>
        <nav>
          <ul>
            <li class="account"><%= link_to 'Sign in', sign_in_path %></li>
          </ul>
        </nav>
      </div>
    </section>

    <section class="content">
      <% if content_for?(:subject_block) %>
        <div class="subject"><%= yield(:subject_block) %></div>
      <% end %>

      <%= yield %>
    </section>

    <footer class="links">
      <p id="copyright">
        © 2012 - <%= Time.zone.today.year %> thoughtbot, inc.
        The <em>design of a robot</em> and <em>thoughtbot</em> are registered
        trademarks of thoughtbot,&nbsp;inc.
        See our <%= link_to 'Privacy Policy', privacy_path %> and
        <%= link_to 'Terms & Conditions', terms_path %>.
      </p>
    </footer>

    <%= render 'shared/javascript' %>
  </body>
</html>
1
12,270
Why do we remove the Sign in link?
thoughtbot-upcase
rb
@@ -186,11 +186,6 @@ public class FeedItemMenuHandler { GpodnetPreferences.enqueueEpisodeAction(actionNew); } break; - case R.id.move_to_top_item: - DBWriter.moveQueueItemToTop(selectedItem.getId(), true); - return true; - case R.id.move_to_bottom_item: - DBWriter.moveQueueItemToBottom(selectedItem.getId(), true); case R.id.add_to_queue_item: DBWriter.addQueueItem(context, selectedItem); break;
1
package de.danoeh.antennapod.menuhandler;

import android.content.Context;
import android.content.Intent;
import android.net.Uri;
import android.util.Log;
import android.widget.Toast;

import de.danoeh.antennapod.R;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedMedia;
import de.danoeh.antennapod.core.gpoddernet.model.GpodnetEpisodeAction;
import de.danoeh.antennapod.core.gpoddernet.model.GpodnetEpisodeAction.Action;
import de.danoeh.antennapod.core.preferences.GpodnetPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.playback.PlaybackService;
import de.danoeh.antennapod.core.storage.DBTasks;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.storage.DownloadRequestException;
import de.danoeh.antennapod.core.util.IntentUtils;
import de.danoeh.antennapod.core.util.LongList;
import de.danoeh.antennapod.core.util.ShareUtils;

/**
 * Handles interactions with the FeedItemMenu.
 */
public class FeedItemMenuHandler {

    private static final String TAG = "FeedItemMenuHandler";

    private FeedItemMenuHandler() {
    }

    /**
     * Used by the MenuHandler to access different types of menus through one
     * interface
     */
    public interface MenuInterface {
        /**
         * Implementations of this method should call findItem(id) on their
         * menu-object and call setVisibility(visibility) on the returned
         * MenuItem object.
         */
        abstract void setItemVisibility(int id, boolean visible);
    }

    /**
     * This method should be called in the prepare-methods of menus. It changes
     * the visibility of the menu items depending on a FeedItem's attributes.
     *
     * @param mi               An instance of MenuInterface that the method uses to change a
     *                         MenuItem's visibility
     * @param selectedItem     The FeedItem for which the menu is supposed to be prepared
     * @param showExtendedMenu True if MenuItems that let the user share information about
     *                         the FeedItem and visit its website should be set visible. This
     *                         parameter should be set to false if the menu space is limited.
     * @param queueAccess      Used for testing if the queue contains the selected item
     * @return Returns true if selectedItem is not null.
     */
    public static boolean onPrepareMenu(Context context, MenuInterface mi, FeedItem selectedItem,
                                        boolean showExtendedMenu, LongList queueAccess) {
        if (selectedItem == null) {
            return false;
        }
        boolean hasMedia = selectedItem.getMedia() != null;
        boolean isPlaying = hasMedia && selectedItem.getState() == FeedItem.State.PLAYING;

        FeedItem.State state = selectedItem.getState();

        if (!isPlaying) {
            mi.setItemVisibility(R.id.skip_episode_item, false);
        }
        boolean isInQueue = selectedItem.isTagged(FeedItem.TAG_QUEUE);
        if (queueAccess == null || queueAccess.size() == 0 || queueAccess.get(0) == selectedItem.getId()) {
            mi.setItemVisibility(R.id.move_to_top_item, false);
        }
        if (queueAccess == null || queueAccess.size() == 0 || queueAccess.get(queueAccess.size()-1) == selectedItem.getId()) {
            mi.setItemVisibility(R.id.move_to_bottom_item, false);
        }
        if (!isInQueue || isPlaying) {
            mi.setItemVisibility(R.id.remove_from_queue_item, false);
        }
        if (!(!isInQueue && selectedItem.getMedia() != null)) {
            mi.setItemVisibility(R.id.add_to_queue_item, false);
        }
        if (!showExtendedMenu || selectedItem.getLink() == null) {
            mi.setItemVisibility(R.id.visit_website_item, false);
            mi.setItemVisibility(R.id.share_link_item, false);
            mi.setItemVisibility(R.id.share_link_with_position_item, false);
        }
        if (!showExtendedMenu || !hasMedia || selectedItem.getMedia().getDownload_url() == null) {
            mi.setItemVisibility(R.id.share_download_url_item, false);
            mi.setItemVisibility(R.id.share_download_url_with_position_item, false);
        }
        if (false == hasMedia || selectedItem.getMedia().getPosition() <= 0) {
            mi.setItemVisibility(R.id.share_link_with_position_item, false);
            mi.setItemVisibility(R.id.share_download_url_with_position_item, false);
        }

        if (selectedItem.isPlayed()) {
            mi.setItemVisibility(R.id.mark_read_item, false);
        } else {
            mi.setItemVisibility(R.id.mark_unread_item, false);
        }

        if (selectedItem.getMedia() == null || selectedItem.getMedia().getPosition() == 0) {
            mi.setItemVisibility(R.id.reset_position, false);
        }

        if (false == UserPreferences.isEnableAutodownload()) {
            mi.setItemVisibility(R.id.activate_auto_download, false);
            mi.setItemVisibility(R.id.deactivate_auto_download, false);
        } else if (selectedItem.getAutoDownload()) {
            mi.setItemVisibility(R.id.activate_auto_download, false);
        } else {
            mi.setItemVisibility(R.id.deactivate_auto_download, false);
        }

        if (selectedItem.getPaymentLink() == null || !selectedItem.getFlattrStatus().flattrable()) {
            mi.setItemVisibility(R.id.support_item, false);
        }

        boolean isFavorite = selectedItem.isTagged(FeedItem.TAG_FAVORITE);
        mi.setItemVisibility(R.id.add_to_favorites_item, !isFavorite);
        mi.setItemVisibility(R.id.remove_from_favorites_item, isFavorite);

        return true;
    }

    /**
     * The same method as onPrepareMenu(MenuInterface, FeedItem, boolean, QueueAccess), but lets the
     * caller also specify a list of menu items that should not be shown.
     *
     * @param excludeIds Menu item that should be excluded
     * @return true if selectedItem is not null.
     */
    public static boolean onPrepareMenu(Context context, MenuInterface mi, FeedItem selectedItem,
                                        boolean showExtendedMenu, LongList queueAccess, int... excludeIds) {
        boolean rc = onPrepareMenu(context, mi, selectedItem, showExtendedMenu, queueAccess);
        if (rc && excludeIds != null) {
            for (int id : excludeIds) {
                mi.setItemVisibility(id, false);
            }
        }
        return rc;
    }

    public static boolean onMenuItemClicked(Context context, int menuItemId,
                                            FeedItem selectedItem) throws DownloadRequestException {
        switch (menuItemId) {
            case R.id.skip_episode_item:
                context.sendBroadcast(new Intent(PlaybackService.ACTION_SKIP_CURRENT_EPISODE));
                break;
            case R.id.remove_item:
                DBWriter.deleteFeedMediaOfItem(context, selectedItem.getMedia().getId());
                break;
            case R.id.mark_read_item:
                selectedItem.setPlayed(true);
                DBWriter.markItemPlayed(selectedItem, FeedItem.PLAYED, false);
                if (GpodnetPreferences.loggedIn()) {
                    FeedMedia media = selectedItem.getMedia();
                    // not all items have media, Gpodder only cares about those that do
                    if (media != null) {
                        GpodnetEpisodeAction actionPlay = new GpodnetEpisodeAction.Builder(selectedItem, Action.PLAY)
                                .currentDeviceId()
                                .currentTimestamp()
                                .started(media.getDuration() / 1000)
                                .position(media.getDuration() / 1000)
                                .total(media.getDuration() / 1000)
                                .build();
                        GpodnetPreferences.enqueueEpisodeAction(actionPlay);
                    }
                }
                break;
            case R.id.mark_unread_item:
                selectedItem.setPlayed(false);
                DBWriter.markItemPlayed(selectedItem, FeedItem.UNPLAYED, false);
                if (GpodnetPreferences.loggedIn()) {
                    GpodnetEpisodeAction actionNew = new GpodnetEpisodeAction.Builder(selectedItem, Action.NEW)
                            .currentDeviceId()
                            .currentTimestamp()
                            .build();
                    GpodnetPreferences.enqueueEpisodeAction(actionNew);
                }
                break;
            case R.id.move_to_top_item:
                DBWriter.moveQueueItemToTop(selectedItem.getId(), true);
                return true;
            case R.id.move_to_bottom_item:
                DBWriter.moveQueueItemToBottom(selectedItem.getId(), true);
            case R.id.add_to_queue_item:
                DBWriter.addQueueItem(context, selectedItem);
                break;
            case R.id.remove_from_queue_item:
                DBWriter.removeQueueItem(context, selectedItem, true);
                break;
            case R.id.add_to_favorites_item:
                DBWriter.addFavoriteItem(selectedItem);
                break;
            case R.id.remove_from_favorites_item:
                DBWriter.removeFavoriteItem(selectedItem);
                break;
            case R.id.reset_position:
                selectedItem.getMedia().setPosition(0);
                DBWriter.markItemPlayed(selectedItem, FeedItem.UNPLAYED, true);
                break;
            case R.id.activate_auto_download:
                selectedItem.setAutoDownload(true);
                DBWriter.setFeedItemAutoDownload(selectedItem, true);
                break;
            case R.id.deactivate_auto_download:
                selectedItem.setAutoDownload(false);
                DBWriter.setFeedItemAutoDownload(selectedItem, false);
                break;
            case R.id.visit_website_item:
                Uri uri = Uri.parse(selectedItem.getLink());
                Intent intent = new Intent(Intent.ACTION_VIEW, uri);
                if (IntentUtils.isCallable(context, intent)) {
                    context.startActivity(intent);
                } else {
                    Toast.makeText(context, context.getString(R.string.download_error_malformed_url),
                            Toast.LENGTH_SHORT);
                }
                break;
            case R.id.support_item:
                DBTasks.flattrItemIfLoggedIn(context, selectedItem);
                break;
            case R.id.share_link_item:
                ShareUtils.shareFeedItemLink(context, selectedItem);
                break;
            case R.id.share_download_url_item:
                ShareUtils.shareFeedItemDownloadLink(context, selectedItem);
                break;
            case R.id.share_link_with_position_item:
                ShareUtils.shareFeedItemLink(context, selectedItem, true);
                break;
            case R.id.share_download_url_with_position_item:
                ShareUtils.shareFeedItemDownloadLink(context, selectedItem, true);
                break;
            default:
                Log.d(TAG, "Unknown menuItemId: " + menuItemId);
                return false;
        }
        // Refresh menu state

        return true;
    }

}
1
12,676
holy crap, were we really missing a 'break' statement here?
AntennaPod-AntennaPod
java
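The bug the reviewer is reacting to is classic switch fall-through: in the file above, the `move_to_bottom_item` case has no `break`, so execution continues into the `add_to_queue_item` case and the episode is added to the queue a second time. A small runnable Python model of the effect (hypothetical stand-ins, not the AntennaPod code):

queue = ["a", "b", "c"]

def move_to_bottom(item):
    queue.remove(item)
    queue.append(item)

def add_to_queue(item):
    queue.append(item)

def on_menu_item_clicked(menu_item_id, item):
    if menu_item_id == "move_to_bottom":
        move_to_bottom(item)
        # The Java original lacked `break` here, so control fell through
        # into the next case, producing this unintended second mutation:
        add_to_queue(item)
    elif menu_item_id == "add_to_queue":
        add_to_queue(item)

on_menu_item_clicked("move_to_bottom", "a")
print(queue)  # ['b', 'c', 'a', 'a'] -- the item ends up duplicated

The patch above sidesteps the question by deleting both move cases rather than adding the missing `break`.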
@@ -28,6 +28,10 @@ type logMaker interface { MakeLogger(module string) logger.Logger } +type debugForcedLogMaker interface { + MakeLoggerForceEnableDebug(module string) logger.Logger +} + type blockCacher interface { BlockCache() BlockCache }
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "time" "github.com/keybase/client/go/libkb" "github.com/keybase/client/go/logger" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/kbfs/kbfsblock" "github.com/keybase/kbfs/kbfscodec" "github.com/keybase/kbfs/kbfscrypto" "github.com/keybase/kbfs/kbfsmd" "github.com/keybase/kbfs/tlf" metrics "github.com/rcrowley/go-metrics" "golang.org/x/net/context" ) type dataVersioner interface { // DataVersion returns the data version for this block DataVersion() DataVer } type logMaker interface { MakeLogger(module string) logger.Logger } type blockCacher interface { BlockCache() BlockCache } type keyGetterGetter interface { keyGetter() blockKeyGetter } type codecGetter interface { Codec() kbfscodec.Codec } type blockServerGetter interface { BlockServer() BlockServer } type cryptoPureGetter interface { cryptoPure() cryptoPure } type cryptoGetter interface { Crypto() Crypto } type currentSessionGetterGetter interface { CurrentSessionGetter() CurrentSessionGetter } type signerGetter interface { Signer() kbfscrypto.Signer } type diskBlockCacheGetter interface { DiskBlockCache() DiskBlockCache } type diskBlockCacheSetter interface { MakeDiskBlockCacheIfNotExists() error } type clockGetter interface { Clock() Clock } type diskLimiterGetter interface { DiskLimiter() DiskLimiter } type syncedTlfGetterSetter interface { IsSyncedTlf(tlfID tlf.ID) bool SetTlfSyncState(tlfID tlf.ID, isSynced bool) error } type blockRetrieverGetter interface { BlockRetriever() BlockRetriever } // Block just needs to be (de)serialized using msgpack type Block interface { dataVersioner // GetEncodedSize returns the encoded size of this block, but only // if it has been previously set; otherwise it returns 0. GetEncodedSize() uint32 // SetEncodedSize sets the encoded size of this block, locally // caching it. The encoded size is not serialized. SetEncodedSize(size uint32) // NewEmpty returns a new block of the same type as this block NewEmpty() Block // Set sets this block to the same value as the passed-in block Set(other Block) // ToCommonBlock retrieves this block as a *CommonBlock. ToCommonBlock() *CommonBlock } // NodeID is a unique but transient ID for a Node. That is, two Node // objects in memory at the same time represent the same file or // directory if and only if their NodeIDs are equal (by pointer). type NodeID interface { // ParentID returns the NodeID of the directory containing the // pointed-to file or directory, or nil if none exists. ParentID() NodeID } // Node represents a direct pointer to a file or directory in KBFS. // It is somewhat like an inode in a regular file system. Users of // KBFS can use Node as a handle when accessing files or directories // they have previously looked up. type Node interface { // GetID returns the ID of this Node. This should be used as a // map key instead of the Node itself. GetID() NodeID // GetFolderBranch returns the folder ID and branch for this Node. GetFolderBranch() FolderBranch // GetBasename returns the current basename of the node, or "" // if the node has been unlinked. GetBasename() string } // KBFSOps handles all file system operations. Expands all indirect // pointers. Operations that modify the server data change all the // block IDs along the path, and so must return a path with the new // BlockIds so the caller can update their references. 
// // KBFSOps implementations must guarantee goroutine-safety of calls on // a per-top-level-folder basis. // // There are two types of operations that could block: // * remote-sync operations, that need to synchronously update the // MD for the corresponding top-level folder. When these // operations return successfully, they will have guaranteed to // have successfully written the modification to the KBFS servers. // * remote-access operations, that don't sync any modifications to KBFS // servers, but may block on reading data from the servers. // // KBFSOps implementations are supposed to give git-like consistency // semantics for modification operations; they will be visible to // other clients immediately after the remote-sync operations succeed, // if and only if there was no other intervening modification to the // same folder. If not, the change will be sync'd to the server in a // special per-device "unmerged" area before the operation succeeds. // In this case, the modification will not be visible to other clients // until the KBFS code on this device performs automatic conflict // resolution in the background. // // All methods take a Context (see https://blog.golang.org/context), // and if that context is cancelled during the operation, KBFSOps will // abort any blocking calls and return ctx.Err(). Any notifications // resulting from an operation will also include this ctx (or a // Context derived from it), allowing the caller to determine whether // the notification is a result of their own action or an external // action. type KBFSOps interface { // GetFavorites returns the logged-in user's list of favorite // top-level folders. This is a remote-access operation. GetFavorites(ctx context.Context) ([]Favorite, error) // RefreshCachedFavorites tells the instances to forget any cached // favorites list and fetch a new list from the server. The // effects are asychronous; if there's an error refreshing the // favorites, the cached favorites will become empty. RefreshCachedFavorites(ctx context.Context) // AddFavorite adds the favorite to both the server and // the local cache. AddFavorite(ctx context.Context, fav Favorite) error // DeleteFavorite deletes the favorite from both the server and // the local cache. Idempotent, so it succeeds even if the folder // isn't favorited. DeleteFavorite(ctx context.Context, fav Favorite) error // GetTLFCryptKeys gets crypt key of all generations as well as // TLF ID for tlfHandle. The returned keys (the keys slice) are ordered by // generation, starting with the key for FirstValidKeyGen. GetTLFCryptKeys(ctx context.Context, tlfHandle *TlfHandle) ( keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error) // GetTLFID gets the TLF ID for tlfHandle. GetTLFID(ctx context.Context, tlfHandle *TlfHandle) (tlf.ID, error) // GetOrCreateRootNode returns the root node and root entry // info associated with the given TLF handle and branch, if // the logged-in user has read permissions to the top-level // folder. It creates the folder if one doesn't exist yet (and // branch == MasterBranch), and the logged-in user has write // permissions to the top-level folder. This is a // remote-access operation. GetOrCreateRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) // GetRootNode is like GetOrCreateRootNode but if the root node // does not exist it will return a nil Node and not create it. 
GetRootNode( ctx context.Context, h *TlfHandle, branch BranchName) ( node Node, ei EntryInfo, err error) // GetDirChildren returns a map of children in the directory, // mapped to their EntryInfo, if the logged-in user has read // permission for the top-level folder. This is a remote-access // operation. GetDirChildren(ctx context.Context, dir Node) (map[string]EntryInfo, error) // Lookup returns the Node and entry info associated with a // given name in a directory, if the logged-in user has read // permissions to the top-level folder. The returned Node is nil // if the name is a symlink. This is a remote-access operation. Lookup(ctx context.Context, dir Node, name string) (Node, EntryInfo, error) // Stat returns the entry info associated with a // given Node, if the logged-in user has read permissions to the // top-level folder. This is a remote-access operation. Stat(ctx context.Context, node Node) (EntryInfo, error) // CreateDir creates a new subdirectory under the given node, if // the logged-in user has write permission to the top-level // folder. Returns the new Node for the created subdirectory, and // its new entry info. This is a remote-sync operation. CreateDir(ctx context.Context, dir Node, name string) ( Node, EntryInfo, error) // CreateFile creates a new file under the given node, if the // logged-in user has write permission to the top-level folder. // Returns the new Node for the created file, and its new // entry info. excl (when implemented) specifies whether this is an exclusive // create. Semantically setting excl to WithExcl is like O_CREAT|O_EXCL in a // Unix open() call. // // This is a remote-sync operation. CreateFile(ctx context.Context, dir Node, name string, isExec bool, excl Excl) ( Node, EntryInfo, error) // CreateLink creates a new symlink under the given node, if the // logged-in user has write permission to the top-level folder. // Returns the new entry info for the created symlink. This // is a remote-sync operation. CreateLink(ctx context.Context, dir Node, fromName string, toPath string) ( EntryInfo, error) // RemoveDir removes the subdirectory represented by the given // node, if the logged-in user has write permission to the // top-level folder. Will return an error if the subdirectory is // not empty. This is a remote-sync operation. RemoveDir(ctx context.Context, dir Node, dirName string) error // RemoveEntry removes the directory entry represented by the // given node, if the logged-in user has write permission to the // top-level folder. This is a remote-sync operation. RemoveEntry(ctx context.Context, dir Node, name string) error // Rename performs an atomic rename operation with a given // top-level folder if the logged-in user has write permission to // that folder, and will return an error if nodes from different // folders are passed in. Also returns an error if the new name // already has an entry corresponding to an existing directory // (only non-dir types may be renamed over). This is a // remote-sync operation. Rename(ctx context.Context, oldParent Node, oldName string, newParent Node, newName string) error // Read fills in the given buffer with data from the file at the // given node starting at the given offset, if the logged-in user // has read permission to the top-level folder. The read data // reflects any outstanding writes and truncates to that file that // have been written through this KBFSOps object, even if those // writes have not yet been sync'd. 
There is no guarantee that // Read returns all of the requested data; it will return the // number of bytes that it wrote to the dest buffer. Reads on an // unlinked file may or may not succeed, depending on whether or // not the data has been cached locally. If (0, nil) is returned, // that means EOF has been reached. This is a remote-access // operation. Read(ctx context.Context, file Node, dest []byte, off int64) (int64, error) // Write modifies the file at the given node, by writing the given // buffer at the given offset within the file, if the logged-in // user has write permission to the top-level folder. It // overwrites any data already there, and extends the file size as // necessary to accomodate the new data. It guarantees to write // the entire buffer in one operation. Writes on an unlinked file // may or may not succeed as no-ops, depending on whether or not // the necessary blocks have been locally cached. This is a // remote-access operation. Write(ctx context.Context, file Node, data []byte, off int64) error // Truncate modifies the file at the given node, by either // shrinking or extending its size to match the given size, if the // logged-in user has write permission to the top-level folder. // If extending the file, it pads the new data with 0s. Truncates // on an unlinked file may or may not succeed as no-ops, depending // on whether or not the necessary blocks have been locally // cached. This is a remote-access operation. Truncate(ctx context.Context, file Node, size uint64) error // SetEx turns on or off the executable bit on the file // represented by a given node, if the logged-in user has write // permissions to the top-level folder. This is a remote-sync // operation. SetEx(ctx context.Context, file Node, ex bool) error // SetMtime sets the modification time on the file represented by // a given node, if the logged-in user has write permissions to // the top-level folder. If mtime is nil, it is a noop. This is // a remote-sync operation. SetMtime(ctx context.Context, file Node, mtime *time.Time) error // SyncAll flushes all outstanding writes and truncates for any // dirty files to the KBFS servers within the given folder, if the // logged-in user has write permissions to the top-level folder. // If done through a file system interface, this may include // modifications done via multiple file handles. This is a // remote-sync operation. SyncAll(ctx context.Context, folderBranch FolderBranch) error // FolderStatus returns the status of a particular folder/branch, along // with a channel that will be closed when the status has been // updated (to eliminate the need for polling this method). FolderStatus(ctx context.Context, folderBranch FolderBranch) ( FolderBranchStatus, <-chan StatusUpdate, error) // Status returns the status of KBFS, along with a channel that will be // closed when the status has been updated (to eliminate the need for // polling this method). Note that this channel only applies to // connection status changes. // // KBFSStatus can be non-empty even if there is an error. Status(ctx context.Context) ( KBFSStatus, <-chan StatusUpdate, error) // UnstageForTesting clears out this device's staged state, if // any, and fast-forwards to the current head of this // folder-branch. UnstageForTesting(ctx context.Context, folderBranch FolderBranch) error // RequestRekey requests to rekey this folder. Note that this asynchronously // requests a rekey, so canceling ctx doesn't cancel the rekey. 
	// RequestRekey requests to rekey this folder. Note that this asynchronously
	// requests a rekey, so canceling ctx doesn't cancel the rekey.
	RequestRekey(ctx context.Context, id tlf.ID)
	// SyncFromServerForTesting blocks until the local client has
	// contacted the server and guaranteed that all known updates
	// for the given top-level folder have been applied locally
	// (and notifications sent out to any observers). It returns
	// an error if this folder-branch is currently unmerged or
	// dirty locally. If lockBeforeGet is non-nil, it blocks on idempotently
	// taking the lock from server at the time it gets any metadata.
	SyncFromServerForTesting(ctx context.Context, folderBranch FolderBranch,
		lockBeforeGet *keybase1.LockID) error
	// GetUpdateHistory returns a complete history of all the merged
	// updates of the given folder, in a data structure that's
	// suitable for encoding directly into JSON. This is an expensive
	// operation, and should only be used for occasional debugging.
	// Note that the history does not include any unmerged changes or
	// outstanding writes from the local device.
	GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) (
		history TLFUpdateHistory, err error)
	// GetEditHistory returns a clustered list of the most recent file
	// edits by each of the valid writers of the given folder. Users
	// looking to get updates to this list can register as an observer
	// for the folder.
	GetEditHistory(ctx context.Context, folderBranch FolderBranch) (
		edits TlfWriterEdits, err error)
	// GetNodeMetadata gets metadata associated with a Node.
	GetNodeMetadata(ctx context.Context, node Node) (NodeMetadata, error)
	// Shutdown is called to clean up any resources associated with
	// this KBFSOps instance.
	Shutdown(ctx context.Context) error
	// PushConnectionStatusChange updates the status of a service for
	// human readable connection status tracking.
	PushConnectionStatusChange(service string, newStatus error)
	// PushStatusChange causes Status listeners to be notified via closing
	// the status channel.
	PushStatusChange()
	// ClearPrivateFolderMD clears any cached private folder metadata,
	// e.g. on a logout.
	ClearPrivateFolderMD(ctx context.Context)
	// ForceFastForward forwards the nodes of all folders that have
	// been previously cleared with `ClearPrivateFolderMD` to their
	// newest version. It works asynchronously, so no error is
	// returned.
	ForceFastForward(ctx context.Context)
	// TeamNameChanged indicates that a team has changed its name, and
	// we should clean up any outstanding handle info associated with
	// the team ID.
	TeamNameChanged(ctx context.Context, tid keybase1.TeamID)
	// KickoffAllOutstandingRekeys kicks off all outstanding rekeys. It does
	// nothing to folders that have not scheduled a rekey. This should be
	// called when we receive an event of "paper key cached" from service.
	KickoffAllOutstandingRekeys() error
}

type merkleRootGetter interface {
	// GetCurrentMerkleRoot returns the current root of the global
	// Keybase Merkle tree.
	GetCurrentMerkleRoot(ctx context.Context) (keybase1.MerkleRootV2, error)
}

type gitMetadataPutter interface {
	PutGitMetadata(ctx context.Context, folder keybase1.Folder,
		repoID keybase1.RepoID, repoName keybase1.GitRepoName) error
}

// KeybaseService is an interface for communicating with the keybase
// service.
type KeybaseService interface {
	merkleRootGetter
	gitMetadataPutter

	// Resolve, given an assertion, resolves it to a username/UID
	// pair. The username <-> UID mapping is trusted and
	// immutable, so it can be cached. If the assertion is just
	// the username or a UID assertion, then the resolution can
	// also be trusted.
	// If the returned pair is equal to that of
	// the current session, then it can also be
	// trusted. Otherwise, Identify() needs to be called on the
	// assertion before the assertion -> (username, UID) mapping
	// can be trusted.
	Resolve(ctx context.Context, assertion string) (
		libkb.NormalizedUsername, keybase1.UserOrTeamID, error)
	// Identify, given an assertion, returns a UserInfo struct
	// with the user that matches that assertion, or an error
	// otherwise. The reason string is displayed on any tracker
	// popups spawned.
	Identify(ctx context.Context, assertion, reason string) (
		libkb.NormalizedUsername, keybase1.UserOrTeamID, error)
	// ResolveIdentifyImplicitTeam resolves, and optionally
	// identifies, an implicit team. If the implicit team doesn't yet
	// exist, and doIdentifies is true, one is created.
	ResolveIdentifyImplicitTeam(
		ctx context.Context, assertions, suffix string, tlfType tlf.Type,
		doIdentifies bool, reason string) (ImplicitTeamInfo, error)
	// LoadUserPlusKeys returns a UserInfo struct for a
	// user with the specified UID.
	// If you have the UID for a user and don't require Identify to
	// validate an assertion or the identity of a user, use this to
	// get UserInfo structs as it is much cheaper than Identify.
	//
	// pollForKID, if non empty, causes `PollForKID` field to be populated, which
	// causes the service to poll for the given KID. This is useful during
	// provisioning where the provisioner needs to get the MD revision that the
	// provisionee has set the rekey bit on.
	LoadUserPlusKeys(ctx context.Context,
		uid keybase1.UID, pollForKID keybase1.KID) (UserInfo, error)
	// LoadUnverifiedKeys returns a list of unverified public keys. They are
	// the union of all known public keys associated with the account and the
	// verified keys currently part of the user's sigchain.
	LoadUnverifiedKeys(ctx context.Context, uid keybase1.UID) (
		[]keybase1.PublicKey, error)
	// LoadTeamPlusKeys returns a TeamInfo struct for a team with the
	// specified TeamID. The caller can specify `desiredKeyGen` to
	// force a server check if that particular key gen isn't yet
	// known; it may be set to UnspecifiedKeyGen if no server check is
	// required. The caller can specify `desiredUID` and
	// `desiredRole` to force a server check if that particular UID
	// isn't a member of the team yet according to local caches; it
	// may be set to "" if no server check is required.
	LoadTeamPlusKeys(ctx context.Context, tid keybase1.TeamID,
		desiredKeyGen kbfsmd.KeyGen, desiredUser keybase1.UserVersion,
		desiredRole keybase1.TeamRole) (TeamInfo, error)
	// CurrentSession returns a SessionInfo struct with all the
	// information for the current session, or an error otherwise.
	CurrentSession(ctx context.Context, sessionID int) (SessionInfo, error)
	// FavoriteAdd adds the given folder to the list of favorites.
	FavoriteAdd(ctx context.Context, folder keybase1.Folder) error
	// FavoriteDelete removes the given folder from the list of
	// favorites.
	FavoriteDelete(ctx context.Context, folder keybase1.Folder) error
	// FavoriteList returns the current list of favorites.
	FavoriteList(ctx context.Context, sessionID int) ([]keybase1.Folder, error)
	// Notify sends a filesystem notification.
	Notify(ctx context.Context, notification *keybase1.FSNotification) error
	// NotifySyncStatus sends a sync status notification.
	NotifySyncStatus(ctx context.Context,
		status *keybase1.FSPathSyncStatus) error
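	// Editor's sketch (not part of the original docs): resolving an
	// assertion and then loading the user's keys without a full
	// Identify, assuming a concrete KeybaseService value `service`.
	// The AsUser conversion and the empty-KID argument are
	// assumptions for illustration.
	//
	//	name, id, err := service.Resolve(ctx, "alice")
	//	if err != nil {
	//		return err
	//	}
	//	uid, err := id.AsUser() // assumed conversion; a UserOrTeamID may hold a team
	//	if err != nil {
	//		return err
	//	}
	//	info, err := service.LoadUserPlusKeys(ctx, uid, keybase1.KID(""))
	//	if err != nil {
	//		return err
	//	}
	//	_, _ = name, info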
	// FlushUserFromLocalCache instructs this layer to clear any
	// KBFS-side, locally-cached information about the given user.
	// This does NOT involve communication with the daemon; it is
	// just to force future calls loading this user to fall through to
	// the daemon itself, rather than being served from the cache.
	FlushUserFromLocalCache(ctx context.Context, uid keybase1.UID)
	// FlushUserUnverifiedKeysFromLocalCache instructs this layer to clear any
	// KBFS-side, locally-cached unverified keys for the given user.
	FlushUserUnverifiedKeysFromLocalCache(ctx context.Context, uid keybase1.UID)

	// TODO: Add CryptoClient methods, too.

	// EstablishMountDir asks the service for the current mount path
	// and sets it if not established.
	EstablishMountDir(ctx context.Context) (string, error)

	// Shutdown frees any resources associated with this
	// instance. No other methods may be called after this is
	// called.
	Shutdown()
}

// KeybaseServiceCn defines methods needed to construct KeybaseService
// and Crypto implementations.
type KeybaseServiceCn interface {
	NewKeybaseService(config Config, params InitParams, ctx Context,
		log logger.Logger) (KeybaseService, error)
	NewCrypto(config Config, params InitParams, ctx Context,
		log logger.Logger) (Crypto, error)
}

type resolver interface {
	// Resolve, given an assertion, resolves it to a username/UID
	// pair. The username <-> UID mapping is trusted and
	// immutable, so it can be cached. If the assertion is just
	// the username or a UID assertion, then the resolution can
	// also be trusted. If the returned pair is equal to that of
	// the current session, then it can also be
	// trusted. Otherwise, Identify() needs to be called on the
	// assertion before the assertion -> (username, UserOrTeamID) mapping
	// can be trusted.
	//
	// TODO: some of the above assumptions on cacheability aren't
	// right for subteams, which can change their name, so this may
	// need updating.
	Resolve(ctx context.Context, assertion string) (
		libkb.NormalizedUsername, keybase1.UserOrTeamID, error)
	// ResolveImplicitTeam resolves the given implicit team.
	ResolveImplicitTeam(
		ctx context.Context, assertions, suffix string, tlfType tlf.Type) (
		ImplicitTeamInfo, error)
}

type identifier interface {
	// Identify resolves an assertion (which could also be a
	// username) to a UserInfo struct, spawning tracker popups if
	// necessary. The reason string is displayed on any tracker
	// popups spawned.
	Identify(ctx context.Context, assertion, reason string) (
		libkb.NormalizedUsername, keybase1.UserOrTeamID, error)
	// IdentifyImplicitTeam identifies (and creates if necessary) the
	// given implicit team.
	IdentifyImplicitTeam(
		ctx context.Context, assertions, suffix string, tlfType tlf.Type,
		reason string) (ImplicitTeamInfo, error)
}

type normalizedUsernameGetter interface {
	// GetNormalizedUsername returns the normalized username
	// corresponding to the given UID.
	GetNormalizedUsername(ctx context.Context, id keybase1.UserOrTeamID) (
		libkb.NormalizedUsername, error)
}

// CurrentSessionGetter is an interface for objects that can return
// session info.
type CurrentSessionGetter interface {
	// GetCurrentSession gets the current session info.
	GetCurrentSession(ctx context.Context) (SessionInfo, error)
}

// teamMembershipChecker is a copy of kbfsmd.TeamMembershipChecker for
// embedding in KBPKI. Unfortunately, this is necessary since mockgen
// can't handle embedded interfaces living in other packages.
type teamMembershipChecker interface {
	// IsTeamWriter is a copy of
	// kbfsmd.TeamMembershipChecker.IsTeamWriter.
	IsTeamWriter(ctx context.Context, tid keybase1.TeamID, uid keybase1.UID,
		verifyingKey kbfscrypto.VerifyingKey) (bool, error)
	// IsTeamReader is a copy of
	// kbfsmd.TeamMembershipChecker.IsTeamReader.
	IsTeamReader(ctx context.Context, tid keybase1.TeamID, uid keybase1.UID) (
		bool, error)
}

type teamKeysGetter interface {
	// GetTeamTLFCryptKeys gets all of a team's secret crypt keys, by
	// generation, as well as the latest key generation number for the
	// team. The caller can specify `desiredKeyGen` to force a server
	// check if that particular key gen isn't yet known; it may be set
	// to UnspecifiedKeyGen if no server check is required.
	GetTeamTLFCryptKeys(ctx context.Context, tid keybase1.TeamID,
		desiredKeyGen kbfsmd.KeyGen) (
		map[kbfsmd.KeyGen]kbfscrypto.TLFCryptKey, kbfsmd.KeyGen, error)
}

type teamRootIDGetter interface {
	// GetTeamRootID returns the root team ID for the given (sub)team
	// ID.
	GetTeamRootID(ctx context.Context, tid keybase1.TeamID) (
		keybase1.TeamID, error)
}

// KBPKI interacts with the Keybase daemon to fetch user info.
type KBPKI interface {
	CurrentSessionGetter
	resolver
	identifier
	normalizedUsernameGetter
	merkleRootGetter
	teamMembershipChecker
	teamKeysGetter
	teamRootIDGetter
	gitMetadataPutter

	// HasVerifyingKey returns nil if the given user has the given
	// VerifyingKey, and an error otherwise.
	HasVerifyingKey(ctx context.Context, uid keybase1.UID,
		verifyingKey kbfscrypto.VerifyingKey,
		atServerTime time.Time) error
	// HasUnverifiedVerifyingKey returns nil if the given user has the given
	// unverified VerifyingKey, and an error otherwise. Note that any match
	// is with a key not verified to be currently connected to the user via
	// their sigchain. This is currently only used to verify finalized or
	// reset TLFs. Further note that unverified keys are a superset of
	// verified keys.
	HasUnverifiedVerifyingKey(ctx context.Context, uid keybase1.UID,
		verifyingKey kbfscrypto.VerifyingKey) error
	// GetCryptPublicKeys gets all of a user's crypt public keys (including
	// paper keys).
	GetCryptPublicKeys(ctx context.Context, uid keybase1.UID) (
		[]kbfscrypto.CryptPublicKey, error)

	// TODO: Split the methods below off into a separate
	// FavoriteOps interface.

	// FavoriteAdd adds folder to the list of the logged in user's
	// favorite folders. It is idempotent.
	FavoriteAdd(ctx context.Context, folder keybase1.Folder) error
	// FavoriteDelete deletes folder from the list of the logged in user's
	// favorite folders. It is idempotent.
	FavoriteDelete(ctx context.Context, folder keybase1.Folder) error
	// FavoriteList returns the list of all favorite folders for
	// the logged in user.
	FavoriteList(ctx context.Context) ([]keybase1.Folder, error)
	// Notify sends a filesystem notification.
	Notify(ctx context.Context, notification *keybase1.FSNotification) error
}

// KeyMetadata is an interface for something that holds key
// information. This is usually implemented by RootMetadata.
type KeyMetadata interface {
	// TlfID returns the ID of the TLF for which this object holds
	// key info.
	TlfID() tlf.ID
	// TypeForKeying returns the keying type for this MD.
	TypeForKeying() tlf.KeyingType
	// LatestKeyGeneration returns the most recent key generation
	// with key data in this object, or PublicKeyGen if this TLF
	// is public.
	LatestKeyGeneration() kbfsmd.KeyGen
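	// Editor's sketch (not part of the original docs): how a caller
	// might use this interface to fetch the crypt key material for
	// its own device, assuming a KeyMetadata value `kmd`, the
	// caller's UID `uid`, and its device key `devKey`; the error
	// message is hypothetical.
	//
	//	keyGen := kmd.LatestKeyGeneration()
	//	ePubKey, encClientHalf, serverHalfID, found, err :=
	//		kmd.GetTLFCryptKeyParams(keyGen, uid, devKey)
	//	if err != nil {
	//		return err
	//	}
	//	if !found {
	//		return errors.New("no key for this device")
	//	}
	//	// ePubKey/encClientHalf feed into Crypto.DecryptTLFCryptKeyClientHalf,
	//	// and serverHalfID into KeyOps.GetTLFCryptKeyServerHalf (both below).
	//	_, _, _ = ePubKey, encClientHalf, serverHalfID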
	// GetTlfHandle returns the handle for the TLF. It must not
	// return nil.
	//
	// TODO: Remove the need for this function in this interface,
	// so that kbfsmd.RootMetadata can implement this interface
	// fully.
	GetTlfHandle() *TlfHandle
	// IsWriter checks that the given user is a valid writer of the TLF
	// right now.
	IsWriter(
		ctx context.Context, checker kbfsmd.TeamMembershipChecker,
		uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey) (
		bool, error)
	// HasKeyForUser returns whether or not the given user has
	// keys for at least one device. Returns an error if the TLF
	// is public.
	HasKeyForUser(user keybase1.UID) (bool, error)
	// GetTLFCryptKeyParams returns all the necessary info to
	// construct the TLF crypt key for the given key generation,
	// user, and device (identified by its crypt public key), or
	// false if not found. This returns an error if the TLF is
	// public.
	GetTLFCryptKeyParams(
		keyGen kbfsmd.KeyGen, user keybase1.UID,
		key kbfscrypto.CryptPublicKey) (
		kbfscrypto.TLFEphemeralPublicKey,
		kbfscrypto.EncryptedTLFCryptKeyClientHalf,
		kbfscrypto.TLFCryptKeyServerHalfID, bool, error)
	// StoresHistoricTLFCryptKeys returns whether or not history keys are
	// symmetrically encrypted; if not, they're encrypted per-device.
	StoresHistoricTLFCryptKeys() bool
	// GetHistoricTLFCryptKey attempts to symmetrically decrypt the key at the given
	// generation using the current generation's TLFCryptKey.
	GetHistoricTLFCryptKey(codec kbfscodec.Codec, keyGen kbfsmd.KeyGen,
		currentKey kbfscrypto.TLFCryptKey) (
		kbfscrypto.TLFCryptKey, error)
}

type encryptionKeyGetter interface {
	// GetTLFCryptKeyForEncryption gets the crypt key to use for
	// encryption (i.e., with the latest key generation) for the
	// TLF with the given metadata.
	GetTLFCryptKeyForEncryption(ctx context.Context, kmd KeyMetadata) (
		kbfscrypto.TLFCryptKey, error)
}

type mdDecryptionKeyGetter interface {
	// GetTLFCryptKeyForMDDecryption gets the crypt key to use for the
	// TLF with the given metadata to decrypt the private portion of
	// the metadata. It finds the appropriate key from mdWithKeys
	// (which in most cases is the same as mdToDecrypt) if it's not
	// already cached.
	GetTLFCryptKeyForMDDecryption(ctx context.Context,
		kmdToDecrypt, kmdWithKeys KeyMetadata) (
		kbfscrypto.TLFCryptKey, error)
}

type blockDecryptionKeyGetter interface {
	// GetTLFCryptKeyForBlockDecryption gets the crypt key to use
	// for the TLF with the given metadata to decrypt the block
	// pointed to by the given pointer.
	GetTLFCryptKeyForBlockDecryption(ctx context.Context, kmd KeyMetadata,
		blockPtr BlockPointer) (kbfscrypto.TLFCryptKey, error)
}

type blockKeyGetter interface {
	encryptionKeyGetter
	blockDecryptionKeyGetter
}

// KeyManager fetches and constructs the keys needed for KBFS file
// operations.
type KeyManager interface {
	blockKeyGetter
	mdDecryptionKeyGetter

	// GetTLFCryptKeyOfAllGenerations gets the crypt keys of all generations
	// for current devices. keys contains crypt keys from all generations, in
	// order, starting from FirstValidKeyGen.
	GetTLFCryptKeyOfAllGenerations(ctx context.Context, kmd KeyMetadata) (
		keys []kbfscrypto.TLFCryptKey, err error)
	// Rekey checks the given MD object, if it is a private TLF,
	// against the current set of device keys for all valid
	// readers and writers. If there are any new devices, it
	// updates all existing key generations to include the new
	// devices. If there are devices that have been removed, it
	// creates a new epoch of keys for the TLF. If there was an
	// error, or the RootMetadata wasn't changed, it returns false.
	// Otherwise, it returns true. If a new key generation is
	// added, the second return value points to this new key.
	// This is to allow for caching of the TLF crypt key only after a
	// successful merged write of the metadata. Otherwise we could
	// prematurely pollute the key cache.
	//
	// If the given MD object is a public TLF, it simply updates
	// the TLF's handle with any newly-resolved writers.
	//
	// If promptPaper is set, prompts for any unlocked paper keys.
	// promptPaper shouldn't be set if md is for a public TLF.
	Rekey(ctx context.Context, md *RootMetadata, promptPaper bool) (
		bool, *kbfscrypto.TLFCryptKey, error)
}

// Reporter exports events (asynchronously) to any number of sinks
type Reporter interface {
	// ReportErr records that a given error happened.
	ReportErr(ctx context.Context, tlfName tlf.CanonicalName, t tlf.Type,
		mode ErrorModeType, err error)
	// AllKnownErrors returns all errors known to this Reporter.
	AllKnownErrors() []ReportedError
	// Notify sends the given notification to any sink.
	Notify(ctx context.Context, notification *keybase1.FSNotification)
	// NotifySyncStatus sends the given path sync status to any sink.
	NotifySyncStatus(ctx context.Context,
		status *keybase1.FSPathSyncStatus)
	// Shutdown frees any resources allocated by a Reporter.
	Shutdown()
}

// MDCache gets and puts plaintext top-level metadata into the cache.
type MDCache interface {
	// Get gets the metadata object associated with the given TLF ID,
	// revision number, and branch ID (kbfsmd.NullBranchID for merged MD).
	Get(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID) (ImmutableRootMetadata, error)
	// Put stores the metadata object, only if an MD matching that TLF
	// ID, revision number, and branch ID isn't already cached. If
	// there is already a matching item in the cache, we require that
	// the caller manage the cache explicitly by deleting or replacing
	// it. This should be used when putting existing MDs
	// being fetched from the server.
	Put(md ImmutableRootMetadata) error
	// Delete removes the given metadata object from the cache if it exists.
	Delete(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID)
	// Replace replaces the entry matching the md under the old branch
	// ID with the new one. If the old entry doesn't exist, this is
	// equivalent to a Put, except that it overrides anything else
	// that's already in the cache. This should be used when putting
	// new MDs created locally.
	Replace(newRmd ImmutableRootMetadata, oldBID kbfsmd.BranchID) error
	// MarkPutToServer sets `PutToServer` to true for the specified
	// MD, if it already exists in the cache.
	MarkPutToServer(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID)
	// GetIDForHandle retrieves a cached, trusted TLF ID for the given
	// handle, if one exists.
	GetIDForHandle(handle *TlfHandle) (tlf.ID, error)
	// PutIDForHandle caches a trusted TLF ID for the given handle.
	PutIDForHandle(handle *TlfHandle, id tlf.ID) error
	// ChangeHandleForID moves an ID to be under a new handle, if the
	// ID is cached already.
	ChangeHandleForID(oldHandle *TlfHandle, newHandle *TlfHandle)
}

// KeyCache handles caching for both TLFCryptKeys and BlockCryptKeys.
type KeyCache interface {
	// GetTLFCryptKey gets the crypt key for the given TLF.
	GetTLFCryptKey(tlf.ID, kbfsmd.KeyGen) (kbfscrypto.TLFCryptKey, error)
	// PutTLFCryptKey stores the crypt key for the given TLF.
	PutTLFCryptKey(tlf.ID, kbfsmd.KeyGen, kbfscrypto.TLFCryptKey) error
}

// BlockCacheLifetime denotes the lifetime of an entry in BlockCache.
type BlockCacheLifetime int

func (l BlockCacheLifetime) String() string {
	switch l {
	case NoCacheEntry:
		return "NoCacheEntry"
	case TransientEntry:
		return "TransientEntry"
	case PermanentEntry:
		return "PermanentEntry"
	}
	return "Unknown"
}

const (
	// NoCacheEntry means that the entry will not be cached.
	NoCacheEntry BlockCacheLifetime = iota
	// TransientEntry means that the cache entry may be evicted at
	// any time.
	TransientEntry
	// PermanentEntry means that the cache entry must remain until
	// explicitly removed from the cache.
	PermanentEntry
)

// BlockCacheSimple gets and puts plaintext dir blocks and file blocks into
// a cache. These blocks are immutable and identified by their
// content hash.
type BlockCacheSimple interface {
	// Get gets the block associated with the given block ID.
	Get(ptr BlockPointer) (Block, error)
	// Put stores the final (content-addressable) block associated
	// with the given block ID. If lifetime is TransientEntry,
	// then it is assumed that the block exists on the server and
	// the entry may be evicted from the cache at any time. If
	// lifetime is PermanentEntry, then it is assumed that the
	// block doesn't exist on the server and must remain in the
	// cache until explicitly removed. As an intermediary state,
	// as when a block is being sent to the server, the block may
	// be put into the cache both with TransientEntry and
	// PermanentEntry -- these are two separate entries. This is
	// fine, since the block should be the same.
	Put(ptr BlockPointer, tlf tlf.ID, block Block,
		lifetime BlockCacheLifetime) error
}

// BlockCache specifies the interface of BlockCacheSimple, and also more
// advanced and internal methods.
type BlockCache interface {
	BlockCacheSimple
	// CheckForKnownPtr sees whether this cache has a transient
	// entry for the given file block, which must be a direct file
	// block containing data. Returns the full BlockPointer
	// associated with that ID, including key and data versions.
	// If no ID is known, return an uninitialized BlockPointer and
	// a nil error.
	CheckForKnownPtr(tlf tlf.ID, block *FileBlock) (BlockPointer, error)
	// DeleteTransient removes the transient entry for the given
	// pointer from the cache, as well as any cached IDs so the block
	// won't be reused.
	DeleteTransient(ptr BlockPointer, tlf tlf.ID) error
	// DeletePermanent removes the permanent entry for the non-dirty block
	// associated with the given block ID from the cache. No
	// error is returned if no block exists for the given ID.
	DeletePermanent(id kbfsblock.ID) error
	// DeleteKnownPtr removes the cached ID for the given file
	// block. It does not remove the block itself.
	DeleteKnownPtr(tlf tlf.ID, block *FileBlock) error
	// GetWithPrefetch retrieves a block from the cache, along with the block's
	// prefetch status.
	GetWithPrefetch(ptr BlockPointer) (block Block,
		prefetchStatus PrefetchStatus, lifetime BlockCacheLifetime, err error)
	// PutWithPrefetch puts a block into the cache, along with whether or not
	// it has triggered or finished a prefetch.
	PutWithPrefetch(ptr BlockPointer, tlf tlf.ID, block Block,
		lifetime BlockCacheLifetime, prefetchStatus PrefetchStatus) error
	// SetCleanBytesCapacity atomically sets clean bytes capacity for block
	// cache.
	SetCleanBytesCapacity(capacity uint64)
	// GetCleanBytesCapacity atomically gets clean bytes capacity for block
	// cache.
	GetCleanBytesCapacity() (capacity uint64)
}

// DirtyPermChan is a channel that gets closed when the holder has
// permission to write.
// We are forced to define it as a type due to a
// bug in mockgen that can't handle return values with a chan
// struct{}.
type DirtyPermChan <-chan struct{}

// DirtyBlockCache gets and puts plaintext dir blocks and file blocks
// into a cache, which have been modified by the application and not
// yet committed on the KBFS servers. They are identified by a
// (potentially random) ID that may not have any relationship with
// their context, along with a Branch in case the same TLF is being
// modified via multiple branches. Dirty blocks are never evicted,
// they must be deleted explicitly.
type DirtyBlockCache interface {
	// Get gets the block associated with the given block ID. Returns
	// the dirty block for the given ID, if one exists.
	Get(tlfID tlf.ID, ptr BlockPointer, branch BranchName) (Block, error)
	// Put stores a dirty block currently identified by the
	// given block pointer and branch name.
	Put(tlfID tlf.ID, ptr BlockPointer, branch BranchName, block Block) error
	// Delete removes the dirty block associated with the given block
	// pointer and branch from the cache. No error is returned if no
	// block exists for the given ID.
	Delete(tlfID tlf.ID, ptr BlockPointer, branch BranchName) error
	// IsDirty states whether or not the block associated with the
	// given block pointer and branch name is dirty in this cache.
	IsDirty(tlfID tlf.ID, ptr BlockPointer, branch BranchName) bool
	// IsAnyDirty returns whether there are any dirty blocks in the
	// cache. tlfID may be ignored.
	IsAnyDirty(tlfID tlf.ID) bool
	// RequestPermissionToDirty is called whenever a user wants to
	// write data to a file. The caller provides an estimated number
	// of bytes that will become dirty -- this is difficult to know
	// exactly without pre-fetching all the blocks involved, but in
	// practice we can just use the number of bytes sent in via the
	// Write. It returns a channel that blocks until the cache is
	// ready to receive more dirty data, at which point the channel is
	// closed. The user must call
	// `UpdateUnsyncedBytes(-estimatedDirtyBytes)` once it has
	// completed its write and called `UpdateUnsyncedBytes` for all
	// the exact dirty block sizes.
	RequestPermissionToDirty(ctx context.Context, tlfID tlf.ID,
		estimatedDirtyBytes int64) (DirtyPermChan, error)
	// UpdateUnsyncedBytes is called by a user, who has already been
	// granted permission to write, with the delta in block sizes that
	// were dirtied as part of the write. So for example, if a
	// newly-dirtied block of 20 bytes was extended by 5 bytes, they
	// should send 25. If on the next write (before any syncs), bytes
	// 10-15 of that same block were overwritten, they should send 0
	// over the channel because there were no new bytes. If an
	// already-dirtied block is truncated, or if previously requested
	// bytes have now been updated more accurately in previous
	// requests, newUnsyncedBytes may be negative. wasSyncing should
	// be true if `BlockSyncStarted` has already been called for this
	// block.
	UpdateUnsyncedBytes(tlfID tlf.ID, newUnsyncedBytes int64, wasSyncing bool)
	// UpdateSyncingBytes is called when a particular block has
	// started syncing, or with a negative number when a block is no
	// longer syncing due to an error (and BlockSyncFinished will
	// never be called).
	UpdateSyncingBytes(tlfID tlf.ID, size int64)
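	// Editor's sketch (not part of the original docs) of the
	// write-permission flow described above, assuming a concrete
	// DirtyBlockCache value `dbc` and pre-existing write variables:
	//
	//	estimated := int64(len(writeData))
	//	permCh, err := dbc.RequestPermissionToDirty(ctx, tlfID, estimated)
	//	if err != nil {
	//		return err
	//	}
	//	select {
	//	case <-permCh: // closed once the cache can accept more dirty bytes
	//	case <-ctx.Done():
	//		return ctx.Err()
	//	}
	//	if err := dbc.Put(tlfID, ptr, branch, block); err != nil {
	//		return err
	//	}
	//	dbc.UpdateUnsyncedBytes(tlfID, exactDirtyBytes, false)
	//	// After reporting the exact sizes, undo the original estimate:
	//	dbc.UpdateUnsyncedBytes(tlfID, -estimated, false)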
	// BlockSyncFinished is called when a particular block has
	// finished syncing, though the overall sync might not yet be
	// complete. This lets the cache know it might be able to grant
	// more permission to writers.
	BlockSyncFinished(tlfID tlf.ID, size int64)
	// SyncFinished is called when a complete sync has completed and
	// its dirty blocks have been removed from the cache. This lets
	// the cache know it might be able to grant more permission to
	// writers.
	SyncFinished(tlfID tlf.ID, size int64)
	// ShouldForceSync returns true if the sync buffer is full enough
	// to force all callers to sync their data immediately.
	ShouldForceSync(tlfID tlf.ID) bool
	// Shutdown frees any resources associated with this instance. It
	// returns an error if there are any unsynced blocks.
	Shutdown() error
}

// DiskBlockCache caches blocks to the disk.
type DiskBlockCache interface {
	// Get gets a block from the disk cache.
	Get(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID) (
		buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf,
		prefetchStatus PrefetchStatus, err error)
	// Put puts a block to the disk cache. Returns after it has updated the
	// metadata but before it has finished writing the block.
	Put(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID, buf []byte,
		serverHalf kbfscrypto.BlockCryptKeyServerHalf) error
	// Delete deletes some blocks from the disk cache.
	Delete(ctx context.Context, blockIDs []kbfsblock.ID) (numRemoved int,
		sizeRemoved int64, err error)
	// UpdateMetadata updates metadata for a given block in the disk cache.
	UpdateMetadata(ctx context.Context, blockID kbfsblock.ID,
		prefetchStatus PrefetchStatus) error
	// Status returns the current status of the disk cache.
	Status(ctx context.Context) map[string]DiskBlockCacheStatus
	// Shutdown cleanly shuts down the disk block cache.
	Shutdown(ctx context.Context)
}

// cryptoPure contains all methods of Crypto that don't depend on
// implicit state, i.e. they're pure functions of the input.
type cryptoPure interface {
	// MakeRandomTlfID generates a dir ID using a CSPRNG.
	MakeRandomTlfID(t tlf.Type) (tlf.ID, error)
	// MakeRandomBranchID generates a per-device branch ID using a
	// CSPRNG. It will not return LocalSquashBranchID or
	// kbfsmd.NullBranchID.
	MakeRandomBranchID() (kbfsmd.BranchID, error)
	// MakeTemporaryBlockID generates a temporary block ID using a
	// CSPRNG. This is used for indirect blocks before they're
	// committed to the server.
	MakeTemporaryBlockID() (kbfsblock.ID, error)
	// MakeBlockRefNonce generates a block reference nonce using a
	// CSPRNG. This is used for distinguishing different references to
	// the same BlockID.
	MakeBlockRefNonce() (kbfsblock.RefNonce, error)
	// MakeRandomTLFEphemeralKeys generates ephemeral keys using a
	// CSPRNG for a TLF. These keys can then be used to key/rekey
	// the TLF.
	MakeRandomTLFEphemeralKeys() (kbfscrypto.TLFEphemeralPublicKey,
		kbfscrypto.TLFEphemeralPrivateKey, error)
	// MakeRandomTLFKeys generates keys using a CSPRNG for a
	// single key generation of a TLF.
	MakeRandomTLFKeys() (kbfscrypto.TLFPublicKey, kbfscrypto.TLFPrivateKey,
		kbfscrypto.TLFCryptKey, error)
	// MakeRandomBlockCryptKeyServerHalf generates the server-side of
	// a block crypt key.
	MakeRandomBlockCryptKeyServerHalf() (
		kbfscrypto.BlockCryptKeyServerHalf, error)
	// EncryptPrivateMetadata encrypts a PrivateMetadata object.
	EncryptPrivateMetadata(
		pmd PrivateMetadata, key kbfscrypto.TLFCryptKey) (
		kbfscrypto.EncryptedPrivateMetadata, error)
	// DecryptPrivateMetadata decrypts a PrivateMetadata object.
	DecryptPrivateMetadata(
		encryptedPMD kbfscrypto.EncryptedPrivateMetadata,
		key kbfscrypto.TLFCryptKey) (PrivateMetadata, error)
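	// Editor's sketch (not part of the original docs): a round trip
	// through the two methods above, assuming a cryptoPure value
	// `cp`, a PrivateMetadata value `pmd`, and a TLF crypt key `key`:
	//
	//	encPMD, err := cp.EncryptPrivateMetadata(pmd, key)
	//	if err != nil {
	//		return err
	//	}
	//	decPMD, err := cp.DecryptPrivateMetadata(encPMD, key)
	//	if err != nil {
	//		return err
	//	}
	//	// decPMD should now be equivalent to pmd.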
	// EncryptBlock encrypts a block. plainSize is the size of the encoded
	// block; EncryptBlock() must guarantee that plainSize <=
	// len(encryptedBlock).
	EncryptBlock(block Block, key kbfscrypto.BlockCryptKey) (
		plainSize int, encryptedBlock kbfscrypto.EncryptedBlock, err error)
	// DecryptBlock decrypts a block. Similar to EncryptBlock(),
	// DecryptBlock() must guarantee that (size of the decrypted
	// block) <= len(encryptedBlock).
	DecryptBlock(encryptedBlock kbfscrypto.EncryptedBlock,
		key kbfscrypto.BlockCryptKey, block Block) error
}

// Crypto signs, verifies, encrypts, and decrypts stuff.
type Crypto interface {
	cryptoPure

	// Duplicate kbfscrypto.Signer here to work around gomock's
	// limitations.
	Sign(context.Context, []byte) (kbfscrypto.SignatureInfo, error)
	SignForKBFS(context.Context, []byte) (kbfscrypto.SignatureInfo, error)
	SignToString(context.Context, []byte) (string, error)
	// DecryptTLFCryptKeyClientHalf decrypts a
	// kbfscrypto.TLFCryptKeyClientHalf using the current device's
	// private key and the TLF's ephemeral public key.
	DecryptTLFCryptKeyClientHalf(ctx context.Context,
		publicKey kbfscrypto.TLFEphemeralPublicKey,
		encryptedClientHalf kbfscrypto.EncryptedTLFCryptKeyClientHalf) (
		kbfscrypto.TLFCryptKeyClientHalf, error)
	// DecryptTLFCryptKeyClientHalfAny decrypts one of the
	// kbfscrypto.TLFCryptKeyClientHalf using the available
	// private keys and the ephemeral public key. If promptPaper
	// is true, the service will prompt the user for any unlocked
	// paper keys.
	DecryptTLFCryptKeyClientHalfAny(ctx context.Context,
		keys []EncryptedTLFCryptKeyClientAndEphemeral, promptPaper bool) (
		kbfscrypto.TLFCryptKeyClientHalf, int, error)
	// Shutdown frees any resources associated with this instance.
	Shutdown()
}

type tlfIDGetter interface {
	// GetIDForHandle returns the tlf.ID associated with the given
	// handle, if the logged-in user has read permission on the
	// folder. It may or may not create the folder if it doesn't
	// exist yet, and it may return `tlf.NullID` with a `nil` error if
	// it doesn't create a missing folder.
	GetIDForHandle(ctx context.Context, handle *TlfHandle) (tlf.ID, error)
}

// MDOps gets and puts root metadata to an MDServer. On a get, it
// verifies the metadata is signed by the metadata's signing key.
type MDOps interface {
	tlfIDGetter

	// GetForTLF returns the current metadata object
	// corresponding to the given top-level folder, if the logged-in
	// user has read permission on the folder.
	//
	// If lockBeforeGet is not nil, it causes mdserver to take the lock on the
	// lock ID before the get.
	GetForTLF(ctx context.Context, id tlf.ID, lockBeforeGet *keybase1.LockID) (
		ImmutableRootMetadata, error)
	// GetUnmergedForTLF is the same as the above but for unmerged
	// metadata.
	GetUnmergedForTLF(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) (
		ImmutableRootMetadata, error)
	// GetRange returns a range of metadata objects corresponding to
	// the passed revision numbers (inclusive).
	//
	// If lockBeforeGet is not nil, it causes mdserver to take the lock on the
	// lock ID before the get.
	GetRange(ctx context.Context, id tlf.ID, start, stop kbfsmd.Revision,
		lockID *keybase1.LockID) ([]ImmutableRootMetadata, error)
	// GetUnmergedRange is the same as the above but for unmerged
	// metadata history (inclusive).
	GetUnmergedRange(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID,
		start, stop kbfsmd.Revision) ([]ImmutableRootMetadata, error)
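	// Editor's sketch (not part of the original docs): fetching the
	// current head and a trailing range of revisions, assuming a
	// concrete MDOps value `mdops` and no server-side locking; the
	// Revision accessor on ImmutableRootMetadata is assumed here.
	//
	//	head, err := mdops.GetForTLF(ctx, id, nil)
	//	if err != nil {
	//		return err
	//	}
	//	start := head.Revision() - 10
	//	rmds, err := mdops.GetRange(ctx, id, start, head.Revision(), nil)
	//	if err != nil {
	//		return err
	//	}
	//	_ = rmds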
	// Put stores the metadata object for the given top-level folder.
	// This also adds the resulting ImmutableRootMetadata object to
	// the mdcache, if the Put is successful. Note that constructing
	// the ImmutableRootMetadata requires knowing the verifying key,
	// which might not be the same as the local user's verifying key
	// if the MD has been copied from a previous update.
	//
	// If lockContext is not nil, it causes the mdserver to check a lockID at
	// the time of the put, and optionally (if specified in lockContext)
	// releases the lock on the lock ID if the put is successful. Releasing the
	// lock in mdserver is idempotent. Note that journalMDOps doesn't support
	// lockContext for now. If journaling is enabled, use FinishSingleOp to
	// require locks.
	//
	// The priority parameter specifies the priority of this particular MD put
	// operation. When conflict happens, mdserver tries to prioritize writes
	// with higher priorities. Caller should use pre-defined (or define new)
	// constants in keybase1 package, such as keybase1.MDPriorityNormal. Note
	// that journalMDOps doesn't support any priority other than
	// MDPriorityNormal for now. If journaling is enabled, use FinishSingleOp
	// to override priority.
	Put(ctx context.Context, rmd *RootMetadata,
		verifyingKey kbfscrypto.VerifyingKey,
		lockContext *keybase1.LockContext, priority keybase1.MDPriority) (
		ImmutableRootMetadata, error)
	// PutUnmerged is the same as the above but for unmerged metadata
	// history. This also adds the resulting ImmutableRootMetadata
	// object to the mdcache, if the PutUnmerged is successful. Note
	// that constructing the ImmutableRootMetadata requires knowing
	// the verifying key, which might not be the same as the local
	// user's verifying key if the MD has been copied from a previous
	// update.
	PutUnmerged(ctx context.Context, rmd *RootMetadata,
		verifyingKey kbfscrypto.VerifyingKey) (ImmutableRootMetadata, error)
	// PruneBranch prunes all unmerged history for the given TLF
	// branch.
	PruneBranch(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) error
	// ResolveBranch prunes all unmerged history for the given TLF
	// branch, and also deletes any blocks in `blocksToDelete` that
	// are still in the local journal. In addition, it appends the
	// given MD to the journal. This also adds the resulting
	// ImmutableRootMetadata object to the mdcache, if the
	// ResolveBranch is successful. Note that constructing the
	// ImmutableRootMetadata requires knowing the verifying key, which
	// might not be the same as the local user's verifying key if the
	// MD has been copied from a previous update.
	ResolveBranch(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID,
		blocksToDelete []kbfsblock.ID, rmd *RootMetadata,
		verifyingKey kbfscrypto.VerifyingKey) (ImmutableRootMetadata, error)
	// GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF,
	// which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it
	// entered into a conflicting state.
	GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (tlf.Handle, error)
}

// KeyOps fetches server-side key halves from the key server.
type KeyOps interface {
	// GetTLFCryptKeyServerHalf gets a server-side key half for a
	// device given the key half ID.
	GetTLFCryptKeyServerHalf(ctx context.Context,
		serverHalfID kbfscrypto.TLFCryptKeyServerHalfID,
		cryptPublicKey kbfscrypto.CryptPublicKey) (
		kbfscrypto.TLFCryptKeyServerHalf, error)
	// PutTLFCryptKeyServerHalves stores server-side key halves for a
	// set of users and devices.
	PutTLFCryptKeyServerHalves(ctx context.Context,
		keyServerHalves kbfsmd.UserDeviceKeyServerHalves) error
	// DeleteTLFCryptKeyServerHalf deletes a server-side key half for a
	// device given the key half ID.
	DeleteTLFCryptKeyServerHalf(ctx context.Context,
		uid keybase1.UID, key kbfscrypto.CryptPublicKey,
		serverHalfID kbfscrypto.TLFCryptKeyServerHalfID) error
}

// Prefetcher is an interface to a block prefetcher.
type Prefetcher interface {
	// ProcessBlockForPrefetch potentially triggers and monitors a prefetch.
	ProcessBlockForPrefetch(ctx context.Context, ptr BlockPointer,
		block Block, kmd KeyMetadata, priority int,
		lifetime BlockCacheLifetime, prefetchStatus PrefetchStatus)
	// CancelPrefetch notifies the prefetcher that a prefetch should be
	// canceled.
	CancelPrefetch(kbfsblock.ID)
	// Shutdown shuts down the prefetcher idempotently. Future calls to
	// the various Prefetch* methods will return io.EOF. The returned channel
	// allows upstream components to block until all pending prefetches are
	// complete. This feature is mainly used for testing, but also to toggle
	// the prefetcher on and off.
	Shutdown() <-chan struct{}
}

// BlockOps gets and puts data blocks to a BlockServer. It performs
// the necessary crypto operations on each block.
type BlockOps interface {
	blockRetrieverGetter

	// Get gets the block associated with the given block pointer
	// (which belongs to the TLF with the given key metadata),
	// decrypts it if necessary, and fills in the provided block
	// object with its contents, if the logged-in user has read
	// permission for that block. cacheLifetime controls the behavior of the
	// write-through cache once a Get completes.
	Get(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer,
		block Block, cacheLifetime BlockCacheLifetime) error
	// GetEncodedSize gets the encoded size of the block associated
	// with the given block pointer (which belongs to the TLF with the
	// given key metadata).
	GetEncodedSize(ctx context.Context, kmd KeyMetadata,
		blockPtr BlockPointer) (uint32, error)
	// Ready turns the given block (which belongs to the TLF with
	// the given key metadata) into encoded (and encrypted) data,
	// and calculates its ID and size, so that we can do a bunch
	// of block puts in parallel for every write. Ready() must
	// guarantee that plainSize <= readyBlockData.QuotaSize().
	Ready(ctx context.Context, kmd KeyMetadata, block Block) (
		id kbfsblock.ID, plainSize int, readyBlockData ReadyBlockData,
		err error)
	// Delete instructs the server to delete the given block references.
	// It returns the number of not-yet-deleted references to
	// each block reference.
	Delete(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) (
		liveCounts map[kbfsblock.ID]int, err error)
	// Archive instructs the server to mark the given block references
	// as "archived"; that is, they are not being used in the current
	// view of the folder, and shouldn't be served to anyone other
	// than folder writers.
	Archive(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) error
	// TogglePrefetcher activates or deactivates the prefetcher.
	TogglePrefetcher(enable bool) <-chan struct{}
	// Prefetcher retrieves this BlockOps' Prefetcher.
	Prefetcher() Prefetcher
	// Shutdown shuts down all the workers performing Get operations.
	Shutdown()
}

// Duplicate kbfscrypto.AuthTokenRefreshHandler here to work around
// gomock's limitations.
type authTokenRefreshHandler interface {
	RefreshAuthToken(context.Context)
}
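// Editor's sketch (not part of the original docs): the BlockOps write
// path above pairs Ready with a later server put, assuming a concrete
// BlockOps value `bops`, key metadata `kmd`, and a block `block`:
//
//	id, plainSize, readyBlockData, err := bops.Ready(ctx, kmd, block)
//	if err != nil {
//		return err
//	}
//	// plainSize <= readyBlockData.QuotaSize() is guaranteed; the
//	// encoded data and ID can now be uploaded via a BlockServer.
//	_, _, _ = id, plainSize, readyBlockData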
// MDServer gets and puts metadata for each top-level directory. The
// instantiation should be able to fetch session/user details via KBPKI. On a
// put, the server is responsible for 1) ensuring the user has appropriate
// permissions for whatever modifications were made; 2) ensuring that
// LastModifyingWriter and LastModifyingUser are updated appropriately; and 3)
// detecting conflicting writes based on the previous root block ID (i.e., when
// it supports strict consistency). On a get, it verifies the logged-in user
// has read permissions.
//
// TODO: Add interface for searching by time
type MDServer interface {
	authTokenRefreshHandler

	// GetForHandle returns the current (signed/encrypted) metadata
	// object corresponding to the given top-level folder's handle, if
	// the logged-in user has read permission on the folder. It
	// creates the folder if one doesn't exist yet, and the logged-in
	// user has permission to do so.
	//
	// If lockBeforeGet is not nil, it takes a lock on the lock ID before
	// trying to get anything. If taking the lock fails, an error is returned.
	// Note that taking a lock from the mdserver is idempotent.
	//
	// If there is no returned error, then the returned ID must
	// always be non-null. A nil *RootMetadataSigned may be
	// returned, but if it is non-nil, then its ID must match the
	// returned ID.
	GetForHandle(ctx context.Context, handle tlf.Handle,
		mStatus kbfsmd.MergeStatus, lockBeforeGet *keybase1.LockID) (
		tlf.ID, *RootMetadataSigned, error)
	// GetForTLF returns the current (signed/encrypted) metadata object
	// corresponding to the given top-level folder, if the logged-in
	// user has read permission on the folder.
	//
	// If lockBeforeGet is not nil, it takes a lock on the lock ID before
	// trying to get anything. If taking the lock fails, an error is returned.
	// Note that taking a lock from the mdserver is idempotent.
	GetForTLF(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID,
		mStatus kbfsmd.MergeStatus, lockBeforeGet *keybase1.LockID) (
		*RootMetadataSigned, error)
	// GetRange returns a range of (signed/encrypted) metadata objects
	// corresponding to the passed revision numbers (inclusive).
	//
	// If lockBeforeGet is not nil, it takes a lock on the lock ID before
	// trying to get anything. If taking the lock fails, an error is returned.
	// Note that taking a lock from the mdserver is idempotent.
	GetRange(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID,
		mStatus kbfsmd.MergeStatus, start, stop kbfsmd.Revision,
		lockBeforeGet *keybase1.LockID) (
		[]*RootMetadataSigned, error)
	// Put stores the (signed/encrypted) metadata object for the given
	// top-level folder. Note: If the unmerged bit is set in the metadata
	// block's flags bitmask it will be appended to the unmerged per-device
	// history.
	//
	// If lockContext is not nil, it causes the mdserver to check a lockID at
	// the time of the put, and optionally (if specified in lockContext)
	// releases the lock on the lock ID if the put is successful. Releasing the
	// lock in mdserver is idempotent.
	Put(ctx context.Context, rmds *RootMetadataSigned,
		extra kbfsmd.ExtraMetadata, lockContext *keybase1.LockContext,
		priority keybase1.MDPriority) error
	// Lock ensures lockID for tlfID is taken by this session, i.e.,
	// idempotently take the lock. If the lock is already taken by *another*
	// session, mdserver returns a throttle error, causing RPC layer at client
	// to retry. So caller of this method should observe a behavior similar to
	// blocking call, which upon successful return, makes sure the lock is
	// taken on the server.
	// Note that the lock expires after a certain time, so
	// it's important to make writes contingent on the lock by requiring the
	// lockID in Put.
	Lock(ctx context.Context, tlfID tlf.ID, lockID keybase1.LockID) error
	// ReleaseLock ensures lockID for tlfID is not taken by this session, i.e.,
	// idempotently release the lock. If the lock is already released or
	// expired, this is a no-op.
	ReleaseLock(ctx context.Context, tlfID tlf.ID, lockID keybase1.LockID) error
	// PruneBranch prunes all unmerged history for the given TLF branch.
	PruneBranch(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) error
	// RegisterForUpdate tells the MD server to inform the caller when
	// there is a merged update with a revision number greater than
	// currHead, which did NOT originate from this same MD server
	// session. This method returns a chan which can receive only a
	// single error before it's closed. If the received err is nil,
	// then there is updated MD ready to fetch which didn't originate
	// locally; if it is non-nil, then the previous registration
	// cannot send the next notification (e.g., the connection to the
	// MD server may have failed). In either case, the caller must
	// re-register to get a new chan that can receive future update
	// notifications.
	RegisterForUpdate(ctx context.Context, id tlf.ID,
		currHead kbfsmd.Revision) (<-chan error, error)
	// CancelRegistration lets the local MDServer instance know that
	// we are no longer interested in updates for the specified
	// folder. It does not necessarily forward this cancellation to
	// remote servers.
	CancelRegistration(ctx context.Context, id tlf.ID)
	// CheckForRekeys initiates the rekey checking process on the
	// server. The server is allowed to delay this request, and so it
	// returns a channel for returning the error. Actual rekey
	// requests are expected to come in asynchronously.
	CheckForRekeys(ctx context.Context) <-chan error
	// TruncateLock attempts to take the history truncation lock for
	// this folder, for a TTL defined by the server. Returns true if
	// the lock was successfully taken.
	TruncateLock(ctx context.Context, id tlf.ID) (bool, error)
	// TruncateUnlock attempts to release the history truncation lock
	// for this folder. Returns true if the lock was successfully
	// released.
	TruncateUnlock(ctx context.Context, id tlf.ID) (bool, error)
	// DisableRekeyUpdatesForTesting disables processing rekey updates
	// received from the mdserver while testing.
	DisableRekeyUpdatesForTesting()
	// Shutdown is called to shutdown an MDServer connection.
	Shutdown()
	// IsConnected returns whether the MDServer is connected.
	IsConnected() bool
	// GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF,
	// which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it
	// entered into a conflicting state. For the highest level of confidence, the caller
	// should verify the mapping with a Merkle tree lookup.
	GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (tlf.Handle, error)
	// OffsetFromServerTime is the current estimate for how off our
	// local clock is from the mdserver clock. Add this to any
	// mdserver-provided timestamps to get the "local" time of the
	// corresponding event. If the returned bool is false, then we
	// don't have a current estimate for the offset.
	OffsetFromServerTime() (time.Duration, bool)
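	// Editor's sketch (not part of the original docs) of the
	// re-registration loop implied by RegisterForUpdate above,
	// assuming a concrete MDServer value `mds`:
	//
	//	for {
	//		updateCh, err := mds.RegisterForUpdate(ctx, id, currHead)
	//		if err != nil {
	//			return err
	//		}
	//		if err := <-updateCh; err != nil {
	//			// Registration broke (e.g., lost connection); re-register.
	//			continue
	//		}
	//		// A newer merged revision exists; fetch it, advance currHead,
	//		// then loop to re-register.
	//	}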
	// GetKeyBundles looks up the key bundles for the given key
	// bundle IDs. tlfID must be non-zero but either or both wkbID
	// and rkbID can be zero, in which case nil will be returned
	// for the respective bundle. If a bundle cannot be found, an
	// error is returned and nils are returned for both bundles.
	GetKeyBundles(ctx context.Context, tlfID tlf.ID,
		wkbID kbfsmd.TLFWriterKeyBundleID, rkbID kbfsmd.TLFReaderKeyBundleID) (
		*kbfsmd.TLFWriterKeyBundleV3, *kbfsmd.TLFReaderKeyBundleV3, error)
	// CheckReachability is called when the Keybase service sends a notification
	// that network connectivity has changed.
	CheckReachability(ctx context.Context)
	// FastForwardBackoff fast forwards any existing backoff timer for
	// reconnects. If MD server is connected at the time this is called, it's
	// essentially a no-op.
	FastForwardBackoff()
}

type mdServerLocal interface {
	MDServer
	addNewAssertionForTest(
		uid keybase1.UID, newAssertion keybase1.SocialAssertion) error
	getCurrentMergedHeadRevision(ctx context.Context, id tlf.ID) (
		rev kbfsmd.Revision, err error)
	isShutdown() bool
	copy(config mdServerLocalConfig) mdServerLocal
}

// BlockServer gets and puts opaque data blocks. The instantiation
// should be able to fetch session/user details via KBPKI. On a
// put/delete, the server is responsible for: 1) checking that the ID
// matches the hash of the buffer; and 2) enforcing writer quotas.
type BlockServer interface {
	authTokenRefreshHandler

	// Get gets the (encrypted) block data associated with the given
	// block ID and context, uses the provided block key to decrypt
	// the block, and fills in the provided block object with its
	// contents, if the logged-in user has read permission for that
	// block.
	Get(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
		context kbfsblock.Context) (
		[]byte, kbfscrypto.BlockCryptKeyServerHalf, error)
	// Put stores the (encrypted) block data under the given ID
	// and context on the server, along with the server half of
	// the block key. context should contain a kbfsblock.RefNonce
	// of zero. There will be an initial reference for this block
	// for the given context.
	//
	// Put should be idempotent, although it should also return an
	// error if, for a given ID, any of the other arguments differ
	// from previous Put calls with the same ID.
	//
	// If this returns a kbfsblock.ServerErrorOverQuota, with
	// Throttled=false, the caller can treat it as informational
	// and otherwise ignore the error.
	Put(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
		context kbfsblock.Context,
		buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error
	// PutAgain re-stores a previously deleted block under the same ID
	// with the same data.
	PutAgain(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
		context kbfsblock.Context,
		buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error
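	// Editor's sketch (not part of the original docs) of the block
	// reference lifecycle described in Put and AddBlockReference,
	// assuming a concrete BlockServer value `bserver`, an initial
	// context `firstCtx` (zero RefNonce), and a later reference
	// context `refCtx` (non-zero RefNonce):
	//
	//	if err := bserver.Put(ctx, tlfID, id, firstCtx, buf, serverHalf); err != nil {
	//		return err
	//	}
	//	// A second logical reference to the same block:
	//	if err := bserver.AddBlockReference(ctx, tlfID, id, refCtx); err != nil {
	//		return err
	//	}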
	// AddBlockReference adds a new reference to the given block,
	// defined by the given context (which should contain a
	// non-zero kbfsblock.RefNonce). (Contexts with a
	// kbfsblock.RefNonce of zero should be used when putting the
	// block for the first time via Put().) Returns a
	// kbfsblock.ServerErrorBlockNonExistent if id is unknown within this
	// folder.
	//
	// AddBlockReference should be idempotent, although it should
	// also return an error if, for a given ID and refnonce, any
	// of the other fields of context differ from previous
	// AddBlockReference calls with the same ID and refnonce.
	//
	// If this returns a kbfsblock.ServerErrorOverQuota, with
	// Throttled=false, the caller can treat it as informational
	// and otherwise ignore the error.
	AddBlockReference(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
		context kbfsblock.Context) error
	// RemoveBlockReferences removes the references to the given block
	// ID defined by the given contexts. If no references to the block
	// remain after this call, the server is allowed to delete the
	// corresponding block permanently. If the reference defined by
	// the count has already been removed, the call is a no-op.
	// It returns the number of remaining not-yet-deleted references after this
	// reference has been removed.
	RemoveBlockReferences(ctx context.Context, tlfID tlf.ID,
		contexts kbfsblock.ContextMap) (liveCounts map[kbfsblock.ID]int,
		err error)
	// ArchiveBlockReferences marks the given block references as
	// "archived"; that is, they are not being used in the current
	// view of the folder, and shouldn't be served to anyone other
	// than folder writers.
	//
	// For a given ID/refnonce pair, ArchiveBlockReferences should
	// be idempotent, although it should also return an error if
	// any of the other fields of the context differ from previous
	// calls with the same ID/refnonce pair.
	ArchiveBlockReferences(ctx context.Context, tlfID tlf.ID,
		contexts kbfsblock.ContextMap) error
	// IsUnflushed returns whether a given block is being queued
	// locally for later flushing to another block server. If the
	// block is currently being flushed to the server, this should
	// return `true`, so that the caller will try to clean it up from
	// the server if it's no longer needed.
	IsUnflushed(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID) (
		bool, error)
	// Shutdown is called to shutdown a BlockServer connection.
	Shutdown(ctx context.Context)
	// GetUserQuotaInfo returns the quota for the logged-in user.
	GetUserQuotaInfo(ctx context.Context) (info *kbfsblock.QuotaInfo, err error)
	// GetTeamQuotaInfo returns the quota for a team.
	GetTeamQuotaInfo(ctx context.Context, tid keybase1.TeamID) (
		info *kbfsblock.QuotaInfo, err error)
}

// blockServerLocal is the interface for BlockServer implementations
// that store data locally.
type blockServerLocal interface {
	BlockServer
	// getAllRefsForTest returns all the known block references
	// for the given TLF, and should only be used during testing.
	getAllRefsForTest(ctx context.Context, tlfID tlf.ID) (
		map[kbfsblock.ID]blockRefMap, error)
}

// BlockSplitter decides when a file or directory block needs to be split
type BlockSplitter interface {
	// CopyUntilSplit copies data into the block until we reach the
	// point where we should split, but only if writing to the end of
	// the last block. If this is writing into the middle of a file,
	// just copy everything that will fit into the block, and assume
	// that block boundaries will be fixed later. Return how much was
	// copied.
	CopyUntilSplit(
		block *FileBlock, lastBlock bool, data []byte, off int64) int64
	// CheckSplit, given a block, figures out whether it ends at the
	// right place. If so, return 0. If not, return either the
	// offset in the block where it should be split, or -1 if more
	// bytes from the next block should be appended.
	CheckSplit(block *FileBlock) int64
	// MaxPtrsPerBlock describes the number of indirect pointers we
	// can fit into one indirect block.
	MaxPtrsPerBlock() int
	// ShouldEmbedBlockChanges decides whether we should keep the
	// block changes embedded in the MD or not.
	ShouldEmbedBlockChanges(bc *BlockChanges) bool
}
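// Editor's sketch (not part of the original docs): a greatly
// simplified write loop over BlockSplitter.CopyUntilSplit, ignoring
// indirect blocks and later split adjustment, assuming a concrete
// BlockSplitter value `bsplit` and the file's current direct blocks
// in `blocks`:
//
//	data := writeData
//	off := offsetWithinFirstBlock
//	for i, block := range blocks {
//		lastBlock := i == len(blocks)-1
//		n := bsplit.CopyUntilSplit(block, lastBlock, data, off)
//		data = data[n:]
//		off = 0 // later copies start at the beginning of the next block
//		if len(data) == 0 {
//			break
//		}
//	}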
// KeyServer fetches/writes server-side key halves from/to the key server.
type KeyServer interface {
	// GetTLFCryptKeyServerHalf gets a server-side key half for a
	// device given the key half ID.
	GetTLFCryptKeyServerHalf(ctx context.Context,
		serverHalfID kbfscrypto.TLFCryptKeyServerHalfID,
		cryptPublicKey kbfscrypto.CryptPublicKey) (
		kbfscrypto.TLFCryptKeyServerHalf, error)
	// PutTLFCryptKeyServerHalves stores server-side key halves for a
	// set of users and devices.
	PutTLFCryptKeyServerHalves(ctx context.Context,
		keyServerHalves kbfsmd.UserDeviceKeyServerHalves) error
	// DeleteTLFCryptKeyServerHalf deletes a server-side key half for a
	// device given the key half ID.
	DeleteTLFCryptKeyServerHalf(ctx context.Context,
		uid keybase1.UID, key kbfscrypto.CryptPublicKey,
		serverHalfID kbfscrypto.TLFCryptKeyServerHalfID) error
	// Shutdown is called to free any KeyServer resources.
	Shutdown()
}

// NodeChange represents a change made to a node as part of an atomic
// file system operation.
type NodeChange struct {
	Node Node
	// Basenames of entries added/removed.
	DirUpdated  []string
	FileUpdated []WriteRange
}

// Observer can be notified that there is an available update for a
// given directory. The notification callbacks should not block, or
// make any calls to the Notifier interface. Nodes passed to the
// observer should not be held past the end of the notification
// callback.
type Observer interface {
	// LocalChange announces that the file at this Node has been
	// updated locally, but not yet saved at the server.
	LocalChange(ctx context.Context, node Node, write WriteRange)
	// BatchChanges announces that the nodes have all been updated
	// together atomically. Each NodeChange in changes affects the
	// same top-level folder and branch.
	BatchChanges(ctx context.Context, changes []NodeChange)
	// TlfHandleChange announces that the handle of the corresponding
	// folder branch has changed, likely due to previously-unresolved
	// assertions becoming resolved. This indicates that the listener
	// should switch over any cached paths for this folder-branch to
	// the new name. Nodes that were acquired under the old name will
	// still continue to work, but new lookups on the old name may
	// either encounter alias errors or entirely new TLFs (in the case
	// of conflicts).
	TlfHandleChange(ctx context.Context, newHandle *TlfHandle)
}

// Notifier notifies registrants of directory changes
type Notifier interface {
	// RegisterForChanges declares that the given Observer wants to
	// subscribe to updates for the given top-level folders.
	RegisterForChanges(folderBranches []FolderBranch, obs Observer) error
	// UnregisterFromChanges declares that the given Observer no
	// longer wants to subscribe to updates for the given top-level
	// folders.
	UnregisterFromChanges(folderBranches []FolderBranch, obs Observer) error
}

// Clock is an interface for getting the current time
type Clock interface {
	// Now returns the current time.
	Now() time.Time
}

// ConflictRenamer deals with names for conflicting directory entries.
type ConflictRenamer interface {
	// ConflictRename returns the appropriately modified filename.
	ConflictRename(ctx context.Context, op op, original string) (
		string, error)
}
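// Editor's sketch (not part of the original docs): a minimal Observer
// implementation that just logs batched changes, suitable for
// attaching via Notifier.RegisterForChanges; the logger field and its
// CDebugf method are assumptions here.
//
//	type loggingObserver struct {
//		log logger.Logger
//	}
//
//	func (o *loggingObserver) LocalChange(
//		ctx context.Context, node Node, write WriteRange) {
//		o.log.CDebugf(ctx, "local change: %v", write)
//	}
//
//	func (o *loggingObserver) BatchChanges(
//		ctx context.Context, changes []NodeChange) {
//		o.log.CDebugf(ctx, "batch of %d changes", len(changes))
//	}
//
//	func (o *loggingObserver) TlfHandleChange(
//		ctx context.Context, newHandle *TlfHandle) {
//		o.log.CDebugf(ctx, "handle changed")
//	}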
MaybeStartTrace(ctx context.Context, family, title string) context.Context // MaybeFinishTrace finishes the trace attached to the given // context, if any. MaybeFinishTrace(ctx context.Context, err error) } type initModeGetter interface { // Mode indicates how KBFS is configured to run. Mode() InitMode // IsTestMode() indicates whether KBFS is running in a test. IsTestMode() bool } // Config collects all the singleton instance instantiations needed to // run KBFS in one place. The methods below are self-explanatory and // do not require comments. type Config interface { dataVersioner logMaker blockCacher blockServerGetter codecGetter cryptoPureGetter keyGetterGetter cryptoGetter signerGetter currentSessionGetterGetter diskBlockCacheGetter diskBlockCacheSetter clockGetter diskLimiterGetter syncedTlfGetterSetter initModeGetter Tracer KBFSOps() KBFSOps SetKBFSOps(KBFSOps) KBPKI() KBPKI SetKBPKI(KBPKI) KeyManager() KeyManager SetKeyManager(KeyManager) Reporter() Reporter SetReporter(Reporter) MDCache() MDCache SetMDCache(MDCache) KeyCache() KeyCache SetKeyBundleCache(kbfsmd.KeyBundleCache) KeyBundleCache() kbfsmd.KeyBundleCache SetKeyCache(KeyCache) SetBlockCache(BlockCache) DirtyBlockCache() DirtyBlockCache SetDirtyBlockCache(DirtyBlockCache) SetCrypto(Crypto) SetCodec(kbfscodec.Codec) MDOps() MDOps SetMDOps(MDOps) KeyOps() KeyOps SetKeyOps(KeyOps) BlockOps() BlockOps SetBlockOps(BlockOps) MDServer() MDServer SetMDServer(MDServer) SetBlockServer(BlockServer) KeyServer() KeyServer SetKeyServer(KeyServer) KeybaseService() KeybaseService SetKeybaseService(KeybaseService) BlockSplitter() BlockSplitter SetBlockSplitter(BlockSplitter) Notifier() Notifier SetNotifier(Notifier) SetClock(Clock) ConflictRenamer() ConflictRenamer SetConflictRenamer(ConflictRenamer) MetadataVersion() kbfsmd.MetadataVer SetMetadataVersion(kbfsmd.MetadataVer) DefaultBlockType() keybase1.BlockType SetDefaultBlockType(blockType keybase1.BlockType) RekeyQueue() RekeyQueue SetRekeyQueue(RekeyQueue) // ReqsBufSize indicates the number of read or write operations // that can be buffered per folder ReqsBufSize() int // MaxNameBytes indicates the maximum supported size of a // directory entry name in bytes. MaxNameBytes() uint32 // MaxDirBytes indicates the maximum supported plaintext size of a // directory in bytes. MaxDirBytes() uint64 // DoBackgroundFlushes says whether we should periodically try to // flush dirty files, even without a sync from the user. Should // be true except for during some testing. DoBackgroundFlushes() bool SetDoBackgroundFlushes(bool) // RekeyWithPromptWaitTime indicates how long to wait, after // setting the rekey bit, before prompting for a paper key. RekeyWithPromptWaitTime() time.Duration SetRekeyWithPromptWaitTime(time.Duration) // PrefetchStatus returns the prefetch status of a block. PrefetchStatus(context.Context, tlf.ID, BlockPointer) PrefetchStatus // GracePeriod specifies a grace period for which a delayed cancellation // waits before actually canceling the context. This is useful for giving // the critical portion of a slow remote operation some extra time to finish as // an effort to avoid conflicts. An example is an O_EXCL Create call // interrupted by an ALRM signal that actually makes it to the server, while // the application assumes it didn't, since EINTR is returned. A delayed cancellation // allows us to distinguish between a successful cancel (where the remote operation // didn't make it to the server) and a failed cancel (where the remote operation did make it to // the server).
However, the optimal value of this depends on the network // conditions. A long grace period for really good network conditions would // just unnecessarily slow down Ctrl-C. // // TODO: make this adaptive and self-adjusting over time based on network // conditions. DelayedCancellationGracePeriod() time.Duration SetDelayedCancellationGracePeriod(time.Duration) // QuotaReclamationPeriod indicates how often each TLF // should check for quota to reclaim. If the Duration.Seconds() // == 0, quota reclamation should not run automatically. QuotaReclamationPeriod() time.Duration // QuotaReclamationMinUnrefAge indicates the minimum time a block // must have been unreferenced before it can be reclaimed. QuotaReclamationMinUnrefAge() time.Duration // QuotaReclamationMinHeadAge indicates the minimum age of the // most recently merged MD update before we can run reclamation, // to avoid conflicting with a currently active writer. QuotaReclamationMinHeadAge() time.Duration // ResetCaches clears and re-initializes all data and key caches. ResetCaches() // StorageRoot returns the path to the storage root for this config. StorageRoot() string // MetricsRegistry may be nil, which should be interpreted as // not using metrics at all. (i.e., as if UseNilMetrics were // set). This differs from how go-metrics treats nil Registry // objects, which is to use the default registry. MetricsRegistry() metrics.Registry SetMetricsRegistry(metrics.Registry) // SetTraceOptions sets the options for tracing (via x/net/trace). SetTraceOptions(enabled bool) // TLFValidDuration is the time TLFs are valid before identification needs to be redone. TLFValidDuration() time.Duration // SetTLFValidDuration sets TLFValidDuration. SetTLFValidDuration(time.Duration) // BGFlushDirOpBatchSize returns the directory op batch size for // background flushes. BGFlushDirOpBatchSize() int // SetBGFlushDirOpBatchSize sets the directory op batch size for // background flushes. SetBGFlushDirOpBatchSize(s int) // BGFlushPeriod returns how long to wait for a batch to fill up // before syncing a set of changes to the servers. BGFlushPeriod() time.Duration // SetBGFlushPeriod sets how long to wait for a batch to fill up // before syncing a set of changes to the servers. SetBGFlushPeriod(p time.Duration) // Shutdown is called to free config resources. Shutdown(context.Context) error // CheckStateOnShutdown tells the caller whether or not it is safe // to check the state of the system on shutdown. CheckStateOnShutdown() bool // GetRekeyFSMLimiter returns the global rekey FSM limiter. GetRekeyFSMLimiter() *OngoingWorkLimiter } // NodeCache holds Nodes, and allows libkbfs to update them when // things change about the underlying KBFS blocks. It is probably // most useful to instantiate this on a per-folder-branch basis, so // that it can create a Path with the correct DirId and Branch name. type NodeCache interface { // GetOrCreate either makes a new Node for the given // BlockPointer, or returns an existing one. TODO: If we ever // support hard links, we will have to revisit the "name" and // "parent" parameters here. name must not be empty. Returns // an error if parent cannot be found. GetOrCreate(ptr BlockPointer, name string, parent Node) (Node, error) // Get returns the Node associated with the given ptr if one // already exists. Otherwise, it returns nil. Get(ref BlockRef) Node // UpdatePointer updates the BlockPointer for the corresponding // Node. NodeCache ignores this call when oldRef is not cached in // any Node.
Returns whether the pointer was updated. UpdatePointer(oldRef BlockRef, newPtr BlockPointer) bool // Move swaps the parent node for the corresponding Node, and // updates the node's name. NodeCache ignores the call when ptr // is not cached. If newParent is nil, it treats the ptr's // corresponding node as being unlinked from the old parent // completely. If successful, it returns a function that can be // called to undo the effect of the move (or `nil` if nothing // needs to be done); if newParent cannot be found, it returns an // error and a `nil` undo function. Move(ref BlockRef, newParent Node, newName string) ( undoFn func(), err error) // Unlink sets the corresponding node's parent to nil and caches // the provided path in case the node is still open. NodeCache // ignores the call when ptr is not cached. The path is required // because the caller may have made changes to the parent nodes // already that shouldn't be reflected in the cached path. It // returns a function that can be called to undo the effect of the // unlink (or `nil` if nothing needs to be done). Unlink(ref BlockRef, oldPath path, oldDe DirEntry) (undoFn func()) // IsUnlinked returns whether `Unlink` has been called for the // reference behind this node. IsUnlinked(node Node) bool // UnlinkedDirEntry returns a pointer to a modifiable directory // entry if `Unlink` has been called for the reference behind this // node. UnlinkedDirEntry(node Node) DirEntry // PathFromNode creates the path up to a given Node. PathFromNode(node Node) path // AllNodes returns the complete set of nodes currently in the cache. AllNodes() []Node } // fileBlockDeepCopier fetches a file block, makes a deep copy of it // (duplicating pointers for any indirect blocks) and generates a new // random temporary block ID for it. It returns the new BlockPointer, // and internally saves the block for future use. type fileBlockDeepCopier func(context.Context, string, BlockPointer) ( BlockPointer, error) // crAction represents a specific action to take as part of the // conflict resolution process. type crAction interface { // swapUnmergedBlock should be called before do(), and if it // returns true, the caller must use the merged block // corresponding to the returned BlockPointer instead of // unmergedBlock when calling do(). If BlockPointer{} is zeroPtr // (and true is returned), just swap in the regular mergedBlock. swapUnmergedBlock(unmergedChains *crChains, mergedChains *crChains, unmergedBlock *DirBlock) (bool, BlockPointer, error) // do modifies the given merged block in place to resolve the // conflict, and potentially uses the provided blockCopyFetchers to // obtain copies of other blocks (along with new BlockPointers) // when a block copy is required. do(ctx context.Context, unmergedCopier fileBlockDeepCopier, mergedCopier fileBlockDeepCopier, unmergedBlock *DirBlock, mergedBlock *DirBlock) error // updateOps potentially modifies, in place, the slices of // unmerged and merged operations stored in the corresponding // crChains for the given unmerged and merged most recent // pointers. Eventually, the "unmerged" ops will be pushed as // part of an MD update, and so should contain any operations // necessary to fully merge the unmerged data, including any // conflict resolution. The "merged" ops will be played through // locally, to notify any caches about the newly-obtained merged // data (and any changes to local data that were required as part // of conflict resolution, such as renames).
A few things to note: // * A particular action's updateOps method may be called more than // once for different sets of chains, however it should only add // new directory operations (like create/rm/rename) into directory // chains. // * updateOps doesn't necessarily result in correct BlockPointers within // each of those ops; that must happen in a later phase. // * mergedBlock can be nil if the chain is for a file. updateOps(unmergedMostRecent BlockPointer, mergedMostRecent BlockPointer, unmergedBlock *DirBlock, mergedBlock *DirBlock, unmergedChains *crChains, mergedChains *crChains) error // String returns a string representation for this crAction, used // for debugging. String() string } // RekeyQueue is a managed queue of folders needing some rekey action taken // upon them by the current client. type RekeyQueue interface { // Enqueue enqueues a folder for rekey action. If the TLF is already in the // rekey queue, the error channel of the existing one is returned. Enqueue(tlf.ID) // IsRekeyPending returns true if the given folder is in the rekey queue. // Note that an ongoing rekey doesn't count as "pending". IsRekeyPending(tlf.ID) bool // Shutdown cancels all pending rekey actions and clears the queue. It // doesn't cancel ongoing rekeys. After Shutdown() is called, the same // RekeyQueue shouldn't be used anymore. Shutdown() } // RekeyFSM is a Finite State Machine (FSM) for housekeeping rekey states for a // FolderBranch. Each FolderBranch has its own FSM for rekeys. // // See rekey_fsm.go for implementation details. // // TODO: report FSM status in FolderBranchStatus? type RekeyFSM interface { // Event sends an event to the FSM. Event(event RekeyEvent) // Shutdown shuts down the FSM. No new event should be sent into the FSM // after this method is called. Shutdown() // listenOnEvent adds a listener (callback) to the FSM so that when // event happens, callback is called with the received event. If repeatedly // is set to false, callback is called only once. Otherwise it's called every // time event happens. // // Currently this is only used in tests and for RekeyFile. See comment for // RequestRekeyAndWaitForOneFinishEvent for more details. listenOnEvent( event rekeyEventType, callback func(RekeyEvent), repeatedly bool) } // BlockRetriever specifies how to retrieve blocks. type BlockRetriever interface { // Request retrieves blocks asynchronously. Request(ctx context.Context, priority int, kmd KeyMetadata, ptr BlockPointer, block Block, lifetime BlockCacheLifetime) <-chan error // PutInCaches puts the block into the in-memory cache, and ensures that // the disk cache metadata is updated. PutInCaches(ctx context.Context, ptr BlockPointer, tlfID tlf.ID, block Block, lifetime BlockCacheLifetime, prefetchStatus PrefetchStatus) error // TogglePrefetcher creates a new prefetcher. TogglePrefetcher(enable bool, syncCh <-chan struct{}) <-chan struct{} }
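A minimal sketch of the BlockSplitter contract documented in the file above, assuming a fixed split boundary. FileBlock is reduced here to a bare Contents slice, and fixedSizeSplitter/maxSize are hypothetical names invented for illustration; the real kbfs types are richer.

package main

import "fmt"

// FileBlock is a minimal stand-in for the real kbfs type; this shape
// is an assumption for illustration only.
type FileBlock struct {
	Contents []byte
}

// fixedSizeSplitter is a hypothetical splitter that cuts file blocks
// at a fixed byte boundary, following the CopyUntilSplit/CheckSplit
// contract described in the interface comments above.
type fixedSizeSplitter struct {
	maxSize int64
}

// CopyUntilSplit copies data into block at offset off. When writing at
// the end of the last block it stops at the split point (maxSize); when
// writing into the middle it copies only what fits in the current block
// and leaves boundary fixes for later, as the contract requires.
func (s fixedSizeSplitter) CopyUntilSplit(
	block *FileBlock, lastBlock bool, data []byte, off int64) int64 {
	limit := s.maxSize
	if !lastBlock && int64(len(block.Contents)) > limit {
		limit = int64(len(block.Contents))
	}
	if off >= limit {
		return 0
	}
	n := int64(len(data))
	if off+n > limit {
		n = limit - off
	}
	if need := off + n; int64(len(block.Contents)) < need {
		block.Contents = append(block.Contents,
			make([]byte, need-int64(len(block.Contents)))...)
	}
	copy(block.Contents[off:off+n], data[:n])
	return n
}

// CheckSplit returns 0 if the block ends at the right place, or the
// offset at which it should be split if it has grown past maxSize.
func (s fixedSizeSplitter) CheckSplit(block *FileBlock) int64 {
	if int64(len(block.Contents)) > s.maxSize {
		return s.maxSize
	}
	return 0
}

func main() {
	b := &FileBlock{}
	s := fixedSizeSplitter{maxSize: 8}
	n := s.CopyUntilSplit(b, true, []byte("hello world"), 0)
	fmt.Println(n, string(b.Contents), s.CheckSplit(b)) // 8 "hello wo" 0
}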
1
18,514
This doesn't seem to be used anywhere, probably doesn't need to be a new interface.
keybase-kbfs
go
@@ -363,7 +363,9 @@ func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.Sta usageBytes, limitBytes, err := f.quotaUsage.Get(ctx, quotaUsageStaleTolerance) if err != nil { f.log.CDebugf(ctx, "Getting quota usage error: %v", err) - return err + // Ignore the error here so Statfs() can succeed. Otherwise reading a + // public TLF while logged out fails on macOS. + return nil } total := getNumBlocksFromSize(uint64(limitBytes))
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libfuse import ( "net" "net/http" "net/http/pprof" "os" "runtime" "strconv" "strings" "sync" "time" "bazil.org/fuse" "bazil.org/fuse/fs" "github.com/keybase/client/go/libkb" "github.com/keybase/client/go/logger" "github.com/keybase/kbfs/libfs" "github.com/keybase/kbfs/libkbfs" "github.com/pkg/errors" "golang.org/x/net/context" "golang.org/x/net/trace" ) // FS implements the newfuse FS interface for KBFS. type FS struct { config libkbfs.Config fuse *fs.Server conn *fuse.Conn log logger.Logger errLog logger.Logger // Protects debugServerListener and debugServer.addr. debugServerLock sync.Mutex debugServerListener net.Listener // An HTTP server used for debugging. Normally off unless // turned on via enableDebugServer(). debugServer *http.Server notifications *libfs.FSNotifications // remoteStatus is the current status of remote connections. remoteStatus libfs.RemoteStatus // this is like time.AfterFunc, except that in some tests this can be // overridden to execute f without any delay. execAfterDelay func(d time.Duration, f func()) root Root platformParams PlatformParams quotaUsage *libkbfs.EventuallyConsistentQuotaUsage } func makeTraceHandler(renderFn func(http.ResponseWriter, *http.Request, bool)) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, req *http.Request) { any, sensitive := trace.AuthRequest(req) if !any { http.Error(w, "not allowed", http.StatusUnauthorized) return } w.Header().Set("Content-Type", "text/html; charset=utf-8") renderFn(w, req, sensitive) } } // NewFS creates an FS func NewFS(config libkbfs.Config, conn *fuse.Conn, debug bool, platformParams PlatformParams) *FS { log := config.MakeLogger("kbfsfuse") // We need extra depth for errors, so that we can report the line // number for the caller of reportErr, not reportErr itself. errLog := log.CloneWithAddedDepth(1) if debug { // Turn on debugging. TODO: allow a proper log file and // style to be specified. log.Configure("", true, "") errLog.Configure("", true, "") } serveMux := http.NewServeMux() // Replicate the default endpoints from pprof's init function. serveMux.HandleFunc("/debug/pprof/", pprof.Index) serveMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) serveMux.HandleFunc("/debug/pprof/profile", pprof.Profile) serveMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) serveMux.HandleFunc("/debug/pprof/trace", pprof.Trace) // Replicate the default endpoints from net/trace's init function. serveMux.HandleFunc("/debug/requests", makeTraceHandler(func(w http.ResponseWriter, req *http.Request, sensitive bool) { trace.Render(w, req, sensitive) })) serveMux.HandleFunc("/debug/events", makeTraceHandler(trace.RenderEvents)) // Leave Addr blank to be set in enableDebugServer() and // disableDebugServer(). 
debugServer := &http.Server{ Handler: serveMux, ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, } fs := &FS{ config: config, conn: conn, log: log, errLog: errLog, debugServer: debugServer, notifications: libfs.NewFSNotifications(log), platformParams: platformParams, quotaUsage: libkbfs.NewEventuallyConsistentQuotaUsage(config, "FS"), } fs.root.private = &FolderList{ fs: fs, folders: make(map[string]*TLF), } fs.root.public = &FolderList{ fs: fs, public: true, folders: make(map[string]*TLF), } fs.execAfterDelay = func(d time.Duration, f func()) { time.AfterFunc(d, f) } return fs } // tcpKeepAliveListener is copied from net/http/server.go, since it is // used in http.(*Server).ListenAndServe() which we want to emulate in // enableDebugServer. type tcpKeepAliveListener struct { *net.TCPListener } func (tkal tcpKeepAliveListener) Accept() (c net.Conn, err error) { tc, err := tkal.AcceptTCP() if err != nil { return } tc.SetKeepAlive(true) tc.SetKeepAlivePeriod(3 * time.Minute) return tc, nil } func (f *FS) enableDebugServer(ctx context.Context, port uint16) error { f.debugServerLock.Lock() defer f.debugServerLock.Unlock() if f.debugServer.Addr != "" { return errors.Errorf("Debug server already enabled at %s", f.debugServer.Addr) } addr := net.JoinHostPort("localhost", strconv.FormatUint(uint64(port), 10)) f.log.CDebugf(ctx, "Enabling debug http server at %s", addr) // Do Listen and Serve separately so we can catch errors with // the port (e.g. "port already in use") and return it. listener, err := net.Listen("tcp", addr) if err != nil { f.log.CDebugf(ctx, "Got error when listening on %s: %+v", addr, err) return err } f.debugServer.Addr = addr f.debugServerListener = tcpKeepAliveListener{listener.(*net.TCPListener)} // This seems racy because the spawned goroutine may be // scheduled to run after disableDebugServer is called. But // that's okay since Serve will error out immediately after // f.debugServerListener.Close() is called. go func(server *http.Server, listener net.Listener) { err := server.Serve(listener) f.log.Debug("Debug http server ended with %+v", err) }(f.debugServer, f.debugServerListener) return nil } func (f *FS) disableDebugServer(ctx context.Context) error { f.debugServerLock.Lock() defer f.debugServerLock.Unlock() if f.debugServer.Addr == "" { return errors.New("Debug server already disabled") } f.log.CDebugf(ctx, "Disabling debug http server at %s", f.debugServer.Addr) // TODO: Use f.debugServer.Close() or f.debugServer.Shutdown() // when we switch to go 1.8. err := f.debugServerListener.Close() f.log.CDebugf(ctx, "Debug http server shutdown with %+v", err) // Assume the close succeeds in stopping the server, even if // it returns an error. f.debugServer.Addr = "" f.debugServerListener = nil return err } // SetFuseConn sets fuse connection for this FS. func (f *FS) SetFuseConn(fuse *fs.Server, conn *fuse.Conn) { f.fuse = fuse f.conn = conn } // NotificationGroupWait - wait on the notification group. func (f *FS) NotificationGroupWait() { f.notifications.Wait() } func (f *FS) queueNotification(fn func()) { f.notifications.QueueNotification(fn) } // LaunchNotificationProcessor launches the notification processor. func (f *FS) LaunchNotificationProcessor(ctx context.Context) { f.notifications.LaunchProcessor(ctx) } // WithContext adds app- and request-specific values to the context. // libkbfs.NewContextWithCancellationDelayer is called before returning the // context to ensure the cancellation is controllable. 
// // It is called by FUSE for normal runs, but may be called explicitly in other // settings, such as tests. func (f *FS) WithContext(ctx context.Context) context.Context { id, errRandomReqID := libkbfs.MakeRandomRequestID() if errRandomReqID != nil { f.log.Errorf("Couldn't make request ID: %v", errRandomReqID) } // context.WithDeadline uses clock from `time` package, so we are not using // f.config.Clock() here start := time.Now() ctx, err := libkbfs.NewContextWithCancellationDelayer( libkbfs.NewContextReplayable(ctx, func(ctx context.Context) context.Context { ctx = context.WithValue(ctx, libfs.CtxAppIDKey, f) logTags := make(logger.CtxLogTags) logTags[CtxIDKey] = CtxOpID ctx = logger.NewContextWithLogTags(ctx, logTags) if errRandomReqID == nil { // Add a unique ID to this context, identifying a particular // request. ctx = context.WithValue(ctx, CtxIDKey, id) } if runtime.GOOS == "darwin" { // Timeout operations before they hit the osxfuse time limit, // so we don't hose the entire mount (Fixed in OSXFUSE 3.2.0). // The timeout is 60 seconds, but it looks like sometimes it // tries multiple attempts within that 60 seconds, so let's go // a little under 60/3 to be safe. // // It should be safe to ignore the CancelFunc here because our // parent context will be canceled by the FUSE serve loop. ctx, _ = context.WithDeadline(ctx, start.Add(19*time.Second)) } return ctx })) if err != nil { panic(err) // this should never happen } return ctx } func (f *FS) maybeStartTrace( ctx context.Context, family, title string) context.Context { // TODO: Add options to enable/disable tracing, or adjust // trace detail. tr := trace.New(family, title) ctx = trace.NewContext(ctx, tr) return ctx } func (f *FS) maybeFinishTrace(ctx context.Context, err error) { if tr, ok := trace.FromContext(ctx); ok { if err != nil { tr.LazyPrintf("err=%+v", err) tr.SetError() } tr.Finish() } } // Serve FS. Will block. func (f *FS) Serve(ctx context.Context) error { srv := fs.New(f.conn, &fs.Config{ WithContext: func(ctx context.Context, _ fuse.Request) context.Context { return f.WithContext(ctx) }, }) f.fuse = srv f.notifications.LaunchProcessor(ctx) f.remoteStatus.Init(ctx, f.log, f.config, f) // Blocks forever, unless an interrupt signal is received // (handled by libkbfs.Init). return srv.Serve(f) } // UserChanged is called from libfs. func (f *FS) UserChanged(ctx context.Context, oldName, newName libkb.NormalizedUsername) { f.log.CDebugf(ctx, "User changed: %q -> %q", oldName, newName) f.root.public.userChanged(ctx, oldName, newName) f.root.private.userChanged(ctx, oldName, newName) } var _ libfs.RemoteStatusUpdater = (*FS)(nil) var _ fs.FS = (*FS)(nil) var _ fs.FSStatfser = (*FS)(nil) func (f *FS) reportErr(ctx context.Context, mode libkbfs.ErrorModeType, err error) { if err == nil { f.errLog.CDebugf(ctx, "Request complete") return } f.config.Reporter().ReportErr(ctx, "", false, mode, err) // We just log the error as debug, rather than error, because it // might just indicate an expected error such as an ENOENT. // // TODO: Classify errors and escalate the logging level of the // important ones. f.errLog.CDebugf(ctx, err.Error()) } // Root implements the fs.FS interface for FS. func (f *FS) Root() (fs.Node, error) { return &f.root, nil } // QuotaUsageStaleTolerance is the lifespan of stale usage data that libfuse // accepts in the Statfs handler. In other words, this causes libkbfs to issue // a fresh RPC call if cached usage data is older than 10s. 
const quotaUsageStaleTolerance = 10 * time.Second // Statfs implements the fs.FSStatfser interface for FS. func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error { *resp = fuse.StatfsResponse{ Bsize: fuseBlockSize, Namelen: ^uint32(0), Frsize: fuseBlockSize, } usageBytes, limitBytes, err := f.quotaUsage.Get(ctx, quotaUsageStaleTolerance) if err != nil { f.log.CDebugf(ctx, "Getting quota usage error: %v", err) return err } total := getNumBlocksFromSize(uint64(limitBytes)) used := getNumBlocksFromSize(uint64(usageBytes)) resp.Blocks = total resp.Bavail = total - used resp.Bfree = total - used return nil } // Root represents the root of the KBFS file system. type Root struct { private *FolderList public *FolderList } var _ fs.NodeAccesser = (*FolderList)(nil) // Access implements fs.NodeAccesser interface for *Root. func (*Root) Access(ctx context.Context, r *fuse.AccessRequest) error { if int(r.Uid) != os.Getuid() && // Finder likes to use UID 0 for some operations. osxfuse already allows // ACCESS and GETXATTR requests from root to go through. This allows root // in ACCESS handler. See KBFS-1733 for more details. int(r.Uid) != 0 { // short path: not accessible by anybody other than root or the user who // executed the kbfsfuse process. return fuse.EPERM } if r.Mask&02 != 0 { return fuse.EPERM } return nil } var _ fs.Node = (*Root)(nil) // Attr implements the fs.Node interface for Root. func (*Root) Attr(ctx context.Context, a *fuse.Attr) error { a.Mode = os.ModeDir | 0500 return nil } var _ fs.NodeRequestLookuper = (*Root)(nil) // Lookup implements the fs.NodeRequestLookuper interface for Root. func (r *Root) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (_ fs.Node, err error) { r.log().CDebugf(ctx, "FS Lookup %s", req.Name) defer func() { r.private.fs.reportErr(ctx, libkbfs.ReadMode, err) }() specialNode := handleNonTLFSpecialFile( req.Name, r.private.fs, &resp.EntryValid) if specialNode != nil { return specialNode, nil } platformNode, err := r.platformLookup(ctx, req, resp) if platformNode != nil || err != nil { return platformNode, err } switch req.Name { case PrivateName: return r.private, nil case PublicName: return r.public, nil } // Don't want to pop up errors on special OS files. if strings.HasPrefix(req.Name, ".") { return nil, fuse.ENOENT } return nil, libkbfs.NoSuchFolderListError{ Name: req.Name, PrivName: PrivateName, PubName: PublicName, } } // PathType returns PathType for this folder func (r *Root) PathType() libkbfs.PathType { return libkbfs.KeybasePathType } var _ fs.NodeCreater = (*Root)(nil) // Create implements the fs.NodeCreater interface for Root. func (r *Root) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (_ fs.Node, _ fs.Handle, err error) { r.log().CDebugf(ctx, "FS Create") defer func() { r.private.fs.reportErr(ctx, libkbfs.WriteMode, err) }() return nil, nil, libkbfs.NewWriteUnsupportedError(libkbfs.BuildCanonicalPath(r.PathType(), req.Name)) } // Mkdir implements the fs.NodeMkdirer interface for Root. func (r *Root) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (_ fs.Node, err error) { r.log().CDebugf(ctx, "FS Mkdir") defer func() { r.private.fs.reportErr(ctx, libkbfs.WriteMode, err) }() return nil, libkbfs.NewWriteUnsupportedError(libkbfs.BuildCanonicalPath(r.PathType(), req.Name)) } var _ fs.Handle = (*Root)(nil) var _ fs.HandleReadDirAller = (*Root)(nil) // ReadDirAll implements the ReadDirAll interface for Root. 
func (r *Root) ReadDirAll(ctx context.Context) (res []fuse.Dirent, err error) { r.log().CDebugf(ctx, "FS ReadDirAll") defer func() { r.private.fs.reportErr(ctx, libkbfs.ReadMode, err) }() res = []fuse.Dirent{ { Type: fuse.DT_Dir, Name: PrivateName, }, { Type: fuse.DT_Dir, Name: PublicName, }, } if r.private.fs.platformParams.shouldAppendPlatformRootDirs() { res = append(res, platformRootDirs...) } if name := r.private.fs.remoteStatus.ExtraFileName(); name != "" { res = append(res, fuse.Dirent{Type: fuse.DT_File, Name: name}) } return res, nil } func (r *Root) log() logger.Logger { return r.private.fs.log }
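The execAfterDelay field in the file above exists precisely so that tests can strip out real timer delays. Here is a minimal sketch of that pattern in isolation; scheduler and newScheduler are hypothetical names standing in for FS and NewFS.

package main

import (
	"fmt"
	"time"
)

// scheduler mirrors the execAfterDelay field in FS above: production
// code schedules via time.AfterFunc, while tests can swap in an
// immediate executor to avoid real delays.
type scheduler struct {
	execAfterDelay func(d time.Duration, f func())
}

func newScheduler() *scheduler {
	return &scheduler{
		execAfterDelay: func(d time.Duration, f func()) {
			time.AfterFunc(d, f)
		},
	}
}

func main() {
	s := newScheduler()
	// In a test, replace the field so the callback runs with no delay:
	s.execAfterDelay = func(_ time.Duration, f func()) { f() }
	s.execAfterDelay(time.Hour, func() {
		fmt.Println("ran immediately under the test override")
	})
}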
1
16,502
This is a good start, but what do you think about just using `libkbfs.GetCurrentSessionIfPossible()` to avoid calling this altogether if there's no session? That way we can avoid continuous RTTs to the server when logged out.
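A rough, non-authoritative sketch of what the reviewer is suggesting: check for a session first and skip the quota RPC entirely when logged out, while still swallowing quota errors so Statfs() succeeds. sessionChecker, quotaGetter, and statfsBlocks are hypothetical stand-ins invented here; in the real code the check would go through libkbfs.GetCurrentSessionIfPossible(), whose exact signature isn't shown in this record.

package main

import (
	"context"
	"errors"
	"fmt"
)

// quotaGetter abstracts the f.quotaUsage.Get call from the file above.
type quotaGetter func(ctx context.Context) (usage, limit int64, err error)

// sessionChecker is a hypothetical stand-in for the session check the
// reviewer suggests; the real libkbfs API's signature may differ.
type sessionChecker func(ctx context.Context) bool

// statfsBlocks returns (total, free) blocks for a Statfs response. It
// skips the quota RPC when there is no session, and degrades to zeros
// on error instead of failing the whole Statfs call, as in the patch.
func statfsBlocks(ctx context.Context, hasSession sessionChecker,
	getQuota quotaGetter, blockSize uint64) (total, free uint64) {
	if !hasSession(ctx) {
		return 0, 0 // logged out: avoid continuous RTTs to the server
	}
	usage, limit, err := getQuota(ctx)
	if err != nil {
		return 0, 0 // ignore the error so Statfs can still succeed
	}
	total = uint64(limit) / blockSize
	used := uint64(usage) / blockSize
	return total, total - used
}

func main() {
	ctx := context.Background()
	loggedOut := sessionChecker(func(context.Context) bool { return false })
	failing := quotaGetter(func(context.Context) (int64, int64, error) {
		return 0, 0, errors.New("no quota")
	})
	fmt.Println(statfsBlocks(ctx, loggedOut, failing, 512)) // 0 0
}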
keybase-kbfs
go
@@ -0,0 +1,18 @@ +<div class="dropdown edit_menu"> + <%= link_to link_text, edit_path, + :id => 'editanchor', + :class => "geolink tab #{link_class}" %> + <a class='dropdown-toggle' data-toggle='dropdown' href='#'> + <b class="caret"></b> + </a> + <ul class='dropdown-menu' role='menu' aria-labelledby='dLabel'> + <% Editors::RECOMMENDED_EDITORS.each do |editor| %> + <li> + <%= link_to t('layouts.edit_with', :editor => t("editor.#{editor}.description")), + edit_path(:editor => editor), + :data => { :editor => editor }, + :class => "geolink #{link_class}" %> + </li> + <% end %> + </ul> +</div>
1
1
8,695
This `aria-labelledby` stuff is all new to me, but my reading of the spec is that the value should be a list of element id values, and `dLabel` doesn't seem to be the id of any element?
openstreetmap-openstreetmap-website
rb
@@ -6,16 +6,16 @@ module Bolt class Step class Upload < Step def self.allowed_keys - super + Set['source', 'destination'] + super + Set['source', 'destination', 'upload'] end def self.required_keys - Set['destination', 'source', 'targets'] + Set['upload', 'destination', 'targets'] end def initialize(step_body) super - @source = step_body['source'] + @source = step_body['upload'] || step_body['source'] @destination = step_body['destination'] end
1
# frozen_string_literal: true module Bolt class PAL class YamlPlan class Step class Upload < Step def self.allowed_keys super + Set['source', 'destination'] end def self.required_keys Set['destination', 'source', 'targets'] end def initialize(step_body) super @source = step_body['source'] @destination = step_body['destination'] end def transpile code = String.new(" ") code << "$#{@name} = " if @name fn = 'upload_file' args = [@source, @destination, @targets] args << @description if @description code << function_call(fn, args) code << "\n" end end end end end end
1
15,357
Does this fail validation if you now use the `upload` key instead of `source`?
puppetlabs-bolt
rb
@@ -733,7 +733,7 @@ CmpStoredProc::ExecStatus CmpStoredProc::close(CmpSPDataObject&) // Methods of CmpISPFuncs // ----------------------------------------------------------------------- -NAList<CmpISPFuncs::ProcFuncsStruct> CmpISPFuncs::procFuncsArray_(256); +NAList<CmpISPFuncs::ProcFuncsStruct> CmpISPFuncs::procFuncsArray_(NULL,256); CmpISPFuncs::CmpISPFuncs() {
1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ***************************************************************************** * * File: CmpStoredProc.C * Description: The implementation of the internal stored procedure related * classes. * * * Created: 03/14/97 * Language: C++ * * * * ***************************************************************************** */ #define SQLPARSERGLOBALS_FLAGS #define SQLPARSERGLOBALS_NADEFAULTS #include "CmpStoredProc.h" #include "TrafDDLdesc.h" #include "parser.h" #include "str.h" #include "ElemDDLColDef.h" #include "CmpErrors.h" #include "CmpContext.h" #include "CmpMessage.h" // for CmpMessageISPRequest #include "ComDiags.h" #include "CmpDescribe.h" // for sendAllControls #include "CharType.h" #include "NumericType.h" #include "DatetimeType.h" #include "DTICommonType.h" // for DatetimeIntervalCommonType #include "ItemColRef.h" // for ConstValue #include "ItemNAType.h" // for NATypeToItem #include "ItemOther.h" // for ItemList #include "NAExit.h" #include "NAMemory.h" #include "NAString.h" #include "ParserMsg.h" #include "StoredProcInterface.h" #include <time.h> // timestamp to generate a unique table name #include <memory.h> // for memset #include "SqlParserGlobalsCmn.h" // helper routines used in this cpp file static const char emptyString[] = ""; inline static char *copyString(const NAString &s) // cf. readRealArk.cpp { #pragma nowarn(1506) // warning elimination char *c = new(CmpCommon::statementHeap()) char[s.length()+1]; str_cpy_all(c, s.data(), s.length()+1); return c; #pragma warn(1506) // warning elimination } static void CmpSPERROR2Diags(const SP_ERROR_STRUCT* spError, ComDiagsArea* diags) { if ( !diags ) return; if (spError[0].error == arkcmpErrorISPMergeCatDiags) { // Special error message to indicate that errors should really be // merged in from the catalog manager diags area. // not supported, raise unsupported error instead *diags << DgSqlCode(-4222) << DgString0("CmpSPERROR2Diags"); return; } for ( Int32 i=0; i< SP_MAX_ERROR_STRUCTS; i++) { const SP_ERROR_STRUCT* pSET = &(spError[i]); if (pSET->error) { *diags << DgSqlCode(pSET->error); if(pSET->error == -20067) { //IDS_UTILITIES_BADSYNTAX = 20067 // If utilities parser returned syntax error for syntax based // utilities, error IDS_UTILITIES_BADSYNTAX is returned by // utilities code to mxmcp. // pSET->optionalString[1] has the utilities command from command line. // pSET->optionalInteger[0] has the approximate position of error in // the syntax of the utilities command given by user in command line. 
// The approximate error position is returned by utilities parser. // Function StoreSyntaxError(.....) takes the information to put a // caret (^) in the approximate position of the command and the // information is saved in diags. StoreSyntaxError(pSET->optionalString[1], // const char * input_str pSET->optionalInteger[0], // Int32 input_pos *diags, // ComDiagsArea & diagsArea 0, // Int32 dgStrNum CharInfo::UTF8, // CharInfo::CharSet input_str_cs CharInfo::UTF8); // CharInfo::CharSet terminal_cs } else{ if ( pSET->optionalString[0] && pSET->optionalString[0] != (char *) ComDiags_UnInitialized_Int ) *diags << DgString0(pSET->optionalString[0]); if ( pSET->optionalString[1] && pSET->optionalString[1] != (char *)ComDiags_UnInitialized_Int ) *diags << DgString1(pSET->optionalString[1]); if ( pSET->optionalString[2] && pSET->optionalString[2] != (char *)ComDiags_UnInitialized_Int ) *diags << DgString2(pSET->optionalString[2]); if ( pSET->optionalString[3] && pSET->optionalString[3] != (char *)ComDiags_UnInitialized_Int ) *diags << DgString3(pSET->optionalString[3]); if ( pSET->optionalString[4] && pSET->optionalString[4] != (char *)ComDiags_UnInitialized_Int ) *diags << DgString4(pSET->optionalString[4]); if ( pSET->optionalInteger[0] != ComDiags_UnInitialized_Int ) *diags << DgInt0(pSET->optionalInteger[0]); if ( pSET->optionalInteger[1] != ComDiags_UnInitialized_Int ) *diags << DgInt1(pSET->optionalInteger[1]); if ( pSET->optionalInteger[2] != ComDiags_UnInitialized_Int ) *diags << DgInt2(pSET->optionalInteger[2]); if ( pSET->optionalInteger[3] != ComDiags_UnInitialized_Int ) *diags << DgInt3(pSET->optionalInteger[3]); if ( pSET->optionalInteger[4] != ComDiags_UnInitialized_Int ) *diags << DgInt4(pSET->optionalInteger[4]); } } else if ( i==0 ) { // this is the case that ISP implementation does not return any // SP_ERROR_STRUCT info back, but does return with error status. *diags << DgSqlCode(arkcmpErrorISPNoSPError); break; } else // no more SP_ERROR_STRUCT. break; } } // ----------------------------------------------------------------------- // helper routines used by both CmpSPInputFormat and CmpSPOutputFormat // ----------------------------------------------------------------------- // This routine converts a field description in SP_FIELDDESC_STRUCT // into an ElemDDLColDef. 
Error will be put into context static ElemDDLColDef* SPFieldDesc2ElemDDLColDef(SP_FIELDDESC_STRUCT* fd, CmpContext* context) { ElemDDLColDef* elemDDL=0; if (!fd) return 0; Parser parser(context); if ( !(elemDDL = parser.parseColumnDefinition(fd->COLUMN_DEF, strlen(fd->COLUMN_DEF), CharInfo::UTF8 ))) { return 0; // the error is put into context_ diags by parser already } return elemDDL; } // ----------------------------------------------------------------------- // Methods of CmpSPInputFormat // ----------------------------------------------------------------------- CmpSPInputFormat::CmpSPInputFormat(CmpContext* context) { context_ = context; inputType_ = 0; } CmpSPInputFormat::~CmpSPInputFormat() { } NABoolean CmpSPInputFormat::SetFormat(Lng32 ncols, SP_FIELDDESC_STRUCT* fields) { SP_FIELDDESC_STRUCT* fd = fields; ElemDDLColDef* elem=0; for (Int32 i=0; i < ncols; i++, fd++) { if ( !( elem = SPFieldDesc2ElemDDLColDef(fd, context_) ) ) return FALSE; ItemExpr* item = new((CollHeap*)context_->statementHeap()) NATypeToItem((NAType*)elem->getColumnDataType()); if (inputType_) inputType_ = new((CollHeap*)context_->statementHeap()) ItemList(inputType_, item); else inputType_ = item; } return TRUE; } // ----------------------------------------------------------------------- // Methods of CmpSPOutputFormat // ----------------------------------------------------------------------- CmpSPOutputFormat::CmpSPOutputFormat(CmpContext* context) { context_ = context; } NABoolean CmpSPOutputFormat::SetFormat(Lng32 nCols, const char* tableName, SP_FIELDDESC_STRUCT* fields, Lng32 nKeys, SP_KEYDESC_STRUCT *keys) { // To convert the SP_FIELDDESC_STRUCT into columns_desc, // 1. call the parser to parse the column definition. // 2. The ElemDDLColDef is used to generate columns_desc. // 3. Refer to catman/CatExecCreateTable.C/CatBuildColumnList // to convert ElemDDLColDefArray to CatColumnList. // 4. Refer to sqlcat/readRealArk.C/convertColumns to convert // CatColumnList to columns_desc. // Note : 03/18/97. // It is not a good practice to clone the conversion code. The // correct way to handle this is to call CatBuildColumnList and // convertColumns since it will conforms to however catman processes // the DDL queries from StmtDDLCreateTable RelExpr. But in // CatBuildColumnList routine, the CatRWBaseTable is created which // involves the SOL cache routines and it will check the security for // creating tables and a whole lot more logics deep in catman. // copying the code is much easier. ( Ya, I'm copying // the code and blame it to someone else :-) ). // Since this is for the internally developed stored // procedures, once there are changes in the data types supported and // requires changes in CatBuildColumnList or convertColumns routines. // The following conversion code needs to be changed accordingly. // Currently (03/29/97), catman has routines ( internal usage only ) // to generate the in memory virtual table structure without going // through SOL. It is used for index table ( i.e. with type E_INDEX ) // Once this interface is externalized, it should be used here to // go through catman code to get desc structures. nCols_ = nCols; // set up tableDesc_, allocate the storage from allocate routine, // so the delete of the tableDesc_ could be done correctly. // As in sqlcat/desc.h TrafAllocateDDLdesc(), allocate routine gets // the storage from HEAP ( i.e. CmpCommon::statementHeap() ). // Likewise, copyString should be the same implementation as in // readRealArk.cpp. 
It was externalized originally, but being put as // static function by someone. Currently the code copyString is cloned // in the beginning of the file. ( which will allocate the memory from // statementHeap ). TrafDesc* table_desc = TrafAllocateDDLdesc(DESC_TABLE_TYPE, NULL); tableDesc_ = table_desc; char* objName = copyString(NAString(tableName)); table_desc->tableDesc()->tablename = objName; table_desc->tableDesc()->record_length = 0; // to be set later in generator. table_desc->tableDesc()->colcount = (Int32)nCols; TrafDesc * files_desc = TrafAllocateDDLdesc(DESC_FILES_TYPE, NULL); table_desc->tableDesc()->files_desc = files_desc; // populate the TrafColumnsDesc TrafDesc* prev_col_desc = 0; TrafDesc* first_col_desc = 0; for ( Int32 i=0; i < nCols; i++ ) { TrafDesc* column_desc = TrafAllocateDDLdesc(DESC_COLUMNS_TYPE, NULL); if (prev_col_desc) prev_col_desc->next = column_desc; else first_col_desc = column_desc; prev_col_desc = column_desc; if ( !getColumnDesc( &(fields[i]), (column_desc->columnsDesc()))) { *(context_->diags()) << DgSqlCode(arkcmpErrorISPFieldDef); tableDesc_ = 0; // since HEAP is from statement heap, it will // be removed automatically at the end of statement. return FALSE; } column_desc->columnsDesc()->colnumber = i; } table_desc->tableDesc()->columns_desc = first_col_desc; // populate index_desc and key_desc TrafDesc* keys_desc = 0; if ( !getKeysDesc( nKeys, keys, keys_desc) ) { *(context_->diags()) << DgSqlCode(arkcmpErrorISPFieldDef); tableDesc_ = 0; return FALSE; } TrafDesc * index_desc = TrafAllocateDDLdesc(DESC_INDEXES_TYPE, NULL); index_desc->indexesDesc()->tablename = objName; index_desc->indexesDesc()->indexname = objName; index_desc->indexesDesc()->keytag = 0; // primary index index_desc->indexesDesc()->record_length = table_desc->tableDesc()->record_length; index_desc->indexesDesc()->colcount = table_desc->tableDesc()->colcount; index_desc->indexesDesc()->blocksize = 4096; // anything > 0 // cannot simply point to same files desc as the table one, // because then ReadTableDef::deleteTree frees same memory twice (error) TrafDesc * i_files_desc = TrafAllocateDDLdesc(DESC_FILES_TYPE, NULL); index_desc->indexesDesc()->files_desc = i_files_desc; index_desc->indexesDesc()->keys_desc = keys_desc; table_desc->tableDesc()->indexes_desc = index_desc; return TRUE; } CmpSPOutputFormat::~CmpSPOutputFormat() { } NABoolean CmpSPOutputFormat::getColumnDesc(SP_FIELDDESC_STRUCT* fDesc, TrafColumnsDesc* colsDesc ) { ElemDDLColDef* elemDDL; if (! (elemDDL=SPFieldDesc2ElemDDLColDef(fDesc, context_)) ) return FALSE; else { // convert ElemDDLColViewDef into CatColumn, then column_desc if ( !ElemDDLColDef2ColumnDescStruct (elemDDL, tableDesc_->tableDesc()->tablename, colsDesc) ) return FALSE; } return TRUE; } NABoolean CmpSPOutputFormat::getKeysDesc(Lng32 nKeys, SP_KEYDESC_STRUCT* keys, TrafDesc* &keysDesc) { // key is not supported yet in FCS ( Sep. 97 release ) needs to rework on this. TrafDesc* prev_key_desc = 0; TrafDesc* first_key_desc = 0; for ( Int32 i=0; i < nKeys; i++ ) { TrafDesc* key_desc = TrafAllocateDDLdesc(DESC_KEYS_TYPE, NULL); if (prev_key_desc) prev_key_desc->next = key_desc; else first_key_desc = key_desc; prev_key_desc = key_desc; key_desc->keysDesc()->keyseqnumber = i; // TODO, find out the tablecolnumber from column name ???? 
key_desc->keysDesc()->tablecolnumber = 0; key_desc->keysDesc()->setDescending(FALSE); } keysDesc = first_key_desc; return TRUE; } // ##TODO: this code might be better put into ReadTableDef or readRealArk.cpp // ##For now for new data types, etc. this piece will be changed accordingly. NABoolean CmpSPOutputFormat::ElemDDLColDef2ColumnDescStruct (ElemDDLColDef* elem, const char* tableName, TrafColumnsDesc* colDesc) { // Just copy the pointer for this name -- // no need to alloc + strcpy a la copyString // colDesc->tablename = (char *)tableName; colDesc->colname = copyString(elem->getColumnName()); // colDesc->colnumber, filled outside NAType* genericType = elem->getColumnDataType(); colDesc->datatype = genericType->getFSDatatype(); colDesc->length = genericType->getNominalSize(); colDesc->pictureText = (char *)emptyString; // The logic for converting from an NAType to column_desc is also // in the catman code -- readRealArk.cpp::convertColumn(). Any changes // there must be reflected here as well and vice versa. if ( genericType->getTypeQualifier() == NA_NUMERIC_TYPE ) { NumericType & nt = *((NumericType*)genericType); colDesc->scale = nt.getScale(); // if this is a float (real or double) datatype, // then get the precision. Otherwise, for other // numeric type, get the precision only if it // is not binary precision. if(nt.isExact() && nt.binaryPrecision() && (nt.getFSDatatype() != REC_BPINT_UNSIGNED)) { colDesc->precision = 0; } else { colDesc->precision = nt.getPrecision(); } } else { colDesc->scale = 0; colDesc->precision = 0; } #pragma nowarn(1506) // warning elimination if ( genericType->getTypeQualifier() == NA_CHARACTER_TYPE ) { CharType & charType = (CharType &) *genericType; colDesc->character_set = charType.getCharSet(); colDesc->encoding_charset = charType.getEncodingCharSet(); colDesc->collation_sequence = charType.getCollation(); colDesc->setUpshifted(charType.isUpshifted()); } if ( genericType->getTypeQualifier() == NA_DATETIME_TYPE || genericType->getTypeQualifier() == NA_INTERVAL_TYPE ) { DatetimeIntervalCommonType& dti = (DatetimeIntervalCommonType& )*genericType; colDesc->datetimestart = dti.getStartField(); colDesc->datetimeend = dti.getEndField(); colDesc->datetimefractprec = dti.getFractionPrecision(); colDesc->intervalleadingprec = dti.getLeadingPrecision(); } #pragma warn(1506) // warning elimination // offset, to be done (do we need it?) colDesc->setNullable(NOT elem->isNotNullConstraintSpecified()); colDesc->colclass = 'U'; colDesc->setAdded(FALSE); // defaultclass, to be done? 
(not referenced, not needed) ConstValue *pDefVal = (ConstValue*)elem->getDefaultValueExpr(); if (!pDefVal || pDefVal->isNull()) colDesc->defaultvalue = NULL; else colDesc->defaultvalue = copyString(pDefVal->getConstStr()); colDesc->colFlags = 0; return TRUE; } // ----------------------------------------------------------------------- // Methods of CmpSPExecDataItem // ----------------------------------------------------------------------- CmpSPExecDataItem::CmpSPExecDataItem(ULng32 exprSize, void* expr, ULng32 dataSize, void* data, CmpContext* context) { exprSize_ = exprSize; expr_ = expr; dataSize_ = dataSize; data_ = data; context_ = context; SPFuncsDiags_ = ComDiagsArea::allocate(context->heap()); } CmpSPExecDataItem::~CmpSPExecDataItem() { if (SPFuncsDiags_) SPFuncsDiags_->decrRefCount(); } CollHeap* CmpSPExecDataItem::wHeap() { return context_->heap(); } // ----------------------------------------------------------------------- // Methods of CmpISPDataItemInput // ----------------------------------------------------------------------- CmpSPExecDataItemInput::CmpSPExecDataItemInput(ULng32 exprSize, void* expr, ULng32 dataSize, void* data, CmpContext* context) : CmpSPExecDataItem(exprSize, expr, dataSize, data, context) { CMPASSERT(ExSPPrepareInputBuffer(data_) == 0); CMPASSERT(ExSPPosition(data_) == 0); currentRow_ = 0; rowLength_ = 0; control_ = 0; } CmpSPExecDataItemInput::~CmpSPExecDataItemInput() { } short CmpSPExecDataItemInput::next() { short status = 0; status = ExSPGetInputRow(data_, control_, currentRow_, rowLength_); CMPASSERT(status != -1); return status; } // ----------------------------------------------------------------------- // Methods of CmpISPDataItemReply // ----------------------------------------------------------------------- CmpSPExecDataItemReply::CmpSPExecDataItemReply(ULng32 exprSize, void* expr, ULng32 dataRowSize, ULng32 dataTotalSize, CmpSPExecDataItemInput* inputData, CollHeap* outHeap, CmpContext* context) : CmpSPExecDataItem(exprSize, expr, dataTotalSize, 0, context) { // allocate the reply buffer outHeap_ = outHeap; // data_ has to come from outHeap, because it will be taken away to be sent back to // executor. data_ = new(outHeap) char[dataTotalSize]; rowLength_ = dataRowSize; // passed in from executor rowBuffer_ = new(context->statementHeap()) char[rowLength_]; memset(rowBuffer_, 0, rowLength_); CMPASSERT(ExSPInitReplyBuffer(data_, dataTotalSize) == 0); inputData_ = inputData; rowExist_ = FALSE; diagsExist_ = FALSE; EORExist_ = FALSE; } CmpSPExecDataItemReply::~CmpSPExecDataItemReply() { NADELETEBASIC((char *)data_, outHeap_); NADELETEBASIC((char *)rowBuffer_, context_->statementHeap()); diags_.clear(); // this is to avoid memory leak. } NABoolean CmpSPExecDataItemReply::prepare() { return ( ExSPPrepareReplyBuffer(data_) == 0 ); } NABoolean CmpSPExecDataItemReply::MoveDiags(const SP_ERROR_STRUCT* error, const SP_STATUS spStatus) { // Copy the error information from SP_ERROR_STRUCT into diags_ diagsExist_ = TRUE; EORExist_ = TRUE; diags_.clear(); CmpSPERROR2Diags(error, &diags_); return TRUE; } NABoolean CmpSPExecDataItemReply::MergeDiags(ComDiagsArea* pDiags) { if (pDiags == NULL || pDiags->getNumber() <= 0) return FALSE; // merge the warning and error information from *pDiags into diags_ diagsExist_ = TRUE; EORExist_ = TRUE; diags_.mergeAfter(*pDiags); return TRUE; } short CmpSPExecDataItemReply::AddARow() { // set the rowExist_ flag to be TRUE // call the executor provided routines to move the data into buffer to be sent // back to executor. 
// if moved in successfully, reset the rowExist_ flag. // otherwise, return with buffer full and wait for the next GetNext request from // executor with enough buffer size to fill the row. rowExist_ = TRUE; short status = 0; status = ExSPPutReplyRow(data(), inputData_->control(), (char*)rowBuffer_, rowLength_, 0); if ( status == 1 ) // buffer is full return 1; memset(rowBuffer_, 0, rowLength_); rowExist_ = FALSE; return status; } short CmpSPExecDataItemReply::AddEOR() { // if diagsExist_, sent back the diags info along with end of row indicator // if moved in successfully, reset the diagsExist_ and EORExist_ flag. // if buffer is full, just return 1, wait for executor to send enough buffer. short status= 0; EORExist_ = TRUE; if (diagsExist_) { status = ExSPPutReplyRow(data(), inputData_->control(), 0, 0, &diags_); if ( status == 1) // buffer is full return 1; diagsExist_ = FALSE; diags_.clear(); } if (status != 2) // In case of warning we have already returned an EOD status = ExSPPutReplyRow(data(), inputData_->control(), 0, 0, 0); if ( status == 1 ) // buffer is full return 1; EORExist_ = FALSE; return status; } void CmpSPExecDataItemReply::allocateData(ULng32 size) { NADELETEBASIC((char *)data_, outHeap_); data_ = new(outHeap_) char[size]; CMPASSERT(ExSPInitReplyBuffer(data_, size) == 0); } // ----------------------------------------------------------------------- // Methods of CmpISPDataObject // ----------------------------------------------------------------------- CmpISPDataObject::CmpISPDataObject(CmpMessageISPRequest* req, CmpInternalSP* isp, CollHeap* outheap, CmpContext* context) : input_(req->inputExprSize(), (void*)req->inputExpr(), req->inputDataSize(), (void*)req->inputData(), context), key_(req->keyExprSize(), (void*)req->keyExpr(), req->keyDataSize(), (void*)req->keyData(), context), output_(req->outputExprSize(), (void*)req->outputExpr(), req->outputRowSize(), req->outputTotalSize(),&input_,outheap,context), outHeap_(outheap), context_(context) { isp->SetCmpISPDataObject(this); ispRequest_ = req; CMPASSERT( ExSPUnpackIOExpr(input_.expr(), output_.expr(), context->statementHeap()) == 0 ); } CmpISPDataObject::~CmpISPDataObject() { if (ispRequest_) ispRequest_->decrRefCount(); } // ----------------------------------------------------------------------- // Methods of CmpStoredProc // ----------------------------------------------------------------------- CmpStoredProc::CmpStoredProc(const NAString& procName, CmpContext* cmpContext) : procName_(procName) { cmpContext_ = cmpContext; } CmpStoredProc::~CmpStoredProc() { } NABoolean CmpStoredProc::InputFormat(Lng32, CmpSPInputFormat&) { return FALSE; } NABoolean CmpStoredProc::OutputFormat(CmpSPOutputFormat&) { return FALSE; } CmpStoredProc::ExecStatus CmpStoredProc::open(CmpSPDataObject&) { return FAIL; } CmpStoredProc::ExecStatus CmpStoredProc::fetch(CmpSPDataObject&) { return FAIL; } CmpStoredProc::ExecStatus CmpStoredProc::close(CmpSPDataObject&) { return FAIL; } // ----------------------------------------------------------------------- // Methods of CmpISPFuncs // ----------------------------------------------------------------------- NAList<CmpISPFuncs::ProcFuncsStruct> CmpISPFuncs::procFuncsArray_(256); CmpISPFuncs::CmpISPFuncs() { } CmpISPFuncs::~CmpISPFuncs() { } NABoolean CmpISPFuncs::ValidPFuncs (const ProcFuncsStruct& pFuncs) const { return ( pFuncs.procName_ && pFuncs.compileFunc_ && pFuncs.inFormatFunc_ && pFuncs.outNumFormatFunc_ && pFuncs.outFormatFunc_ && pFuncs.procFunc_ ); } Int32 CmpISPFuncs::RegFuncs( const 
char* procName, //null terminated SP_COMPILE_FUNCPTR compileFunc, SP_INPUTFORMAT_FUNCPTR inFormatFunc, SP_PARSE_FUNCPTR parseFunc, SP_NUM_OUTPUTFIELDS_FUNCPTR outNumFormatFunc, SP_OUTPUTFORMAT_FUNCPTR outFormatFunc, SP_PROCESS_FUNCPTR procFunc, SP_HANDLE spHandle, const char* version) { if ( strcmp(version, CMPISPVERSION) != 0 ) { ABORT ( "arkcmp: The ISP interface version is not compatible" ); NAExit(1); } procFuncsArray_.insert(ProcFuncsStruct(procName, compileFunc, inFormatFunc, parseFunc, outNumFormatFunc, outFormatFunc, procFunc, spHandle)); return 1; } // ----------------------------------------------------------------------- // Methods of CmpInternalSP // ----------------------------------------------------------------------- CmpInternalSP::CmpInternalSP(const NAString& name, CmpContext* context) : CmpStoredProc(name, context), compHandle_(0), procHandle_(0), state_(NONE), ispData_(0) { for ( Int32 j=0; j < SP_MAX_ERROR_STRUCTS; j++ ) { for ( Int32 i=0; i < SP_ERROR_MAX_OPTIONAL_STRINGS; i++ ) spError_[j].optionalString[i] = new((CollHeap*)cmpContext()->statementHeap()) char[SP_STRING_MAX_LENGTH]; } initSP_ERROR_STRUCT(); } CmpInternalSP::~CmpInternalSP() { if ( state_ == COMPILE ) { // needs to call the compileFunc_ initSP_ERROR_STRUCT(); if ( (*(procFuncs_.compileFunc_)) (SP_COMP_EXIT, &compHandle_, procFuncs_.spHandle_, &spError_[0]) !=SP_SUCCESS) appendDiags(); } // if not properly closed, an exception could have happened; // close the process properly if ( state_ == PROCESS ) { close(*ispData_); // When it gets here, an assertion must have happened before. // To pass back the error information from the SP close interface routine, // the ComDiags is moved into the context_ diags. cmpContext()->diags()->mergeAfter(ispData_->output()->diags()); } delete ispData_; for ( Int32 j=0; j < SP_MAX_ERROR_STRUCTS; j++ ) { for (Int32 i=0; i < SP_ERROR_MAX_OPTIONAL_STRINGS; i++) NADELETEBASIC(spError_[j].optionalString[i], cmpContext()->statementHeap()); } } // helper routines used internally static NAString timeStamp() { time_t tp; tp = time(NULL); char str[256]; sprintf(str, "%u", (UInt32)tp); // take lower 4 bytes as unique value return NAString(str); } NAString CmpInternalSP::OutTableName() { // To make the OutTableName unique, put in the timestamp // Note the timeStamp() above returns just the lower 4 bytes of seconds since // 01/01/1970 00:00:00, so it may not be unique if we get here too soon. NAString tableName = "SPTableOut" + procName() + timeStamp(); return tableName; } NABoolean CmpInternalSP::startCompileCycle() { procFuncs_ = procFuncsLookupTable_[procName()]; if (!procFuncsLookupTable_.ValidPFuncs(procFuncs_)) { *(cmpContext()->diags()) << DgSqlCode(arkcmpErrorISPNotFound) << DgString0(procName().data()); return FALSE; } if ( state_ != COMPILE ) { // needs to call the compileFunc_ initSP_ERROR_STRUCT(); if ( (*(procFuncs_.compileFunc_)) (SP_COMP_INIT, &compHandle_, procFuncs_.spHandle_, &spError_[0])!= SP_SUCCESS ) { appendDiags(); return FALSE; } } state_ = COMPILE; return TRUE; } SP_FIELDDESC_STRUCT* CmpInternalSP::allocSP_FIELDDESC_STRUCT(Lng32 num) { // The SP_FIELDDESC_STRUCT is allocated using new, not the overloaded one // because it is an array of struct to the SP interface function. So it can // not be derived from NABasicObject. Since the caller is not supposed to // jump out of the routine before it returns, it will not cause memory leak.
SP_FIELDDESC_STRUCT* fd = 0; if (num) { fd = new SP_FIELDDESC_STRUCT[num]; memset(fd, 0, sizeof(SP_FIELDDESC_STRUCT) * (Int32)num); } return fd; } void CmpInternalSP::deleteSP_FIELDDESC_STRUCT(SP_FIELDDESC_STRUCT* fd) { delete[] fd; } SP_KEYDESC_STRUCT* CmpInternalSP::allocSP_KEYDESC_STRUCT(Lng32 num) { // The SP_KEYDESC_STRUCT is allocated using new, not the overloaded one // because it is an array of struct to the SP interface function. So it can // not be derived from NABasicObject. Since the caller is not supposed to // jump out of the routine before it returns, it will not cause memory leak. SP_KEYDESC_STRUCT* kd = 0; if (num) { kd = new SP_KEYDESC_STRUCT[num]; memset(kd, 0, sizeof(SP_KEYDESC_STRUCT) * (Int32)num); } return kd; } void CmpInternalSP::deleteSP_KEYDESC_STRUCT(SP_KEYDESC_STRUCT* kd) { delete[] kd; } void CmpInternalSP::initSP_ERROR_STRUCT() { for ( Int32 j=0; j<SP_MAX_ERROR_STRUCTS; j++ ) { spError_[j].error = 0; for ( Int32 i=0; i<SP_ERROR_MAX_OPTIONAL_STRINGS; i++) *(spError_[j].optionalString[i]) = '\0'; for ( Int32 k=0; k<SP_ERROR_MAX_OPTIONAL_INTS; k++ ) spError_[j].optionalInteger[k] = ComDiags_UnInitialized_Int; } } void CmpInternalSP::appendDiags() { CmpSPERROR2Diags(spError_, cmpContext()->diags()); } // virtual methods for interface between arkcmp ( compiler ) and // stored procedure implementation. NABoolean CmpInternalSP::InputFormat(Lng32 num, CmpSPInputFormat& t) { NABoolean retStatus = TRUE; if ( !startCompileCycle() ) return FALSE; SP_FIELDDESC_STRUCT* fields = allocSP_FIELDDESC_STRUCT(num); initSP_ERROR_STRUCT(); if ( (*(procFuncs_.inFormatFunc_)) (fields, num, compHandle_, procFuncs_.spHandle_, &spError_[0]) == SP_SUCCESS ) { if (!(t.SetFormat(num, fields) ) ) retStatus = FALSE; } else { retStatus = FALSE; appendDiags(); } if ( !retStatus ) *(cmpContext()->diags()) << DgSqlCode(arkcmpErrorISPFieldDef); // It is not supposed to jump out of this function before this delete. // This has been agreed on the interface between arkcmp/server and ISP. deleteSP_FIELDDESC_STRUCT(fields); return retStatus; } NABoolean CmpInternalSP::OutputFormat(CmpSPOutputFormat& t) { NABoolean retStatus = TRUE; CMPASSERT(state_ == COMPILE); initSP_ERROR_STRUCT(); Lng32 num; NAString outTableName = OutTableName(); if ( (*(procFuncs_.outNumFormatFunc_))( &num, compHandle_, procFuncs_.spHandle_, &spError_[0]) == SP_SUCCESS ) { if ( num == 0 ) { // Create a dummy column if there is no output, since otherwise // NATable generated later on will break later in binder. 
SP_FIELDDESC_STRUCT fields; strcpy(fields.COLUMN_DEF, "Dummy1 int"); if ( !(t.SetFormat(1, outTableName.data(), &fields, 0, 0) ) ) retStatus = FALSE; } else { SP_FIELDDESC_STRUCT* fields = allocSP_FIELDDESC_STRUCT(num); SP_KEYDESC_STRUCT* keys = allocSP_KEYDESC_STRUCT(num); Lng32 nKeys = 0; initSP_ERROR_STRUCT(); if ( (*(procFuncs_.outFormatFunc_)) (fields, keys, &nKeys, compHandle_, procFuncs_.spHandle_, &spError_[0]) == SP_SUCCESS ) { if (!(t.SetFormat(num, outTableName.data(), fields, nKeys, keys ) ) ) { retStatus = FALSE; } } else { appendDiags(); retStatus = FALSE; } // It is not supposed to jump out of this function before this // delete[] deleteSP_FIELDDESC_STRUCT(fields); deleteSP_KEYDESC_STRUCT(keys); } } else { appendDiags(); retStatus = FALSE; } if ( !retStatus ) *(cmpContext()->diags()) << DgSqlCode(arkcmpErrorISPFieldDef); return retStatus; } NABoolean CmpInternalSP::ParseInput(const NAString& param) { CMPASSERT(state_ == COMPILE); if ( !(procFuncs_.parseFunc_) ) { // an optional function to call return TRUE; } // test code, for CMPASSERT, check SPUtil.cpp SP_ERROR_* // for more details. // CMPASSERT( strcmp((char*)param.data(), "TestCMPASSERT") != 0 ); initSP_ERROR_STRUCT(); if ( (*(procFuncs_.parseFunc_))((char*)param.data(), compHandle_, procFuncs_.spHandle_, &spError_[0]) != SP_SUCCESS ) { appendDiags(); return FALSE; } return TRUE; } //CmpStoredProc::Status CmpInternalSP::open(CmpISPDataObject& data) CmpStoredProc::ExecStatus CmpInternalSP::open(CmpISPDataObject& data) { CMPASSERT(state_ == NONE); procFuncs_ = procFuncsLookupTable_[procName()]; if (!procFuncsLookupTable_.ValidPFuncs(procFuncs_)) { *(cmpContext()->diags()) << DgSqlCode(arkcmpErrorISPNotFound) << DgString0(procName().data()); return FAIL; } initSP_ERROR_STRUCT(); SP_STATUS spStatus = (*(procFuncs_.procFunc_))( SP_PROC_OPEN, (SP_ROW_DATA)data.input(), CmpSPExtractFunc, (SP_ROW_DATA)data.output(), CmpSPFormatFunc, (SP_KEY_VALUE)data.key(), CmpSPKeyValueFunc, &procHandle_, procFuncs_.spHandle_, &spError_[0]); if (spStatus == SP_FAIL || spStatus == SP_SUCCESS_WARNING) { // Errors or warnings, go get them data.output()->MoveDiags(spError_, spStatus); if (spStatus == SP_FAIL) return FAIL; } state_ = PROCESS; return SUCCESS; } CmpStoredProc::ExecStatus CmpInternalSP::fetch(CmpISPDataObject& data) { CMPASSERT(state_ == PROCESS); // Need to send controls explicitly from the compiler space to the // cli context so ISP requests can perform CLI operations to extract // their values. // For now, only send controls for MODIFY, POPULATE INDEX, TRANSFORM, // PURGEDATA, MV_refresh and VALIDATE sp requests. // This also means RECOVER since these operations // can be perfomed through a RECOVER operation. 
// // VO, Versioning Light: Also send controls for sp_SchLevel // (UPGRADE and DOWNGRADE) if (procName() == "sp_partn" || procName() == "sp_populate" || procName() == "sp_purgedata" || procName() == "sp_recover" || procName() == "sp_transform" || procName() == "sp_validate" || procName() == "sp_purgedata" || procName() == "sp_refresh" || procName() == "sp_SchLevel") { sendAllControls(FALSE, FALSE, TRUE); // set the parent qid for this session const char *parentQid = cmpContext()->sqlSession()->getParentQid(); if (parentQid != NULL) { setParentQidAtSession(cmpContext()->statementHeap(), parentQid); } } // Send sqlparser_flags if (Get_SqlParser_Flags(ALLOW_FUNNY_IDENTIFIER)) sendParserFlag(ALLOW_FUNNY_IDENTIFIER); initSP_ERROR_STRUCT(); SP_STATUS spStatus = (*(procFuncs_.procFunc_))( SP_PROC_FETCH, (SP_ROW_DATA)data.input(), CmpSPExtractFunc, (SP_ROW_DATA)data.output(), CmpSPFormatFunc, (SP_KEY_VALUE)data.key(), CmpSPKeyValueFunc, &procHandle_, procFuncs_.spHandle_, &spError_[0]); if (spStatus == SP_FAIL) { // Errors, go get them data.output()->MoveDiags(spError_, spStatus); if (CmpCommon::diags() != NULL) { data.output()->MergeDiags(CmpCommon::diags()); CmpCommon::diags()->clear(); } return FAIL; } if (CmpCommon::diags() != NULL) { data.output()->MergeDiags(CmpCommon::diags()); CmpCommon::diags()->clear(); } if ( spStatus == SP_MOREDATA) { return MOREDATA; } return SUCCESS; } CmpStoredProc::ExecStatus CmpInternalSP::close(CmpISPDataObject& data) { CMPASSERT(state_ == PROCESS); initSP_ERROR_STRUCT(); state_ = NONE; SP_STATUS spStatus = (*(procFuncs_.procFunc_))( SP_PROC_CLOSE, (SP_ROW_DATA)data.input(), CmpSPExtractFunc, (SP_ROW_DATA)data.output(), CmpSPFormatFunc, (SP_KEY_VALUE)data.key(), CmpSPKeyValueFunc, &procHandle_, procFuncs_.spHandle_, &spError_[0]); if (spStatus != SP_SUCCESS ) { data.output()->MoveDiags(spError_, spStatus); return FAIL; } return SUCCESS; }
1
14,290
Can we use GetCliGlobals(false)->exCollHeap() here instead?
apache-trafodion
cpp
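A minimal sketch of the reviewer's suggestion above, in the style of the surrounding arkcmp code. It assumes `GetCliGlobals(false)` is valid at this point and that the heap object exposes the usual `allocateMemory`/`deallocateMemory` calls; neither assumption is verified against this file, so treat it as illustrative only.

// Sketch only: allocate the field-descriptor array from the CLI
// globals heap instead of global operator new, per the review
// comment. GetCliGlobals(false)->exCollHeap() and the heap's
// allocateMemory/deallocateMemory methods are assumed available.
SP_FIELDDESC_STRUCT* CmpInternalSP::allocSP_FIELDDESC_STRUCT(Lng32 num)
{
  SP_FIELDDESC_STRUCT* fd = 0;
  if (num)
  {
    CollHeap* heap = GetCliGlobals(false)->exCollHeap();
    fd = (SP_FIELDDESC_STRUCT*)
      heap->allocateMemory(sizeof(SP_FIELDDESC_STRUCT) * (size_t)num);
    memset(fd, 0, sizeof(SP_FIELDDESC_STRUCT) * (Int32)num);
  }
  return fd;
}

void CmpInternalSP::deleteSP_FIELDDESC_STRUCT(SP_FIELDDESC_STRUCT* fd)
{
  // Free from the same heap the array came from (sketch).
  if (fd)
    GetCliGlobals(false)->exCollHeap()->deallocateMemory(fd);
}

Whether exCollHeap() has the right lifetime for these short-lived arrays is exactly the question the review comment raises, so this stays a sketch rather than a drop-in replacement.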
@@ -17,7 +17,10 @@ namespace storage { TEST(AddEdgesTest, SimpleTest) { fs::TempDir rootPath("/tmp/AddEdgesTest.XXXXXX"); - std::unique_ptr<kvstore::KVStore> kv = TestUtils::initKV(rootPath.path()); + constexpr int32_t partitions = 6; + std::unique_ptr<kvstore::KVStore> kv = TestUtils::initKV(rootPath.path(), partitions, + {0, network::NetworkUtils::getAvailablePort()}); + TestUtils::waitUntilAllElected(kv.get(), 0, {0, 1, 2, 3, 4, 5}/*partitions*/); auto schemaMan = TestUtils::mockSchemaMan(); auto indexMan = TestUtils::mockIndexMan(); auto* processor = AddEdgesProcessor::instance(kv.get(),
1
/* Copyright (c) 2018 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "base/Base.h" #include "utils/NebulaKeyUtils.h" #include <gtest/gtest.h> #include <rocksdb/db.h> #include "fs/TempDir.h" #include "storage/test/TestUtils.h" #include "storage/mutate/AddEdgesProcessor.h" namespace nebula { namespace storage { TEST(AddEdgesTest, SimpleTest) { fs::TempDir rootPath("/tmp/AddEdgesTest.XXXXXX"); std::unique_ptr<kvstore::KVStore> kv = TestUtils::initKV(rootPath.path()); auto schemaMan = TestUtils::mockSchemaMan(); auto indexMan = TestUtils::mockIndexMan(); auto* processor = AddEdgesProcessor::instance(kv.get(), schemaMan.get(), indexMan.get(), nullptr); LOG(INFO) << "Build AddEdgesRequest..."; cpp2::AddEdgesRequest req; req.space_id = 0; req.overwritable = true; // partId => List<Edge> // Edge => {EdgeKey, props} for (PartitionID partId = 1; partId <= 3; partId++) { auto edges = TestUtils::setupEdges(partId, partId * 10, 10 * (partId + 1)); req.parts.emplace(partId, std::move(edges)); } LOG(INFO) << "Test AddEdgesProcessor..."; auto fut = processor->getFuture(); processor->process(req); auto resp = std::move(fut).get(); EXPECT_EQ(0, resp.result.failed_codes.size()); LOG(INFO) << "Check data in kv store..."; for (PartitionID partId = 1; partId <= 3; partId++) { for (VertexID srcId = 10 * partId; srcId < 10 * (partId + 1); srcId++) { auto prefix = NebulaKeyUtils::edgePrefix(partId, srcId, 101); std::unique_ptr<kvstore::KVIterator> iter; EXPECT_EQ(kvstore::ResultCode::SUCCEEDED, kv->prefix(0, partId, prefix, &iter)); int num = 0; while (iter->valid()) { auto edgeType = 101; auto dstId = srcId * 100 + 2; EXPECT_EQ(TestUtils::encodeValue(partId, srcId, dstId, edgeType), iter->val().str()); num++; iter->next(); } EXPECT_EQ(1, num); } } } } // namespace storage } // namespace nebula int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); folly::init(&argc, &argv, true); google::SetStderrLogging(google::INFO); return RUN_ALL_TESTS(); }
1
28,916
I suggest you move the waitUntilAllElected call into the initKV() method.
vesoft-inc-nebula
cpp
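A rough sketch of the refactor the reviewer asks for: fold the leader-election wait into TestUtils::initKV so each test gets back a store that is already usable. The delegation to the existing setup logic (called setupKV here) and the exact parameter types are assumptions based on the call sites in the diff above.

// Sketch only: initKV() returns once every partition has elected a
// leader, so tests no longer need a separate waitUntilAllElected call.
static std::unique_ptr<kvstore::KVStore> initKV(const char* rootPath,
                                                int32_t partitionNumber,
                                                HostAddr localhost) {
    auto kv = setupKV(rootPath, partitionNumber, localhost);  // existing setup, name assumed
    std::vector<PartitionID> parts;
    for (PartitionID p = 0; p < partitionNumber; p++) {
        parts.emplace_back(p);
    }
    waitUntilAllElected(kv.get(), 0 /*spaceId, as in the test*/, parts);
    return kv;
}

Call sites would then shrink back to a single TestUtils::initKV(rootPath.path(), partitions, addr) with no extra wait in each test body.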
@@ -28,6 +28,11 @@ namespace OpenTelemetry public class BatchExportProcessor<T> : BaseExportProcessor<T> where T : class { + public const int DefaultMaxQueueLength = 2048; + public const int DefaultScheduledDelayMilliseconds = 5000; + public const int DefaultTimeoutMilliseconds = 30000; + public const int DefaultMaxBatchSize = 512; + private readonly CircularBuffer<T> circularBuffer; private readonly int scheduledDelayMilliseconds; private readonly int exporterTimeoutMilliseconds;
1
// <copyright file="BatchExportProcessor.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Diagnostics; using System.Threading; using OpenTelemetry.Internal; namespace OpenTelemetry { /// <summary> /// Implements processor that batches telemetry objects before calling exporter. /// </summary> /// <typeparam name="T">The type of telemetry object to be exported.</typeparam> public class BatchExportProcessor<T> : BaseExportProcessor<T> where T : class { private readonly CircularBuffer<T> circularBuffer; private readonly int scheduledDelayMilliseconds; private readonly int exporterTimeoutMilliseconds; private readonly int maxExportBatchSize; private readonly Thread exporterThread; private readonly AutoResetEvent exportTrigger = new AutoResetEvent(false); private readonly ManualResetEvent dataExportedNotification = new ManualResetEvent(false); private readonly ManualResetEvent shutdownTrigger = new ManualResetEvent(false); private long shutdownDrainTarget = long.MaxValue; private long droppedCount; /// <summary> /// Initializes a new instance of the <see cref="BatchExportProcessor{T}"/> class. /// </summary> /// <param name="exporter">Exporter instance.</param> /// <param name="maxQueueSize">The maximum queue size. After the size is reached data are dropped. The default value is 2048.</param> /// <param name="scheduledDelayMilliseconds">The delay interval in milliseconds between two consecutive exports. The default value is 5000.</param> /// <param name="exporterTimeoutMilliseconds">How long the export can run before it is cancelled. The default value is 30000.</param> /// <param name="maxExportBatchSize">The maximum batch size of every export. It must be smaller or equal to maxQueueSize. 
The default value is 512.</param> public BatchExportProcessor( BaseExporter<T> exporter, int maxQueueSize = 2048, int scheduledDelayMilliseconds = 5000, int exporterTimeoutMilliseconds = 30000, int maxExportBatchSize = 512) : base(exporter) { if (maxQueueSize <= 0) { throw new ArgumentOutOfRangeException(nameof(maxQueueSize), maxQueueSize, "maxQueueSize should be greater than zero."); } if (maxExportBatchSize <= 0 || maxExportBatchSize > maxQueueSize) { throw new ArgumentOutOfRangeException(nameof(maxExportBatchSize), maxExportBatchSize, "maxExportBatchSize should be greater than zero and less than maxQueueSize."); } if (scheduledDelayMilliseconds <= 0) { throw new ArgumentOutOfRangeException(nameof(scheduledDelayMilliseconds), scheduledDelayMilliseconds, "scheduledDelayMilliseconds should be greater than zero."); } if (exporterTimeoutMilliseconds < 0) { throw new ArgumentOutOfRangeException(nameof(exporterTimeoutMilliseconds), exporterTimeoutMilliseconds, "exporterTimeoutMilliseconds should be non-negative."); } this.circularBuffer = new CircularBuffer<T>(maxQueueSize); this.scheduledDelayMilliseconds = scheduledDelayMilliseconds; this.exporterTimeoutMilliseconds = exporterTimeoutMilliseconds; this.maxExportBatchSize = maxExportBatchSize; this.exporterThread = new Thread(new ThreadStart(this.ExporterProc)) { IsBackground = true, Name = $"OpenTelemetry-{nameof(BatchExportProcessor<T>)}-{exporter.GetType().Name}", }; this.exporterThread.Start(); } /// <summary> /// Gets the number of telemetry objects dropped by the processor. /// </summary> internal long DroppedCount => this.droppedCount; /// <summary> /// Gets the number of telemetry objects received by the processor. /// </summary> internal long ReceivedCount => this.circularBuffer.AddedCount + this.DroppedCount; /// <summary> /// Gets the number of telemetry objects processed by the underlying exporter. /// </summary> internal long ProcessedCount => this.circularBuffer.RemovedCount; /// <inheritdoc/> public override void OnEnd(T data) { if (this.circularBuffer.TryAdd(data, maxSpinCount: 50000)) { if (this.circularBuffer.Count >= this.maxExportBatchSize) { this.exportTrigger.Set(); } return; // enqueue succeeded } // either the queue is full or exceeded the spin limit, drop the item on the floor Interlocked.Increment(ref this.droppedCount); } /// <inheritdoc/> protected override bool OnForceFlush(int timeoutMilliseconds) { var tail = this.circularBuffer.RemovedCount; var head = this.circularBuffer.AddedCount; if (head == tail) { return true; // nothing to flush } this.exportTrigger.Set(); if (timeoutMilliseconds == 0) { return false; } var triggers = new WaitHandle[] { this.dataExportedNotification, this.shutdownTrigger }; var sw = Stopwatch.StartNew(); // There is a chance that the export thread finished processing all the data from the queue, // and signaled before we enter wait here, use polling to prevent being blocked indefinitely. 
const int pollingMilliseconds = 1000; while (true) { if (timeoutMilliseconds == Timeout.Infinite) { WaitHandle.WaitAny(triggers, pollingMilliseconds); } else { var timeout = timeoutMilliseconds - sw.ElapsedMilliseconds; if (timeout <= 0) { return this.circularBuffer.RemovedCount >= head; } WaitHandle.WaitAny(triggers, Math.Min((int)timeout, pollingMilliseconds)); } if (this.circularBuffer.RemovedCount >= head) { return true; } if (this.shutdownDrainTarget != long.MaxValue) { return false; } } } /// <inheritdoc/> protected override bool OnShutdown(int timeoutMilliseconds) { this.shutdownDrainTarget = this.circularBuffer.AddedCount; this.shutdownTrigger.Set(); if (timeoutMilliseconds == Timeout.Infinite) { this.exporterThread.Join(); return this.exporter.Shutdown(); } if (timeoutMilliseconds == 0) { return this.exporter.Shutdown(0); } var sw = Stopwatch.StartNew(); this.exporterThread.Join(timeoutMilliseconds); var timeout = timeoutMilliseconds - sw.ElapsedMilliseconds; return this.exporter.Shutdown((int)Math.Max(timeout, 0)); } private void ExporterProc() { var triggers = new WaitHandle[] { this.exportTrigger, this.shutdownTrigger }; while (true) { // only wait when the queue doesn't have enough items, otherwise keep busy and send data continuously if (this.circularBuffer.Count < this.maxExportBatchSize) { WaitHandle.WaitAny(triggers, this.scheduledDelayMilliseconds); } if (this.circularBuffer.Count > 0) { this.exporter.Export(new Batch<T>(this.circularBuffer, this.maxExportBatchSize)); this.dataExportedNotification.Set(); this.dataExportedNotification.Reset(); } if (this.circularBuffer.RemovedCount >= this.shutdownDrainTarget) { break; } } } } }
1
18,097
No need for these to be public.
open-telemetry-opentelemetry-dotnet
.cs
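A sketch of the reviewer's point: the defaults only need to be visible inside the assembly as default parameter values, so internal (or private, if nothing else in the assembly needs them) keeps them off the public API surface. The constructor shown is abbreviated; validation is unchanged from the original.

// Sketch only: same constants as the patch, kept off the public API.
// Compile-time constants remain legal as default parameter values.
internal const int DefaultMaxQueueLength = 2048;
internal const int DefaultScheduledDelayMilliseconds = 5000;
internal const int DefaultTimeoutMilliseconds = 30000;
internal const int DefaultMaxBatchSize = 512;

public BatchExportProcessor(
    BaseExporter<T> exporter,
    int maxQueueSize = DefaultMaxQueueLength,
    int scheduledDelayMilliseconds = DefaultScheduledDelayMilliseconds,
    int exporterTimeoutMilliseconds = DefaultTimeoutMilliseconds,
    int maxExportBatchSize = DefaultMaxBatchSize)
    : base(exporter)
{
    // ...validation unchanged from the original...
}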
@@ -237,6 +237,10 @@ type ConsensusParams struct { // max decimal precision for assets MaxAssetDecimals uint32 + + // whether to use the old buggy Credential.lowestOutput function + // TODO(upgrade): Please remove as soon as the upgrade goes through + UseBuggyProposalLowestOutput bool } // Consensus tracks the protocol-level settings for different versions of the
1
// Copyright (C) 2019-2020 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package config import ( "encoding/json" "errors" "io" "os" "os/user" "path/filepath" "strconv" "strings" "time" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/codecs" ) // Devnet identifies the 'development network' use for development and not generally accessible publicly const Devnet protocol.NetworkID = "devnet" // Devtestnet identifies the 'development network for tests' use for running tests against development and not generally accessible publicly const Devtestnet protocol.NetworkID = "devtestnet" // Testnet identifies the publicly-available test network const Testnet protocol.NetworkID = "testnet" // Mainnet identifies the publicly-available real-money network const Mainnet protocol.NetworkID = "mainnet" // GenesisJSONFile is the name of the genesis.json file const GenesisJSONFile = "genesis.json" // Global defines global Algorand protocol parameters which should not be overriden. type Global struct { SmallLambda time.Duration // min amount of time to wait for leader's credential (i.e., time to propagate one credential) BigLambda time.Duration // max amount of time to wait for leader's proposal (i.e., time to propagate one block) } // Protocol holds the global configuration settings for the agreement protocol, // initialized with our current defaults. This is used across all nodes we create. var Protocol = Global{ SmallLambda: 2000 * time.Millisecond, BigLambda: 15000 * time.Millisecond, } // ConsensusParams specifies settings that might vary based on the // particular version of the consensus protocol. type ConsensusParams struct { // Consensus protocol upgrades. Votes for upgrades are collected for // UpgradeVoteRounds. If the number of positive votes is over // UpgradeThreshold, the proposal is accepted. // // UpgradeVoteRounds needs to be long enough to collect an // accurate sample of participants, and UpgradeThreshold needs // to be high enough to ensure that there are sufficient participants // after the upgrade. // // A consensus protocol upgrade may specify the delay between its // acceptance and its execution. This gives clients time to notify // users. This delay is specified by the upgrade proposer and must // be between MinUpgradeWaitRounds and MaxUpgradeWaitRounds (inclusive) // in the old protocol's parameters. Note that these parameters refer // to the representation of the delay in a block rather than the actual // delay: if the specified delay is zero, it is equivalent to // DefaultUpgradeWaitRounds. // // The maximum length of a consensus version string is // MaxVersionStringLen. 
UpgradeVoteRounds uint64 UpgradeThreshold uint64 DefaultUpgradeWaitRounds uint64 MinUpgradeWaitRounds uint64 MaxUpgradeWaitRounds uint64 MaxVersionStringLen int // MaxTxnBytesPerBlock determines the maximum number of bytes // that transactions can take up in a block. Specifically, // the sum of the lengths of encodings of each transaction // in a block must not exceed MaxTxnBytesPerBlock. MaxTxnBytesPerBlock int // MaxTxnBytesPerBlock is the maximum size of a transaction's Note field. MaxTxnNoteBytes int // MaxTxnLife is how long a transaction can be live for: // the maximum difference between LastValid and FirstValid. // // Note that in a protocol upgrade, the ledger must first be upgraded // to hold more past blocks for this value to be raised. MaxTxnLife uint64 // ApprovedUpgrades describes the upgrade proposals that this protocol // implementation will vote for, along with their delay value // (in rounds). A delay value of zero is the same as a delay of // DefaultUpgradeWaitRounds. ApprovedUpgrades map[protocol.ConsensusVersion]uint64 // SupportGenesisHash indicates support for the GenesisHash // fields in transactions (and requires them in blocks). SupportGenesisHash bool // RequireGenesisHash indicates that GenesisHash must be present // in every transaction. RequireGenesisHash bool // DefaultKeyDilution specifies the granularity of top-level ephemeral // keys. KeyDilution is the number of second-level keys in each batch, // signed by a top-level "batch" key. The default value can be // overriden in the account state. DefaultKeyDilution uint64 // MinBalance specifies the minimum balance that can appear in // an account. To spend money below MinBalance requires issuing // an account-closing transaction, which transfers all of the // money from the account, and deletes the account state. MinBalance uint64 // MinTxnFee specifies the minimum fee allowed on a transaction. // A minimum fee is necessary to prevent DoS. In some sense this is // a way of making the spender subsidize the cost of storing this transaction. MinTxnFee uint64 // RewardUnit specifies the number of MicroAlgos corresponding to one reward // unit. // // Rewards are received by whole reward units. Fractions of // RewardUnits do not receive rewards. RewardUnit uint64 // RewardsRateRefreshInterval is the number of rounds after which the // rewards level is recomputed for the next RewardsRateRefreshInterval rounds. RewardsRateRefreshInterval uint64 // seed-related parameters SeedLookback uint64 // how many blocks back we use seeds from in sortition. delta_s in the spec SeedRefreshInterval uint64 // how often an old block hash is mixed into the seed. 
delta_r in the spec // ledger retention policy MaxBalLookback uint64 // (current round - MaxBalLookback) is the oldest round the ledger must answer balance queries for // sortition threshold factors NumProposers uint64 SoftCommitteeSize uint64 SoftCommitteeThreshold uint64 CertCommitteeSize uint64 CertCommitteeThreshold uint64 NextCommitteeSize uint64 // for any non-FPR votes >= deadline step, committee sizes and thresholds are constant NextCommitteeThreshold uint64 LateCommitteeSize uint64 LateCommitteeThreshold uint64 RedoCommitteeSize uint64 RedoCommitteeThreshold uint64 DownCommitteeSize uint64 DownCommitteeThreshold uint64 FastRecoveryLambda time.Duration // time between fast recovery attempts FastPartitionRecovery bool // set when fast partition recovery is enabled // commit to payset using a hash of entire payset, // instead of txid merkle tree PaysetCommitFlat bool MaxTimestampIncrement int64 // maximum time between timestamps on successive blocks // support for the efficient encoding in SignedTxnInBlock SupportSignedTxnInBlock bool // force the FeeSink address to be non-participating in the genesis balances. ForceNonParticipatingFeeSink bool // support for ApplyData in SignedTxnInBlock ApplyData bool // track reward distributions in ApplyData RewardsInApplyData bool // domain-separated credentials CredentialDomainSeparationEnabled bool // support for transactions that mark an account non-participating SupportBecomeNonParticipatingTransactions bool // fix the rewards calculation by avoiding subtracting too much from the rewards pool PendingResidueRewards bool // asset support Asset bool // max number of assets per account MaxAssetsPerAccount int // max length of asset name MaxAssetNameBytes int // max length of asset unit name MaxAssetUnitNameBytes int // max length of asset url MaxAssetURLBytes int // support sequential transaction counter TxnCounter TxnCounter bool // transaction groups SupportTxGroups bool // max group size MaxTxGroupSize int // support for transaction leases SupportTransactionLeases bool // 0 for no support, otherwise highest version supported LogicSigVersion uint64 // len(LogicSig.Logic) + len(LogicSig.Args[*]) must be less than this LogicSigMaxSize uint64 // sum of estimated op cost must be less than this LogicSigMaxCost uint64 // max decimal precision for assets MaxAssetDecimals uint32 } // Consensus tracks the protocol-level settings for different versions of the // consensus protocol. var Consensus map[protocol.ConsensusVersion]ConsensusParams func init() { Consensus = make(map[protocol.ConsensusVersion]ConsensusParams) initConsensusProtocols() initConsensusTestProtocols() // This must appear last, since it depends on all of the other // versions to already be registered (by the above calls). initConsensusTestFastUpgrade() // Allow tuning SmallLambda for faster consensus in single-machine e2e // tests. Useful for development. This might make sense to fold into // a protocol-version-specific setting, once we move SmallLambda into // ConsensusParams. algoSmallLambda, err := strconv.ParseInt(os.Getenv("ALGOSMALLLAMBDAMSEC"), 10, 64) if err == nil { Protocol.SmallLambda = time.Duration(algoSmallLambda) * time.Millisecond } } func initConsensusProtocols() { // WARNING: copying a ConsensusParams by value into a new variable // does not copy the ApprovedUpgrades map. Make sure that each new // ConsensusParams structure gets a fresh ApprovedUpgrades map. // Base consensus protocol version, v7. 
v7 := ConsensusParams{ UpgradeVoteRounds: 10000, UpgradeThreshold: 9000, DefaultUpgradeWaitRounds: 10000, MaxVersionStringLen: 64, MinBalance: 10000, MinTxnFee: 1000, MaxTxnLife: 1000, MaxTxnNoteBytes: 1024, MaxTxnBytesPerBlock: 1000000, DefaultKeyDilution: 10000, MaxTimestampIncrement: 25, RewardUnit: 1e6, RewardsRateRefreshInterval: 5e5, ApprovedUpgrades: map[protocol.ConsensusVersion]uint64{}, NumProposers: 30, SoftCommitteeSize: 2500, SoftCommitteeThreshold: 1870, CertCommitteeSize: 1000, CertCommitteeThreshold: 720, NextCommitteeSize: 10000, NextCommitteeThreshold: 7750, LateCommitteeSize: 10000, LateCommitteeThreshold: 7750, RedoCommitteeSize: 10000, RedoCommitteeThreshold: 7750, DownCommitteeSize: 10000, DownCommitteeThreshold: 7750, FastRecoveryLambda: 5 * time.Minute, SeedLookback: 2, SeedRefreshInterval: 100, MaxBalLookback: 320, MaxTxGroupSize: 1, } v7.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV7] = v7 // v8 uses parameters and a seed derivation policy (the "twin seeds") from Georgios' new analysis v8 := v7 v8.SeedRefreshInterval = 80 v8.NumProposers = 9 v8.SoftCommitteeSize = 2990 v8.SoftCommitteeThreshold = 2267 v8.CertCommitteeSize = 1500 v8.CertCommitteeThreshold = 1112 v8.NextCommitteeSize = 5000 v8.NextCommitteeThreshold = 3838 v8.LateCommitteeSize = 5000 v8.LateCommitteeThreshold = 3838 v8.RedoCommitteeSize = 5000 v8.RedoCommitteeThreshold = 3838 v8.DownCommitteeSize = 5000 v8.DownCommitteeThreshold = 3838 v8.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV8] = v8 // v7 can be upgraded to v8. v7.ApprovedUpgrades[protocol.ConsensusV8] = 0 // v9 increases the minimum balance to 100,000 microAlgos. v9 := v8 v9.MinBalance = 100000 v9.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV9] = v9 // v8 can be upgraded to v9. v8.ApprovedUpgrades[protocol.ConsensusV9] = 0 // v10 introduces fast partition recovery (and also raises NumProposers). v10 := v9 v10.FastPartitionRecovery = true v10.NumProposers = 20 v10.LateCommitteeSize = 500 v10.LateCommitteeThreshold = 320 v10.RedoCommitteeSize = 2400 v10.RedoCommitteeThreshold = 1768 v10.DownCommitteeSize = 6000 v10.DownCommitteeThreshold = 4560 v10.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV10] = v10 // v9 can be upgraded to v10. v9.ApprovedUpgrades[protocol.ConsensusV10] = 0 // v11 introduces SignedTxnInBlock. v11 := v10 v11.SupportSignedTxnInBlock = true v11.PaysetCommitFlat = true v11.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV11] = v11 // v10 can be upgraded to v11. v10.ApprovedUpgrades[protocol.ConsensusV11] = 0 // v12 increases the maximum length of a version string. v12 := v11 v12.MaxVersionStringLen = 128 v12.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV12] = v12 // v11 can be upgraded to v12. v11.ApprovedUpgrades[protocol.ConsensusV12] = 0 // v13 makes the consensus version a meaningful string. v13 := v12 v13.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV13] = v13 // v12 can be upgraded to v13. v12.ApprovedUpgrades[protocol.ConsensusV13] = 0 // v14 introduces tracking of closing amounts in ApplyData, and enables // GenesisHash in transactions. v14 := v13 v14.ApplyData = true v14.SupportGenesisHash = true v14.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV14] = v14 // v13 can be upgraded to v14. 
v13.ApprovedUpgrades[protocol.ConsensusV14] = 0 // v15 introduces tracking of reward distributions in ApplyData. v15 := v14 v15.RewardsInApplyData = true v15.ForceNonParticipatingFeeSink = true v15.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV15] = v15 // v14 can be upgraded to v15. v14.ApprovedUpgrades[protocol.ConsensusV15] = 0 // v16 fixes domain separation in credentials. v16 := v15 v16.CredentialDomainSeparationEnabled = true v16.RequireGenesisHash = true v16.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV16] = v16 // v15 can be upgraded to v16. v15.ApprovedUpgrades[protocol.ConsensusV16] = 0 // ConsensusV17 points to 'final' spec commit v17 := v16 v17.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV17] = v17 // v16 can be upgraded to v17. v16.ApprovedUpgrades[protocol.ConsensusV17] = 0 // ConsensusV18 points to reward calculation spec commit v18 := v17 v18.PendingResidueRewards = true v18.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} v18.TxnCounter = true v18.Asset = true v18.LogicSigVersion = 1 v18.LogicSigMaxSize = 1000 v18.LogicSigMaxCost = 20000 v18.MaxAssetsPerAccount = 1000 v18.SupportTxGroups = true v18.MaxTxGroupSize = 16 v18.SupportTransactionLeases = true v18.SupportBecomeNonParticipatingTransactions = true v18.MaxAssetNameBytes = 32 v18.MaxAssetUnitNameBytes = 8 v18.MaxAssetURLBytes = 32 Consensus[protocol.ConsensusV18] = v18 // ConsensusV19 is the official spec commit ( teal, assets, group tx ) v19 := v18 v19.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusV19] = v19 // v18 can be upgraded to v19. v18.ApprovedUpgrades[protocol.ConsensusV19] = 0 // v17 can be upgraded to v19. v17.ApprovedUpgrades[protocol.ConsensusV19] = 0 // v20 points to adding the precision to the assets. v20 := v19 v20.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} v20.MaxAssetDecimals = 19 // we want to adjust the upgrade time to be roughly one week. // one week, in term of rounds would be: // 140651 = (7 * 24 * 60 * 60 / 4.3) // for the sake of future manual calculations, we'll round that down // a bit : v20.DefaultUpgradeWaitRounds = 140000 Consensus[protocol.ConsensusV20] = v20 // v19 can be upgraded to v20. v19.ApprovedUpgrades[protocol.ConsensusV20] = 0 // ConsensusFuture is used to test features that are implemented // but not yet released in a production protocol version. 
vFuture := v20 vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} vFuture.MinUpgradeWaitRounds = 10000 vFuture.MaxUpgradeWaitRounds = 150000 Consensus[protocol.ConsensusFuture] = vFuture } func initConsensusTestProtocols() { // Various test protocol versions Consensus[protocol.ConsensusTest0] = ConsensusParams{ UpgradeVoteRounds: 2, UpgradeThreshold: 1, DefaultUpgradeWaitRounds: 2, MaxVersionStringLen: 64, MaxTxnBytesPerBlock: 1000000, DefaultKeyDilution: 10000, ApprovedUpgrades: map[protocol.ConsensusVersion]uint64{ protocol.ConsensusTest1: 0, }, } Consensus[protocol.ConsensusTest1] = ConsensusParams{ UpgradeVoteRounds: 10, UpgradeThreshold: 8, DefaultUpgradeWaitRounds: 10, MaxVersionStringLen: 64, MaxTxnBytesPerBlock: 1000000, DefaultKeyDilution: 10000, ApprovedUpgrades: map[protocol.ConsensusVersion]uint64{}, } testBigBlocks := Consensus[protocol.ConsensusCurrentVersion] testBigBlocks.MaxTxnBytesPerBlock = 100000000 testBigBlocks.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusTestBigBlocks] = testBigBlocks rapidRecalcParams := Consensus[protocol.ConsensusCurrentVersion] rapidRecalcParams.RewardsRateRefreshInterval = 10 //because rapidRecalcParams is based on ConsensusCurrentVersion, //it *shouldn't* have any ApprovedUpgrades //but explicitly mark "no approved upgrades" just in case rapidRecalcParams.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusTestRapidRewardRecalculation] = rapidRecalcParams // Setting the testShorterLookback parameters derived from ConsensusCurrentVersion // Will result in MaxBalLookback = 32 // Used to run tests faster where past MaxBalLookback values are checked testShorterLookback := Consensus[protocol.ConsensusCurrentVersion] testShorterLookback.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md testShorterLookback.SeedLookback = 2 testShorterLookback.SeedRefreshInterval = 8 testShorterLookback.MaxBalLookback = 2 * testShorterLookback.SeedLookback * testShorterLookback.SeedRefreshInterval // 32 Consensus[protocol.ConsensusTestShorterLookback] = testShorterLookback // The following two protocols: testUnupgradedProtocol and testUnupgradedToProtocol // are used to test the case when some nodes in the network do not make progress. // testUnupgradedToProtocol is derived from ConsensusCurrentVersion and upgraded // from testUnupgradedProtocol. testUnupgradedToProtocol := Consensus[protocol.ConsensusCurrentVersion] testUnupgradedToProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} Consensus[protocol.ConsensusTestUnupgradedToProtocol] = testUnupgradedToProtocol // testUnupgradedProtocol is used to control the upgrade of a node. This is used // to construct and run a network where some node is upgraded, and some other // node is not upgraded. // testUnupgradedProtocol is derived from ConsensusCurrentVersion and upgrades to // testUnupgradedToProtocol. 
testUnupgradedProtocol := Consensus[protocol.ConsensusCurrentVersion] testUnupgradedProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} testUnupgradedProtocol.UpgradeVoteRounds = 3 testUnupgradedProtocol.UpgradeThreshold = 2 testUnupgradedProtocol.DefaultUpgradeWaitRounds = 3 b, err := strconv.ParseBool(os.Getenv("ALGORAND_TEST_UNUPGRADEDPROTOCOL_DELETE_UPGRADE")) // Do not upgrade to the next version if // ALGORAND_TEST_UNUPGRADEDPROTOCOL_DELETE_UPGRADE is set to true (e.g. 1, TRUE) if err == nil && b { // Configure as if testUnupgradedToProtocol is not supported by the binary delete(Consensus, protocol.ConsensusTestUnupgradedToProtocol) } else { // Direct upgrade path from ConsensusTestUnupgradedProtocol to ConsensusTestUnupgradedToProtocol // This is needed for the voting nodes vote to upgrade to the next protocol testUnupgradedProtocol.ApprovedUpgrades[protocol.ConsensusTestUnupgradedToProtocol] = 0 } Consensus[protocol.ConsensusTestUnupgradedProtocol] = testUnupgradedProtocol } func initConsensusTestFastUpgrade() { fastUpgradeProtocols := make(map[protocol.ConsensusVersion]ConsensusParams) for proto, params := range Consensus { fastParams := params fastParams.UpgradeVoteRounds = 5 fastParams.UpgradeThreshold = 3 fastParams.DefaultUpgradeWaitRounds = 5 fastParams.MaxVersionStringLen += len(protocol.ConsensusTestFastUpgrade("")) fastParams.ApprovedUpgrades = make(map[protocol.ConsensusVersion]uint64) for ver := range params.ApprovedUpgrades { fastParams.ApprovedUpgrades[protocol.ConsensusTestFastUpgrade(ver)] = 0 } fastUpgradeProtocols[protocol.ConsensusTestFastUpgrade(proto)] = fastParams } // Put the test protocols into the Consensus struct; this // is done as a separate step so we don't recurse forever. for proto, params := range fastUpgradeProtocols { Consensus[proto] = params } } // Local holds the per-node-instance configuration settings for the protocol. type Local struct { // Version tracks the current version of the defaults so we can migrate old -> new // This is specifically important whenever we decide to change the default value // for an existing parameter. Version uint32 // environmental (may be overridden) // if true, does not garbage collect; also, replies to catchup requests Archival bool // gossipNode.go // how many peers to propagate to? GossipFanout int NetAddress string ReconnectTime time.Duration // what we should tell people to connect to PublicAddress string MaxConnectionsPerIP int // 0 == disable PeerPingPeriodSeconds int // for https serving TLSCertFile string TLSKeyFile string // Logging BaseLoggerDebugLevel uint32 // if this is 0, do not produce agreement.cadaver CadaverSizeTarget uint64 // IncomingConnectionsLimit specifies the max number of long-lived incoming // connections. 0 means no connections allowed. -1 is unbounded. IncomingConnectionsLimit int // BroadcastConnectionsLimit specifies the number of connections that // will receive broadcast (gossip) messages from this node. If the // node has more connections than this number, it will send broadcasts // to the top connections by priority (outgoing connections first, then // by money held by peers based on their participation key). 0 means // no outgoing messages (not even transaction broadcasting to outgoing // peers). -1 means unbounded (default). BroadcastConnectionsLimit int // AnnounceParticipationKey specifies that this node should announce its // participation key (with the largest stake) to its gossip peers. 
This // allows peers to prioritize our connection, if necessary, in case of a // DoS attack. Disabling this means that the peers will not have any // additional information to allow them to prioritize our connection. AnnounceParticipationKey bool // PriorityPeers specifies peer IP addresses that should always get // outgoing broadcast messages from this node. PriorityPeers map[string]bool // To make sure the algod process does not run out of FDs, algod ensures // that RLIMIT_NOFILE exceeds the max number of incoming connections (i.e., // IncomingConnectionsLimit) by at least ReservedFDs. ReservedFDs are meant // to leave room for short-lived FDs like DNS queries, SQLite files, etc. ReservedFDs uint64 // local server // API endpoint address EndpointAddress string // timeouts passed to the rest http.Server implementation RestReadTimeoutSeconds int RestWriteTimeoutSeconds int // SRV-based phonebook DNSBootstrapID string // Log file size limit in bytes LogSizeLimit uint64 // text/template for creating log archive filename. // Available template vars: // Time at start of log: {{.Year}} {{.Month}} {{.Day}} {{.Hour}} {{.Minute}} {{.Second}} // Time at end of log: {{.EndYear}} {{.EndMonth}} {{.EndDay}} {{.EndHour}} {{.EndMinute}} {{.EndSecond}} // // If the filename ends with .gz or .bz2 it will be compressed. // // default: "node.archive.log" (no rotation, clobbers previous archive) LogArchiveName string // LogArchiveMaxAge will be parsed by time.ParseDuration(). // Valid units are 's' seconds, 'm' minutes, 'h' hours LogArchiveMaxAge string // number of consecutive attempts to catchup after which we replace the peers we're connected to CatchupFailurePeerRefreshRate int // where should the node exporter listen for metrics NodeExporterListenAddress string // enable metric reporting flag EnableMetricReporting bool // enable top accounts reporting flag EnableTopAccountsReporting bool // enable agreement reporting flag. Currently only prints additional period events. EnableAgreementReporting bool // enable agreement timing metrics flag EnableAgreementTimeMetrics bool // The path to the node exporter. NodeExporterPath string // The fallback DNS resolver address that would be used if the system resolver would fail to retrieve SRV records FallbackDNSResolverAddress string // exponential increase factor of transaction pool's fee threshold, should always be 2 in production TxPoolExponentialIncreaseFactor uint64 SuggestedFeeBlockHistory int // TxPoolSize is the number of transactions that fit in the transaction pool TxPoolSize int // number of seconds allowed for syncing transactions TxSyncTimeoutSeconds int64 // number of seconds between transaction synchronizations TxSyncIntervalSeconds int64 // the number of incoming message hashes buckets. IncomingMessageFilterBucketCount int // the size of each incoming message hash bucket. IncomingMessageFilterBucketSize int // the number of outgoing message hashes buckets. OutgoingMessageFilterBucketCount int // the size of each outgoing message hash bucket. OutgoingMessageFilterBucketSize int // enable the filtering of outgoing messages EnableOutgoingNetworkMessageFiltering bool // enable the filtering of incoming messages EnableIncomingMessageFilter bool // control enabling / disabling deadlock detection. // negative (-1) to disable, positive (1) to enable, 0 for default. DeadlockDetection int // Prefer to run algod Hosted (under algoh) // Observed by `goal` for now. RunHosted bool // The maximal number of blocks that catchup will fetch in parallel. 
// If less than Protocol.SeedLookback, then Protocol.SeedLookback will be used as to limit the catchup. CatchupParallelBlocks uint64 // Generate AssembleBlockMetrics telemetry event EnableAssembleStats bool // Generate ProcessBlockMetrics telemetry event EnableProcessBlockStats bool // SuggestedFeeSlidingWindowSize is number of past blocks that will be considered in computing the suggested fee SuggestedFeeSlidingWindowSize uint32 // the max size the sync server would return TxSyncServeResponseSize int // IsIndexerActive indicates whether to activate the indexer for fast retrieval of transactions // Note -- Indexer cannot operate on non Archival nodes IsIndexerActive bool // UseXForwardedForAddress indicates whether or not the node should use the X-Forwarded-For HTTP Header when // determining the source of a connection. If used, it should be set to the string "X-Forwarded-For", unless the // proxy vendor provides another header field. In the case of CloudFlare proxy, the "CF-Connecting-IP" header // field can be used. UseXForwardedForAddressField string // ForceRelayMessages indicates whether the network library relay messages even in the case that no NetAddress was specified. ForceRelayMessages bool // ConnectionsRateLimitingWindowSeconds is being used in conjunction with ConnectionsRateLimitingCount; // see ConnectionsRateLimitingCount description for further information. Providing a zero value // in this variable disables the connection rate limiting. ConnectionsRateLimitingWindowSeconds uint // ConnectionsRateLimitingCount is being used along with ConnectionsRateLimitingWindowSeconds to determine if // a connection request should be accepted or not. The gossip network examine all the incoming requests in the past // ConnectionsRateLimitingWindowSeconds seconds that share the same origin. If the total count exceed the ConnectionsRateLimitingCount // value, the connection is refused. ConnectionsRateLimitingCount uint // EnableRequestLogger enabled the logging of the incoming requests to the telemetry server. EnableRequestLogger bool // PeerConnectionsUpdateInterval defines the interval at which the peer connections information is being sent to the // telemetry ( when enabled ). Defined in seconds. PeerConnectionsUpdateInterval int // EnableProfiler enables the go pprof endpoints, should be false if // the algod api will be exposed to untrusted individuals EnableProfiler bool // TelemetryToLog records messages to node.log that are normally sent to remote event monitoring TelemetryToLog bool } // Filenames of config files within the configdir (e.g. ~/.algorand) // ConfigFilename is the name of the config.json file where we store per-algod-instance settings const ConfigFilename = "config.json" // PhonebookFilename is the name of the phonebook configuration files - no longer used const PhonebookFilename = "phonebook.json" // No longer used in product - still in tests // LedgerFilenamePrefix is the prefix of the name of the ledger database files const LedgerFilenamePrefix = "ledger" // CrashFilename is the name of the agreement database file. // It is used to recover from node crashes. const CrashFilename = "crash.sqlite" // LoadConfigFromDisk returns a Local config structure based on merging the defaults // with settings loaded from the config file from the custom dir. If the custom file // cannot be loaded, the default config is returned (with the error from loading the // custom file). 
func LoadConfigFromDisk(custom string) (c Local, err error) { return loadConfigFromFile(filepath.Join(custom, ConfigFilename)) } func loadConfigFromFile(configFile string) (c Local, err error) { c = defaultLocal c.Version = 0 // Reset to 0 so we get the version from the loaded file. c, err = mergeConfigFromFile(configFile, c) if err != nil { return } // Migrate in case defaults were changed // If a config file does not have version, it is assumed to be zero. // All fields listed in migrate() might be changed if an actual value matches to default value from a previous version. c, err = migrate(c) return } // GetDefaultLocal returns a copy of the current defaultLocal config func GetDefaultLocal() Local { return defaultLocal } func mergeConfigFromDir(root string, source Local) (Local, error) { return mergeConfigFromFile(filepath.Join(root, ConfigFilename), source) } func mergeConfigFromFile(configpath string, source Local) (Local, error) { f, err := os.Open(configpath) if err != nil { return source, err } defer f.Close() err = loadConfig(f, &source) // For now, all relays (listening for incoming connections) are also Archival // We can change this logic in the future, but it's currently the sanest default. if source.NetAddress != "" { source.Archival = true } return source, err } func loadConfig(reader io.Reader, config *Local) error { dec := json.NewDecoder(reader) return dec.Decode(config) } // DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers func (cfg Local) DNSBootstrapArray(networkID protocol.NetworkID) (bootstrapArray []string) { dnsBootstrapString := cfg.DNSBootstrap(networkID) bootstrapArray = strings.Split(dnsBootstrapString, ";") return } // DNSBootstrap returns the network-specific DNSBootstrap identifier func (cfg Local) DNSBootstrap(network protocol.NetworkID) string { // if user hasn't modified the default DNSBootstrapID in the configuration // file and we're targeting a devnet ( via genesis file ), we the // explicit devnet network bootstrap. if defaultLocal.DNSBootstrapID == cfg.DNSBootstrapID && network == Devnet { return "devnet.algodev.network" } return strings.Replace(cfg.DNSBootstrapID, "<network>", string(network), -1) } // SaveToDisk writes the Local settings into a root/ConfigFilename file func (cfg Local) SaveToDisk(root string) error { configpath := filepath.Join(root, ConfigFilename) filename := os.ExpandEnv(configpath) return cfg.SaveToFile(filename) } // SaveToFile saves the config to a specific filename, allowing overriding the default name func (cfg Local) SaveToFile(filename string) error { var alwaysInclude []string alwaysInclude = append(alwaysInclude, "Version") return codecs.SaveNonDefaultValuesToFile(filename, cfg, defaultLocal, alwaysInclude, true) } type phonebookBlackWhiteList struct { Include []string } // LoadPhonebook returns a phonebook loaded from the provided directory, if it exists. // NOTE: We no longer use phonebook for anything but tests, but users should be able to use it func LoadPhonebook(datadir string) ([]string, error) { var entries []string path := filepath.Join(datadir, PhonebookFilename) f, rootErr := os.Open(path) if rootErr != nil { if !os.IsNotExist(rootErr) { return nil, rootErr } } else { defer f.Close() phonebook := phonebookBlackWhiteList{} dec := json.NewDecoder(f) err := dec.Decode(&phonebook) if err != nil { return nil, errors.New("error decoding phonebook! 
got error: " + err.Error()) } entries = phonebook.Include } // get an initial list of peers return entries, rootErr } // SavePhonebookToDisk writes the phonebook into a root/PhonebookFilename file func SavePhonebookToDisk(entries []string, root string) error { configpath := filepath.Join(root, PhonebookFilename) f, err := os.OpenFile(os.ExpandEnv(configpath), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err == nil { defer f.Close() err = savePhonebook(entries, f) } return err } func savePhonebook(entries []string, w io.Writer) error { pb := phonebookBlackWhiteList{ Include: entries, } enc := codecs.NewFormattedJSONEncoder(w) return enc.Encode(pb) } var globalConfigFileRoot string // GetConfigFilePath retrieves the full path to a configuration file // These are global configurations - not specific to data-directory / network. func GetConfigFilePath(file string) (string, error) { rootPath, err := GetGlobalConfigFileRoot() if err != nil { return "", err } return filepath.Join(rootPath, file), nil } // GetGlobalConfigFileRoot returns the current root folder for global configuration files. // This will likely only change for tests. func GetGlobalConfigFileRoot() (string, error) { var err error if globalConfigFileRoot == "" { globalConfigFileRoot, err = GetDefaultConfigFilePath() if err == nil { dirErr := os.Mkdir(globalConfigFileRoot, os.ModePerm) if !os.IsExist(dirErr) { err = dirErr } } } return globalConfigFileRoot, err } // SetGlobalConfigFileRoot allows overriding the root folder for global configuration files. // It returns the current one so it can be restored, if desired. // This will likely only change for tests. func SetGlobalConfigFileRoot(rootPath string) string { currentRoot := globalConfigFileRoot globalConfigFileRoot = rootPath return currentRoot } // GetDefaultConfigFilePath retrieves the default directory for global (not per-instance) config files // By default we store in ~/.algorand/. // This will likely only change for tests. func GetDefaultConfigFilePath() (string, error) { currentUser, err := user.Current() if err != nil { return "", err } if currentUser.HomeDir == "" { return "", errors.New("GetDefaultConfigFilePath fail - current user has no home directory") } return filepath.Join(currentUser.HomeDir, ".algorand"), nil }
1
37,307
Do you think we can name this variable in less... negative... language? Maybe `ProposalTieBreakerEqualizer`?
algorand-go-algorand
go
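A minimal sketch of the rename the reviewer floats. ProposalTieBreakerEqualizer is only the reviewer's proposed name, not a committed one, and the doc comment below paraphrases the original TODO.

// Sketch only: same flag and removal plan, neutrally named.
type ConsensusParams struct {
	// ...existing fields elided...

	// ProposalTieBreakerEqualizer selects the pre-fix behavior of
	// Credential.lowestOutput when breaking proposal ties.
	// TODO(upgrade): remove as soon as the upgrade goes through.
	ProposalTieBreakerEqualizer bool
}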
@@ -48,7 +48,7 @@ class PartitionKey implements StructLike { private final Accessor<InternalRow>[] accessors; @SuppressWarnings("unchecked") - PartitionKey(PartitionSpec spec) { + PartitionKey(PartitionSpec spec, Schema inputSchema) { this.spec = spec; List<PartitionField> fields = spec.fields();
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark.source; import com.google.common.collect.Maps; import java.lang.reflect.Array; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.List; import java.util.Map; import org.apache.iceberg.PartitionField; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.StructLike; import org.apache.iceberg.spark.SparkSchemaUtil; import org.apache.iceberg.transforms.Transform; import org.apache.iceberg.types.Type; import org.apache.iceberg.types.TypeUtil; import org.apache.iceberg.types.Types; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.types.DataType; import org.apache.spark.sql.types.Decimal; import org.apache.spark.unsafe.types.UTF8String; class PartitionKey implements StructLike { private final PartitionSpec spec; private final int size; private final Object[] partitionTuple; private final Transform[] transforms; private final Accessor<InternalRow>[] accessors; @SuppressWarnings("unchecked") PartitionKey(PartitionSpec spec) { this.spec = spec; List<PartitionField> fields = spec.fields(); this.size = fields.size(); this.partitionTuple = new Object[size]; this.transforms = new Transform[size]; this.accessors = (Accessor<InternalRow>[]) Array.newInstance(Accessor.class, size); Schema schema = spec.schema(); Map<Integer, Accessor<InternalRow>> newAccessors = buildAccessors(schema); for (int i = 0; i < size; i += 1) { PartitionField field = fields.get(i); Accessor<InternalRow> accessor = newAccessors.get(field.sourceId()); if (accessor == null) { throw new RuntimeException( "Cannot build accessor for field: " + schema.findField(field.sourceId())); } this.accessors[i] = accessor; this.transforms[i] = field.transform(); } } private PartitionKey(PartitionKey toCopy) { this.spec = toCopy.spec; this.size = toCopy.size; this.partitionTuple = new Object[toCopy.partitionTuple.length]; this.transforms = toCopy.transforms; this.accessors = toCopy.accessors; for (int i = 0; i < partitionTuple.length; i += 1) { this.partitionTuple[i] = defensiveCopyIfNeeded(toCopy.partitionTuple[i]); } } private Object defensiveCopyIfNeeded(Object obj) { if (obj instanceof UTF8String) { // bytes backing the UTF8 string might be reused byte[] bytes = ((UTF8String) obj).getBytes(); return UTF8String.fromBytes(Arrays.copyOf(bytes, bytes.length)); } return obj; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("["); for (int i = 0; i < partitionTuple.length; i += 1) { if (i > 0) { sb.append(", "); } sb.append(partitionTuple[i]); } sb.append("]"); return sb.toString(); } PartitionKey copy() { return new PartitionKey(this); } String toPath() { return spec.partitionToPath(this); } 
@SuppressWarnings("unchecked") void partition(InternalRow row) { for (int i = 0; i < partitionTuple.length; i += 1) { Transform<Object, Object> transform = transforms[i]; partitionTuple[i] = transform.apply(accessors[i].get(row)); } } @Override public int size() { return size; } @Override @SuppressWarnings("unchecked") public <T> T get(int pos, Class<T> javaClass) { return javaClass.cast(partitionTuple[pos]); } @Override public <T> void set(int pos, T value) { partitionTuple[pos] = value; } @Override public boolean equals(Object o) { if (this == o) { return true; } else if (!(o instanceof PartitionKey)) { return false; } PartitionKey that = (PartitionKey) o; return Arrays.equals(partitionTuple, that.partitionTuple); } @Override public int hashCode() { return Arrays.hashCode(partitionTuple); } private interface Accessor<T> { Object get(T container); } private static Map<Integer, Accessor<InternalRow>> buildAccessors(Schema schema) { return TypeUtil.visit(schema, new BuildPositionAccessors()); } private static Accessor<InternalRow> newAccessor(int position, Type type) { switch (type.typeId()) { case STRING: return new StringAccessor(position, SparkSchemaUtil.convert(type)); case DECIMAL: return new DecimalAccessor(position, SparkSchemaUtil.convert(type)); case BINARY: return new BytesAccessor(position, SparkSchemaUtil.convert(type)); default: return new PositionAccessor(position, SparkSchemaUtil.convert(type)); } } private static Accessor<InternalRow> newAccessor(int position, boolean isOptional, Types.StructType type, Accessor<InternalRow> accessor) { int size = type.fields().size(); if (isOptional) { // the wrapped position handles null layers return new WrappedPositionAccessor(position, size, accessor); } else if (accessor instanceof PositionAccessor) { return new Position2Accessor(position, size, (PositionAccessor) accessor); } else if (accessor instanceof Position2Accessor) { return new Position3Accessor(position, size, (Position2Accessor) accessor); } else { return new WrappedPositionAccessor(position, size, accessor); } } private static class BuildPositionAccessors extends TypeUtil.SchemaVisitor<Map<Integer, Accessor<InternalRow>>> { @Override public Map<Integer, Accessor<InternalRow>> schema( Schema schema, Map<Integer, Accessor<InternalRow>> structResult) { return structResult; } @Override public Map<Integer, Accessor<InternalRow>> struct( Types.StructType struct, List<Map<Integer, Accessor<InternalRow>>> fieldResults) { Map<Integer, Accessor<InternalRow>> accessors = Maps.newHashMap(); List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fieldResults.size(); i += 1) { Types.NestedField field = fields.get(i); Map<Integer, Accessor<InternalRow>> result = fieldResults.get(i); if (result != null) { for (Map.Entry<Integer, Accessor<InternalRow>> entry : result.entrySet()) { accessors.put(entry.getKey(), newAccessor(i, field.isOptional(), field.type().asNestedType().asStructType(), entry.getValue())); } } else { accessors.put(field.fieldId(), newAccessor(i, field.type())); } } if (accessors.isEmpty()) { return null; } return accessors; } @Override public Map<Integer, Accessor<InternalRow>> field( Types.NestedField field, Map<Integer, Accessor<InternalRow>> fieldResult) { return fieldResult; } } private static class PositionAccessor implements Accessor<InternalRow> { private final DataType type; private int position; private PositionAccessor(int position, DataType type) { this.position = position; this.type = type; } @Override public Object get(InternalRow row) { if 
(row.isNullAt(position)) { return null; } return row.get(position, type); } DataType type() { return type; } int position() { return position; } } private static class StringAccessor extends PositionAccessor { private StringAccessor(int position, DataType type) { super(position, type); } @Override public Object get(InternalRow row) { if (row.isNullAt(position())) { return null; } return row.get(position(), type()).toString(); } } private static class DecimalAccessor extends PositionAccessor { private DecimalAccessor(int position, DataType type) { super(position, type); } @Override public Object get(InternalRow row) { if (row.isNullAt(position())) { return null; } return ((Decimal) row.get(position(), type())).toJavaBigDecimal(); } } private static class BytesAccessor extends PositionAccessor { private BytesAccessor(int position, DataType type) { super(position, type); } @Override public Object get(InternalRow row) { if (row.isNullAt(position())) { return null; } return ByteBuffer.wrap((byte[]) row.get(position(), type())); } } private static class Position2Accessor implements Accessor<InternalRow> { private final int p0; private final int size0; private final int p1; private final DataType type; private Position2Accessor(int position, int size, PositionAccessor wrapped) { this.p0 = position; this.size0 = size; this.p1 = wrapped.position; this.type = wrapped.type; } @Override public Object get(InternalRow row) { return row.getStruct(p0, size0).get(p1, type); } } private static class Position3Accessor implements Accessor<InternalRow> { private final int p0; private final int size0; private final int p1; private final int size1; private final int p2; private final DataType type; private Position3Accessor(int position, int size, Position2Accessor wrapped) { this.p0 = position; this.size0 = size; this.p1 = wrapped.p0; this.size1 = wrapped.size0; this.p2 = wrapped.p1; this.type = wrapped.type; } @Override public Object get(InternalRow row) { return row.getStruct(p0, size0).getStruct(p1, size1).get(p2, type); } } private static class WrappedPositionAccessor implements Accessor<InternalRow> { private final int position; private final int size; private final Accessor<InternalRow> accessor; private WrappedPositionAccessor(int position, int size, Accessor<InternalRow> accessor) { this.position = position; this.size = size; this.accessor = accessor; } @Override public Object get(InternalRow row) { InternalRow inner = row.getStruct(position, size); if (inner != null) { return accessor.get(inner); } return null; } } }
Can you add back these newlines? We try to avoid non-functional changes like this because they can cause commit conflicts.
apache-iceberg
java
@@ -755,7 +755,8 @@ const reducer = (state = defaultState, action) => {
 		case 'SEARCH_ADD':
 			{
 				newState = Object.assign({}, state);
-				const searches = newState.searches.slice();
+				let searches = newState.searches.slice();
+				searches = searches.filter(s => s.title !== action.search.title);
 				searches.push(action.search);
 				newState.searches = searches;
 			}
const Note = require('lib/models/Note.js'); const Folder = require('lib/models/Folder.js'); const ArrayUtils = require('lib/ArrayUtils.js'); const { ALL_NOTES_FILTER_ID } = require('lib/reserved-ids'); const defaultState = { notes: [], notesSource: '', notesParentType: null, folders: [], tags: [], masterKeys: [], notLoadedMasterKeys: [], searches: [], selectedNoteIds: [], selectedNoteHash: '', selectedFolderId: null, selectedTagId: null, selectedSearchId: null, selectedItemType: 'note', lastSelectedNotesIds: { Folder: {}, Tag: {}, Search: {}, }, showSideMenu: false, screens: {}, historyCanGoBack: false, syncStarted: false, syncReport: {}, searchQuery: '', settings: {}, sharedData: null, appState: 'starting', hasDisabledSyncItems: false, hasDisabledEncryptionItems: false, customCss: '', templates: [], collapsedFolderIds: [], clipperServer: { startState: 'idle', port: null, }, decryptionWorker: { state: 'idle', itemIndex: 0, itemCount: 0, }, selectedNoteTags: [], resourceFetcher: { toFetchCount: 0, }, backwardHistoryNotes: [], forwardHistoryNotes: [], plugins: {}, provisionalNoteIds: [], editorNoteStatuses: {}, }; const stateUtils = {}; const derivedStateCache_ = {}; // Allows, for a given state, to return the same derived // objects, to prevent unnecessary updates on calling components. const cacheEnabledOutput = (key, output) => { key = `${key}_${JSON.stringify(output)}`; if (derivedStateCache_[key]) return derivedStateCache_[key]; derivedStateCache_[key] = output; return derivedStateCache_[key]; }; stateUtils.notesOrder = function(stateSettings) { return cacheEnabledOutput('notesOrder', [ { by: stateSettings['notes.sortOrder.field'], dir: stateSettings['notes.sortOrder.reverse'] ? 'DESC' : 'ASC', }, ]); }; stateUtils.foldersOrder = function(stateSettings) { return cacheEnabledOutput('foldersOrder', [ { by: stateSettings['folders.sortOrder.field'], dir: stateSettings['folders.sortOrder.reverse'] ? 'DESC' : 'ASC', }, ]); }; stateUtils.parentItem = function(state) { const t = state.notesParentType; let id = null; if (t === 'Folder') id = state.selectedFolderId; if (t === 'Tag') id = state.selectedTagId; if (t === 'Search') id = state.selectedSearchId; if (!t || !id) return null; return { type: t, id: id }; }; stateUtils.lastSelectedNoteIds = function(state) { const parent = stateUtils.parentItem(state); if (!parent) return []; const output = state.lastSelectedNotesIds[parent.type][parent.id]; return output ?
output : []; }; stateUtils.getLastSeenNote = function(state) { const selectedNoteIds = state.selectedNoteIds; const notes = state.notes; if (selectedNoteIds != null && selectedNoteIds.length > 0) { const currNote = notes.find(note => note.id === selectedNoteIds[0]); if (currNote != null) { return { id: currNote.id, parent_id: currNote.parent_id, }; } } }; function arrayHasEncryptedItems(array) { for (let i = 0; i < array.length; i++) { if (array[i].encryption_applied) return true; } return false; } function stateHasEncryptedItems(state) { if (arrayHasEncryptedItems(state.notes)) return true; if (arrayHasEncryptedItems(state.folders)) return true; if (arrayHasEncryptedItems(state.tags)) return true; return false; } function folderSetCollapsed(state, action) { const collapsedFolderIds = state.collapsedFolderIds.slice(); const idx = collapsedFolderIds.indexOf(action.id); if (action.collapsed) { if (idx >= 0) return state; collapsedFolderIds.push(action.id); } else { if (idx < 0) return state; collapsedFolderIds.splice(idx, 1); } const newState = Object.assign({}, state); newState.collapsedFolderIds = collapsedFolderIds; return newState; } // When deleting a note, tag or folder function handleItemDelete(state, action) { const map = { FOLDER_DELETE: ['folders', 'selectedFolderId', true], NOTE_DELETE: ['notes', 'selectedNoteIds', false], TAG_DELETE: ['tags', 'selectedTagId', true], SEARCH_DELETE: ['searches', 'selectedSearchId', true], }; const listKey = map[action.type][0]; const selectedItemKey = map[action.type][1]; const isSingular = map[action.type][2]; const selectedItemKeys = isSingular ? [state[selectedItemKey]] : state[selectedItemKey]; const isSelected = selectedItemKeys.includes(action.id); const items = state[listKey]; const newItems = []; let newSelectedIndexes = []; for (let i = 0; i < items.length; i++) { const item = items[i]; if (isSelected) { // the selected item is deleted so select the following item // if multiple items are selected then just use the first one if (selectedItemKeys[0] == item.id) { newSelectedIndexes.push(newItems.length); } } else { // the selected item/s is not deleted so keep it selected if (selectedItemKeys.includes(item.id)) { newSelectedIndexes.push(newItems.length); } } if (item.id == action.id) { continue; } newItems.push(item); } if (newItems.length == 0) { newSelectedIndexes = []; // no remaining items so no selection } else if (newSelectedIndexes.length == 0) { newSelectedIndexes.push(0); // no selection exists so select the top } else { // when the items at end of list are deleted then select the end for (let i = 0; i < newSelectedIndexes.length; i++) { if (newSelectedIndexes[i] >= newItems.length) { newSelectedIndexes = [newItems.length - 1]; break; } } } const newState = Object.assign({}, state); newState[listKey] = newItems; if (listKey === 'notes') { newState.backwardHistoryNotes = newState.backwardHistoryNotes.filter(note => note.id != action.id); newState.forwardHistoryNotes = newState.forwardHistoryNotes.filter(note => note.id != action.id); } if (listKey === 'folders') { newState.backwardHistoryNotes = newState.backwardHistoryNotes.filter(note => note.parent_id != action.id); newState.forwardHistoryNotes = newState.forwardHistoryNotes.filter(note => note.parent_id != action.id); } const newIds = []; for (let i = 0; i < newSelectedIndexes.length; i++) { newIds.push(newItems[newSelectedIndexes[i]].id); } newState[selectedItemKey] = isSingular ? 
newIds[0] : newIds; if ((newIds.length == 0) && newState.notesParentType !== 'Folder') { newState.notesParentType = 'Folder'; } return newState; } function updateOneItem(state, action, keyName = '') { let itemsKey = null; if (keyName) { itemsKey = keyName; } else { if (action.type === 'TAG_UPDATE_ONE') itemsKey = 'tags'; if (action.type === 'FOLDER_UPDATE_ONE') itemsKey = 'folders'; if (action.type === 'MASTERKEY_UPDATE_ONE') itemsKey = 'masterKeys'; } const newItems = state[itemsKey].splice(0); const item = action.item; let found = false; for (let i = 0; i < newItems.length; i++) { const n = newItems[i]; if (n.id == item.id) { newItems[i] = Object.assign(newItems[i], item); found = true; break; } } if (!found) newItems.push(item); const newState = Object.assign({}, state); newState[itemsKey] = newItems; return newState; } function defaultNotesParentType(state, exclusion) { let newNotesParentType = null; if (exclusion !== 'Folder' && state.selectedFolderId) { newNotesParentType = 'Folder'; } else if (exclusion !== 'Tag' && state.selectedTagId) { newNotesParentType = 'Tag'; } else if (exclusion !== 'Search' && state.selectedSearchId) { newNotesParentType = 'Search'; } return newNotesParentType; } function changeSelectedFolder(state, action, options = null) { if (!options) options = {}; const newState = Object.assign({}, state); // Save the last seen note so that back will return to it. if (action.type === 'FOLDER_SELECT' && action.historyAction == 'goto') { const backwardHistoryNotes = newState.backwardHistoryNotes.slice(); let forwardHistoryNotes = newState.forwardHistoryNotes.slice(); // Don't update history if going to the same note again. const lastSeenNote = stateUtils.getLastSeenNote(state); if (lastSeenNote != null && action.id != lastSeenNote.id) { forwardHistoryNotes = []; backwardHistoryNotes.push(Object.assign({}, lastSeenNote)); } newState.backwardHistoryNotes = backwardHistoryNotes; newState.forwardHistoryNotes = forwardHistoryNotes; } newState.selectedFolderId = 'folderId' in action ? action.folderId : action.id; if (!newState.selectedFolderId) { newState.notesParentType = defaultNotesParentType(state, 'Folder'); } else { newState.notesParentType = 'Folder'; } if (newState.selectedFolderId === state.selectedFolderId && newState.notesParentType === state.notesParentType) return state; if (options.clearSelectedNoteIds) newState.selectedNoteIds = []; return newState; } function recordLastSelectedNoteIds(state, noteIds) { const newOnes = Object.assign({}, state.lastSelectedNotesIds); const parent = stateUtils.parentItem(state); if (!parent) return state; newOnes[parent.type][parent.id] = noteIds.slice(); return Object.assign({}, state, { lastSelectedNotesIds: newOnes, }); } function changeSelectedNotes(state, action, options = null) { if (!options) options = {}; let noteIds = []; if (action.id) noteIds = [action.id]; if (action.ids) noteIds = action.ids; if (action.noteId) noteIds = [action.noteId]; let newState = Object.assign({}, state); if (action.type === 'NOTE_SELECT') { newState.selectedNoteIds = noteIds; newState.selectedNoteHash = action.hash ? action.hash : ''; const backwardHistoryNotes = newState.backwardHistoryNotes.slice(); let forwardHistoryNotes = newState.forwardHistoryNotes.slice(); // The historyAction property is only used for user-initiated actions and tells how // the history stack should be handled. That property should not be present for // programmatic navigation. Possible values are: // - "goto": When going to a note, but not via the back/forward arrows. 
// - "pop": When clicking on the Back arrow // - "push": When clicking on the Forward arrow const lastSeenNote = stateUtils.getLastSeenNote(state); if (action.historyAction == 'goto' && lastSeenNote != null && action.id != lastSeenNote.id) { forwardHistoryNotes = []; backwardHistoryNotes.push(Object.assign({}, lastSeenNote)); } else if (action.historyAction === 'pop' && lastSeenNote != null) { if (forwardHistoryNotes.length === 0 || lastSeenNote.id != forwardHistoryNotes[forwardHistoryNotes.length - 1].id) { forwardHistoryNotes.push(Object.assign({}, lastSeenNote)); } backwardHistoryNotes.pop(); } else if (action.historyAction === 'push' && lastSeenNote != null) { if (backwardHistoryNotes.length === 0 || lastSeenNote.id != backwardHistoryNotes[backwardHistoryNotes.length - 1].id) { backwardHistoryNotes.push(Object.assign({}, lastSeenNote)); } forwardHistoryNotes.pop(); } newState.backwardHistoryNotes = backwardHistoryNotes; newState.forwardHistoryNotes = forwardHistoryNotes; return newState; } else if (action.type === 'NOTE_SELECT_ADD') { if (!noteIds.length) return state; newState.selectedNoteIds = ArrayUtils.unique(newState.selectedNoteIds.concat(noteIds)); } else if (action.type === 'NOTE_SELECT_REMOVE') { if (!noteIds.length) return state; // Nothing to unselect if (state.selectedNoteIds.length <= 1) return state; // Cannot unselect the last note const newSelectedNoteIds = []; for (let i = 0; i < newState.selectedNoteIds.length; i++) { const id = newState.selectedNoteIds[i]; if (noteIds.indexOf(id) >= 0) continue; newSelectedNoteIds.push(id); } newState.selectedNoteIds = newSelectedNoteIds; } else if (action.type === 'NOTE_SELECT_TOGGLE') { if (!noteIds.length) return state; if (newState.selectedNoteIds.indexOf(noteIds[0]) >= 0) { newState = changeSelectedNotes(state, { type: 'NOTE_SELECT_REMOVE', id: noteIds[0] }); } else { newState = changeSelectedNotes(state, { type: 'NOTE_SELECT_ADD', id: noteIds[0] }); } } else { throw new Error('Unreachable'); } newState = recordLastSelectedNoteIds(newState, newState.selectedNoteIds); return newState; } function removeItemFromArray(array, property, value) { for (let i = 0; i !== array.length; ++i) { const currentItem = array[i]; if (currentItem[property] === value) { array.splice(i, 1); break; } } return array; } const reducer = (state = defaultState, action) => { let newState = state; try { switch (action.type) { case 'NOTE_SELECT': case 'NOTE_SELECT_ADD': case 'NOTE_SELECT_REMOVE': case 'NOTE_SELECT_TOGGLE': newState = changeSelectedNotes(state, action); break; case 'NOTE_SELECT_EXTEND': { newState = Object.assign({}, state); if (!newState.selectedNoteIds.length) { newState.selectedNoteIds = [action.id]; } else { const selectRangeId1 = state.selectedNoteIds[state.selectedNoteIds.length - 1]; const selectRangeId2 = action.id; if (selectRangeId1 === selectRangeId2) return state; const newSelectedNoteIds = state.selectedNoteIds.slice(); let selectionStarted = false; for (let i = 0; i < state.notes.length; i++) { const id = state.notes[i].id; if (!selectionStarted && (id === selectRangeId1 || id === selectRangeId2)) { selectionStarted = true; if (newSelectedNoteIds.indexOf(id) < 0) newSelectedNoteIds.push(id); continue; } else if (selectionStarted && (id === selectRangeId1 || id === selectRangeId2)) { if (newSelectedNoteIds.indexOf(id) < 0) newSelectedNoteIds.push(id); break; } if (selectionStarted && newSelectedNoteIds.indexOf(id) < 0) { newSelectedNoteIds.push(id); } } newState.selectedNoteIds = newSelectedNoteIds; } } break; case 
'NOTE_SELECT_ALL': newState = Object.assign({}, state); newState.selectedNoteIds = newState.notes.map(n => n.id); break; case 'NOTE_SELECT_ALL_TOGGLE': { newState = Object.assign({}, state); const allSelected = state.notes.every(n => state.selectedNoteIds.includes(n.id)); if (allSelected) { newState.selectedNoteIds = []; } else { newState.selectedNoteIds = newState.notes.map(n => n.id); } break; } case 'SMART_FILTER_SELECT': newState = Object.assign({}, state); newState.notesParentType = 'SmartFilter'; newState.selectedSmartFilterId = action.id; break; case 'FOLDER_SELECT': newState = changeSelectedFolder(state, action, { clearSelectedNoteIds: true }); break; case 'FOLDER_AND_NOTE_SELECT': { newState = changeSelectedFolder(state, action); const noteSelectAction = Object.assign({}, action, { type: 'NOTE_SELECT' }); newState = changeSelectedNotes(newState, noteSelectAction); } break; case 'SETTING_UPDATE_ALL': newState = Object.assign({}, state); newState.settings = action.settings; break; case 'SETTING_UPDATE_ONE': { newState = Object.assign({}, state); const newSettings = Object.assign({}, state.settings); newSettings[action.key] = action.value; newState.settings = newSettings; } break; case 'NOTE_PROVISIONAL_FLAG_CLEAR': { const newIds = ArrayUtils.removeElement(state.provisionalNoteIds, action.id); if (newIds !== state.provisionalNoteIds) { newState = Object.assign({}, state, { provisionalNoteIds: newIds }); } } break; // Replace all the notes with the provided array case 'NOTE_UPDATE_ALL': newState = Object.assign({}, state); newState.notes = action.notes; newState.notesSource = action.notesSource; break; // Insert the note into the note list if it's new, or // update it within the note array if it already exists. case 'NOTE_UPDATE_ONE': { const modNote = action.note; const isViewingAllNotes = (state.notesParentType === 'SmartFilter' && state.selectedSmartFilterId === ALL_NOTES_FILTER_ID); const noteIsInFolder = function(note, folderId) { if (note.is_conflict) return folderId === Folder.conflictFolderId(); if (!('parent_id' in modNote) || note.parent_id == folderId) return true; return false; }; let movedNotePreviousIndex = 0; let noteFolderHasChanged = false; let newNotes = state.notes.slice(); let found = false; for (let i = 0; i < newNotes.length; i++) { const n = newNotes[i]; if (n.id == modNote.id) { // Note is still in the same folder if (isViewingAllNotes || noteIsInFolder(modNote, n.parent_id)) { // Merge the properties that have changed (in modNote) into // the object we already have. newNotes[i] = Object.assign({}, newNotes[i]); for (const n in modNote) { if (!modNote.hasOwnProperty(n)) continue; newNotes[i][n] = modNote[n]; } } else { // Note has moved to a different folder newNotes.splice(i, 1); noteFolderHasChanged = true; movedNotePreviousIndex = i; } found = true; break; } } // Note was not found - if the current folder is the same as the note folder, // add it to it. if (!found) { if (isViewingAllNotes || noteIsInFolder(modNote, state.selectedFolderId)) { newNotes.push(modNote); } } // newNotes = Note.sortNotes(newNotes, state.notesOrder, newState.settings.uncompletedTodosOnTop); newNotes = Note.sortNotes(newNotes, stateUtils.notesOrder(state.settings), newState.settings.uncompletedTodosOnTop); newState = Object.assign({}, state); newState.notes = newNotes; if (noteFolderHasChanged) { let newIndex = movedNotePreviousIndex; if (newIndex >= newNotes.length) newIndex = newNotes.length - 1; if (!newNotes.length) newIndex = -1; newState.selectedNoteIds = newIndex >= 0 ? 
[newNotes[newIndex].id] : []; } if (action.provisional) { newState.provisionalNoteIds.push(modNote.id); } else { const idx = newState.provisionalNoteIds.indexOf(modNote.id); if (idx >= 0) { const t = newState.provisionalNoteIds.slice(); t.splice(idx, 1); newState.provisionalNoteIds = t; } } } break; case 'NOTE_DELETE': { newState = handleItemDelete(state, action); const idx = newState.provisionalNoteIds.indexOf(action.id); if (idx >= 0) { const t = newState.provisionalNoteIds.slice(); t.splice(idx, 1); newState.provisionalNoteIds = t; } } break; case 'TAG_DELETE': newState = handleItemDelete(state, action); newState.selectedNoteTags = removeItemFromArray(newState.selectedNoteTags.splice(0), 'id', action.id); break; case 'FOLDER_UPDATE_ALL': newState = Object.assign({}, state); newState.folders = action.items; break; case 'FOLDER_SET_COLLAPSED': newState = folderSetCollapsed(state, action); break; case 'FOLDER_TOGGLE': if (state.collapsedFolderIds.indexOf(action.id) >= 0) { newState = folderSetCollapsed(state, Object.assign({ collapsed: false }, action)); } else { newState = folderSetCollapsed(state, Object.assign({ collapsed: true }, action)); } break; case 'FOLDER_SET_COLLAPSED_ALL': newState = Object.assign({}, state); newState.collapsedFolderIds = action.ids.slice(); break; case 'TAG_UPDATE_ALL': newState = Object.assign({}, state); newState.tags = action.items; break; case 'TAG_SELECT': newState = Object.assign({}, state); newState.selectedTagId = action.id; if (!action.id) { newState.notesParentType = defaultNotesParentType(state, 'Tag'); } else { newState.notesParentType = 'Tag'; } newState.selectedNoteIds = []; break; case 'TAG_UPDATE_ONE': { // We only want to update the selected note tags if the tag belongs to the currently open note const selectedNoteHasTag = !!state.selectedNoteTags.find(tag => tag.id === action.item.id); newState = updateOneItem(state, action); if (selectedNoteHasTag) newState = updateOneItem(newState, action, 'selectedNoteTags'); } break; case 'NOTE_TAG_REMOVE': { newState = updateOneItem(state, action, 'tags'); const tagRemoved = action.item; newState.selectedNoteTags = removeItemFromArray(newState.selectedNoteTags.splice(0), 'id', tagRemoved.id); } break; case 'EDITOR_NOTE_STATUS_SET': { const newStatuses = Object.assign({}, state.editorNoteStatuses); newStatuses[action.id] = action.status; newState = Object.assign({}, state, { editorNoteStatuses: newStatuses }); } break; case 'EDITOR_NOTE_STATUS_REMOVE': { const newStatuses = Object.assign({}, state.editorNoteStatuses); delete newStatuses[action.id]; newState = Object.assign({}, state, { editorNoteStatuses: newStatuses }); } break; case 'FOLDER_UPDATE_ONE': case 'MASTERKEY_UPDATE_ONE': newState = updateOneItem(state, action); break; case 'FOLDER_DELETE': newState = handleItemDelete(state, action); break; case 'MASTERKEY_UPDATE_ALL': newState = Object.assign({}, state); newState.masterKeys = action.items; break; case 'MASTERKEY_SET_NOT_LOADED': newState = Object.assign({}, state); newState.notLoadedMasterKeys = action.ids; break; case 'MASTERKEY_ADD_NOT_LOADED': { if (state.notLoadedMasterKeys.indexOf(action.id) < 0) { newState = Object.assign({}, state); const keys = newState.notLoadedMasterKeys.slice(); keys.push(action.id); newState.notLoadedMasterKeys = keys; } } break; case 'MASTERKEY_REMOVE_NOT_LOADED': { const ids = action.id ? 
[action.id] : action.ids; for (let i = 0; i < ids.length; i++) { const id = ids[i]; const index = state.notLoadedMasterKeys.indexOf(id); if (index >= 0) { newState = Object.assign({}, state); const keys = newState.notLoadedMasterKeys.slice(); keys.splice(index, 1); newState.notLoadedMasterKeys = keys; } } } break; case 'SYNC_STARTED': newState = Object.assign({}, state); newState.syncStarted = true; break; case 'SYNC_COMPLETED': newState = Object.assign({}, state); newState.syncStarted = false; break; case 'SYNC_REPORT_UPDATE': newState = Object.assign({}, state); newState.syncReport = action.report; break; case 'SEARCH_QUERY': newState = Object.assign({}, state); newState.searchQuery = action.query.trim(); break; case 'SEARCH_ADD': { newState = Object.assign({}, state); const searches = newState.searches.slice(); searches.push(action.search); newState.searches = searches; } break; case 'SEARCH_UPDATE': { newState = Object.assign({}, state); const searches = newState.searches.slice(); let found = false; for (let i = 0; i < searches.length; i++) { if (searches[i].id === action.search.id) { searches[i] = Object.assign({}, action.search); found = true; break; } } if (!found) searches.push(action.search); if (!action.search.query_pattern) { newState.notesParentType = defaultNotesParentType(state, 'Search'); } else { newState.notesParentType = 'Search'; } newState.searches = searches; } break; case 'SEARCH_DELETE': newState = handleItemDelete(state, action); break; case 'SEARCH_SELECT': { newState = Object.assign({}, state); newState.selectedSearchId = action.id; if (!action.id) { newState.notesParentType = defaultNotesParentType(state, 'Search'); } else { newState.notesParentType = 'Search'; } // Update history when searching const lastSeenNote = stateUtils.getLastSeenNote(state); if (lastSeenNote != null && (state.backwardHistoryNotes.length === 0 || state.backwardHistoryNotes[state.backwardHistoryNotes.length - 1].id != lastSeenNote.id)) { newState.forwardHistoryNotes = []; newState.backwardHistoryNotes.push(Object.assign({},lastSeenNote)); } newState.selectedNoteIds = []; } break; case 'APP_STATE_SET': newState = Object.assign({}, state); newState.appState = action.state; break; case 'SYNC_HAS_DISABLED_SYNC_ITEMS': newState = Object.assign({}, state); newState.hasDisabledSyncItems = true; break; case 'ENCRYPTION_HAS_DISABLED_ITEMS': newState = Object.assign({}, state); newState.hasDisabledEncryptionItems = action.value; break; case 'CLIPPER_SERVER_SET': { newState = Object.assign({}, state); const clipperServer = Object.assign({}, newState.clipperServer); if ('startState' in action) clipperServer.startState = action.startState; if ('port' in action) clipperServer.port = action.port; newState.clipperServer = clipperServer; } break; case 'DECRYPTION_WORKER_SET': { newState = Object.assign({}, state); const decryptionWorker = Object.assign({}, newState.decryptionWorker); for (const n in action) { if (!action.hasOwnProperty(n) || n === 'type') continue; decryptionWorker[n] = action[n]; } newState.decryptionWorker = decryptionWorker; } break; case 'RESOURCE_FETCHER_SET': { newState = Object.assign({}, state); const rf = Object.assign({}, action); delete rf.type; newState.resourceFetcher = rf; } break; case 'LOAD_CUSTOM_CSS': newState = Object.assign({}, state); newState.customCss = action.css; break; case 'TEMPLATE_UPDATE_ALL': newState = Object.assign({}, state); newState.templates = action.templates; break; case 'SET_NOTE_TAGS': newState = Object.assign({}, state); newState.selectedNoteTags = 
action.items; break; case 'PLUGIN_DIALOG_SET': { if (!action.pluginName) throw new Error('action.pluginName not specified'); newState = Object.assign({}, state); const newPlugins = Object.assign({}, newState.plugins); const newPlugin = newState.plugins[action.pluginName] ? Object.assign({}, newState.plugins[action.pluginName]) : {}; if ('open' in action) newPlugin.dialogOpen = action.open; newPlugins[action.pluginName] = newPlugin; newState.plugins = newPlugins; } break; } } catch (error) { error.message = `In reducer: ${error.message} Action: ${JSON.stringify(action)}`; throw error; } if (action.type.indexOf('NOTE_UPDATE') === 0 || action.type.indexOf('FOLDER_UPDATE') === 0 || action.type.indexOf('TAG_UPDATE') === 0) { newState = Object.assign({}, newState); newState.hasEncryptedItems = stateHasEncryptedItems(newState); } return newState; }; module.exports = { reducer, defaultState, stateUtils };
Why not just `if (!searches.includes(action.search)) searches.push(action.search)`? That way you don't need to remove then add the element.
laurent22-joplin
js
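A note on the suggestion above: `Array.prototype.includes` compares objects by reference, so it would only catch the case where the very same `action.search` object is dispatched twice; a freshly built search that merely shares a `title` would still be appended. Filtering by `title`, as the patch does, also lets a re-saved search replace its stale predecessor. A minimal sketch of the difference, using hypothetical stand-in objects (not Joplin's full search shape):

```js
// Two distinct objects that represent the "same" saved search by title.
const existing = { title: 'todo', query_pattern: 'todo' };
const incoming = { title: 'todo', query_pattern: 'todo OR later' };

// Reviewer's one-liner: reference equality misses the duplicate title.
const byIncludes = [existing];
if (!byIncludes.includes(incoming)) byIncludes.push(incoming);
console.log(byIncludes.length); // 2 -- both 'todo' entries are kept

// Patch's approach: drop any earlier search with the same title, then push,
// so the incoming object replaces the stale one instead of duplicating it.
const byTitle = [existing].filter(s => s.title !== incoming.title);
byTitle.push(incoming);
console.log(byTitle.length); // 1 -- only the latest 'todo' search survives
```

The `includes` check would suffice only if `SEARCH_ADD` were guaranteed to receive the identical object on re-dispatch.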
@@ -276,6 +276,17 @@ class Demo extends AbstractBase
         return $a . $b . "." . $c;
     }
 
+    /**
+     * Generate a fake call number prefix.
+     *
+     * @return string
+     */
+    protected function getFakeCallNumPrefix()
+    {
+        $codes = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+        return substr(str_shuffle($codes), 1, rand(0, 3));
+    }
+
     /**
      * Get a random ID from the Solr index.
      *
<?php /** * Advanced Dummy ILS Driver -- Returns sample values based on Solr index. * * Note that some sample values (holds, transactions, fines) are stored in * the session. You can log out and log back in to get a different set of * values. * * PHP version 7 * * Copyright (C) Villanova University 2007. * Copyright (C) The National Library of Finland 2014. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * @category VuFind * @package ILS_Drivers * @author Greg Pendlebury <[email protected]> * @author Ere Maijala <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki */ namespace VuFind\ILS\Driver; use ArrayObject; use Laminas\Http\Request as HttpRequest; use Laminas\Session\Container as SessionContainer; use VuFind\Date\DateException; use VuFind\Exception\ILS as ILSException; use VuFindSearch\Query\Query; use VuFindSearch\Service as SearchService; /** * Advanced Dummy ILS Driver -- Returns sample values based on Solr index. * * @category VuFind * @package ILS_Drivers * @author Greg Pendlebury <[email protected]> * @author Ere Maijala <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki */ class Demo extends AbstractBase { /** * Connection used when getting random bib ids from Solr * * @var SearchService */ protected $searchService; /** * Total count of records in the Solr index (used for random bib lookup) * * @var int */ protected $totalRecords; /** * Container for storing persistent simulated ILS data. * * @var SessionContainer[] */ protected $session = []; /** * Factory function for constructing the SessionContainer. * * @var callable */ protected $sessionFactory; /** * HTTP Request object (if available). * * @var ?HttpRequest */ protected $request; /** * Should we return bib IDs in MyResearch responses? * * @var bool */ protected $idsInMyResearch = true; /** * Should we support Storage Retrieval Requests? * * @var bool */ protected $storageRetrievalRequests = true; /** * Should we support ILLRequests? 
* * @var bool */ protected $ILLRequests = true; /** * Date converter object * * @var \VuFind\Date\Converter */ protected $dateConverter; /** * Failure probability settings * * @var array */ protected $failureProbabilities = []; /** * Constructor * * @param \VuFind\Date\Converter $dateConverter Date converter object * @param SearchService $ss Search service * @param callable $sessionFactory Factory function returning * SessionContainer object for fake data to simulate consistency and reduce Solr * hits * @param HttpRequest $request HTTP request object (optional) */ public function __construct(\VuFind\Date\Converter $dateConverter, SearchService $ss, $sessionFactory, HttpRequest $request = null ) { $this->dateConverter = $dateConverter; $this->searchService = $ss; if (!is_callable($sessionFactory)) { throw new \Exception('Invalid session factory passed to constructor.'); } $this->sessionFactory = $sessionFactory; $this->request = $request; } /** * Initialize the driver. * * Validate configuration and perform all resource-intensive tasks needed to * make the driver active. * * @throws ILSException * @return void */ public function init() { if (isset($this->config['Catalog']['idsInMyResearch'])) { $this->idsInMyResearch = $this->config['Catalog']['idsInMyResearch']; } if (isset($this->config['Catalog']['storageRetrievalRequests'])) { $this->storageRetrievalRequests = $this->config['Catalog']['storageRetrievalRequests']; } if (isset($this->config['Catalog']['ILLRequests'])) { $this->ILLRequests = $this->config['Catalog']['ILLRequests']; } if (isset($this->config['Failure_Probabilities'])) { $this->failureProbabilities = $this->config['Failure_Probabilities']; } if (isset($this->config['Holdings'])) { foreach ($this->config['Holdings'] as $id => $json) { foreach (json_decode($json, true) as $i => $status) { $this->setStatus($id, $status, $i > 0); } } } $this->checkIntermittentFailure(); } /** * Check for a simulated failure. Returns true for failure, false for * success. * * @param string $method Name of method that might fail * @param int $default Default probability (if config is empty) * * @return bool */ protected function isFailing($method, $default = 0) { // Method may come in like Class::Method, we just want the Method part $parts = explode('::', $method); $key = array_pop($parts); $probability = $this->failureProbabilities[$key] ?? $default; return rand(1, 100) <= $probability; } /** * Generate a fake location name. * * @param bool $returnText If true, return location text; if false, return ID * * @return string */ protected function getFakeLoc($returnText = true) { $locations = $this->getPickUpLocations(); $loc = rand() % count($locations); return $returnText ? $locations[$loc]['locationDisplay'] : $locations[$loc]['locationID']; } /** * Generate fake services. * * @return array */ protected function getFakeServices() { // Load service configuration; return empty array if no services defined. $services = isset($this->config['Records']['services']) ? (array)$this->config['Records']['services'] : []; if (empty($services)) { return []; } // Make it more likely we have a single service than many: $count = rand(1, 5) == 1 ? rand(1, count($services)) : 1; $keys = (array)array_rand($services, $count); $fakeServices = []; foreach ($keys as $key) { if ($key !== null) { $fakeServices[] = $services[$key]; } } return $fakeServices; } /** * Generate a fake status message. 
* * @return string */ protected function getFakeStatus() { $loc = rand() % 10; switch ($loc) { case 10: return "Missing"; case 9: return "On Order"; case 8: return "Invoiced"; default: return "Available"; } } /** * Generate a fake call number. * * @return string */ protected function getFakeCallNum() { $codes = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; $a = $codes[rand() % strlen($codes)]; $b = rand() % 899 + 100; $c = rand() % 9999; return $a . $b . "." . $c; } /** * Get a random ID from the Solr index. * * @return string */ protected function getRandomBibId() { [$id] = $this->getRandomBibIdAndTitle(); return $id; } /** * Get a random ID and title from the Solr index. * * @return array [id, title] */ protected function getRandomBibIdAndTitle() { $source = $this->getRecordSource(); $query = $this->config['Records']['query'] ?? '*:*'; $result = $this->searchService->random($source, new Query($query), 1); if (count($result) === 0) { throw new \Exception("Problem retrieving random record from $source."); } $record = current($result->getRecords()); return [$record->getUniqueId(), $record->getTitle()]; } /** * Get the name of the search backend providing records. * * @return string */ protected function getRecordSource() { return $this->config['Records']['source'] ?? DEFAULT_SEARCH_BACKEND; } /** * Should we simulate a system failure? * * @return void * @throws ILSException */ protected function checkIntermittentFailure() { if ($this->isFailing(__METHOD__, 0)) { throw new ILSException('Simulating low-level system failure'); } } /** * Are renewals blocked? * * @return bool */ protected function checkRenewBlock() { return $this->isFailing(__METHOD__, 25); } /** * Check whether the patron is blocked from placing requests (holds/ILL/SRR). * * @param array $patron Patron data from patronLogin(). * * @return mixed A boolean false if no blocks are in place and an array * of block reasons if blocks are in place * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getRequestBlocks($patron) { return $this->isFailing(__METHOD__, 10) ? ['simulated request block'] : false; } /** * Check whether the patron has any blocks on their account. * * @param array $patron Patron data from patronLogin(). * * @return mixed A boolean false if no blocks are in place and an array * of block reasons if blocks are in place * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getAccountBlocks($patron) { return $this->isFailing(__METHOD__, 10) ? ['simulated account block'] : false; } /** * Generates a random, fake holding array * * @param string $id set id * @param string $number set number for multiple items * @param array $patron Patron data * * @return array */ protected function getRandomHolding($id, $number, array $patron = null) { $status = $this->getFakeStatus(); $location = $this->getFakeLoc(); $locationhref = ($location === 'Campus A') ? 'http://campus-a' : false; $result = [ 'id' => $id, 'source' => $this->getRecordSource(), 'item_id' => $number, 'number' => $number, 'barcode' => sprintf("%08d", rand() % 50000), 'availability' => $status == 'Available', 'status' => $status, 'location' => $location, 'locationhref' => $locationhref, 'reserve' => (rand() % 100 > 49) ? 'Y' : 'N', 'callnumber' => $this->getFakeCallNum(), 'duedate' => '', 'is_holdable' => true, 'addLink' => $patron ? true : false, 'level' => 'copy', 'storageRetrievalRequest' => 'auto', 'addStorageRetrievalRequestLink' => $patron ? 'check' : false, 'ILLRequest' => 'auto', 'addILLRequestLink' => $patron ? 
'check' : false, 'services' => $status == 'Available' ? $this->getFakeServices() : [], ]; switch (rand(1, 5)) { case 1: $result['location'] = 'Digital copy available'; $result['locationhref'] = 'http://digital'; $result['__electronic__'] = true; $result['availability'] = true; $result['status'] = ''; break; case 2: $result['location'] = 'Electronic Journals'; $result['locationhref'] = 'http://electronic'; $result['__electronic__'] = true; $result['availability'] = true; $result['status'] = 'Available from ' . rand(2010, 2019); } return $result; } /** * Generate an associative array containing some sort of ID (for cover * generation). * * @return array */ protected function getRandomItemIdentifier() { switch (rand(1, 4)) { case 1: return ['isbn' => '1558612742']; case 2: return ['oclc' => '55114477']; case 3: return ['issn' => '1133-0686']; } return ['upc' => '733961100525']; } /** * Generate a list of holds, storage retrieval requests or ILL requests. * * @param string $requestType Request type (Holds, StorageRetrievalRequests or * ILLRequests) * * @return ArrayObject List of requests */ protected function createRequestList($requestType) { // How many items are there? %10 - 1 = 10% chance of none, // 90% of 1-9 (give or take some odd maths) $items = rand() % 10 - 1; $requestGroups = $this->getRequestGroups(null, null); $list = new ArrayObject(); for ($i = 0; $i < $items; $i++) { $location = $this->getFakeLoc(false); $randDays = rand() % 10; $currentItem = [ "location" => $location, "create" => $this->dateConverter->convertToDisplayDate( 'U', strtotime("now - {$randDays} days") ), "expire" => $this->dateConverter->convertToDisplayDate( 'U', strtotime("now + 30 days") ), "item_id" => $i, "reqnum" => $i ]; // Inject a random identifier of some sort: $currentItem += $this->getRandomItemIdentifier(); if ($i == 2 || rand() % 5 == 1) { // Mimic an ILL request $currentItem["id"] = "ill_request_$i"; $currentItem["title"] = "ILL Hold Title $i"; $currentItem['institution_id'] = 'ill_institution'; $currentItem['institution_name'] = 'ILL Library'; $currentItem['institution_dbkey'] = 'ill_institution'; } else { if ($this->idsInMyResearch) { [$currentItem['id'], $currentItem['title']] = $this->getRandomBibIdAndTitle(); $currentItem['source'] = $this->getRecordSource(); } else { $currentItem['title'] = 'Demo Title ' . $i; } } if ($requestType == 'Holds') { $pos = rand() % 5; if ($pos > 1) { $currentItem['position'] = $pos; } else { $currentItem['available'] = true; if (rand() % 3 != 1) { $lastDate = strtotime('now + 3 days'); $currentItem['last_pickup_date'] = $this->dateConverter ->convertToDisplayDate('U', $lastDate); } } $pos = rand(0, count($requestGroups) - 1); $currentItem['requestGroup'] = $requestGroups[$pos]['name']; } else { $status = rand() % 5; $currentItem['available'] = $status == 1; $currentItem['canceled'] = $status == 2; $currentItem['processed'] = ($status == 1 || rand(1, 3) == 3) ? $this->dateConverter->convertToDisplayDate('U', time()) : ''; if ($requestType == 'ILLRequests') { $transit = rand() % 2; if (!$currentItem['available'] && !$currentItem['canceled'] && $transit == 1 ) { $currentItem['in_transit'] = $location; } else { $currentItem['in_transit'] = false; } } } $list->append($currentItem); } return $list; } /** * Get Status * * This is responsible for retrieving the status information of a certain * record.
* * @param string $id The record id to retrieve the holdings for * * @return mixed On success, an associative array with the following keys: * id, availability (boolean), status, location, reserve, callnumber. */ public function getStatus($id) { return $this->getSimulatedStatus($id); } /** * Get suppressed records. * * @return array ID numbers of suppressed records in the system. */ public function getSuppressedRecords() { return $this->config['Records']['suppressed'] ?? []; } /** * Get the session container (constructing it on demand if not already present) * * @param string $patron ID of current patron * * @return SessionContainer */ protected function getSession($patron = null) { // We have a separate session for each user ID; if none is specified, // try to pick the first one arbitrarily; the difference only matters // when testing multiple accounts. $selectedPatron = empty($patron) ? (current(array_keys($this->session)) ?: 'default') : md5($patron); // SessionContainer not defined yet? Build it now: if (!isset($this->session[$selectedPatron])) { $factory = $this->sessionFactory; $this->session[$selectedPatron] = $factory($selectedPatron); } $result = $this->session[$selectedPatron]; // Special case: check for clear_demo request parameter to reset: if ($this->request && $this->request->getQuery('clear_demo')) { $result->exchangeArray([]); } return $result; } /** * Get Simulated Status (support method for getStatus/getHolding) * * This is responsible for retrieving the status information of a certain * record. * * @param string $id The record id to retrieve the holdings for * @param array $patron Patron data * * @return mixed On success, an associative array with the following keys: * id, availability (boolean), status, location, reserve, callnumber. */ protected function getSimulatedStatus($id, array $patron = null) { $id = (string)$id; // Do we have a fake status persisted in the session? $session = $this->getSession($patron['id'] ?? null); if (isset($session->statuses[$id])) { return $session->statuses[$id]; } // Create fake entries for a random number of items $holding = []; $records = rand() % 15; for ($i = 1; $i <= $records; $i++) { $holding[] = $this->setStatus($id, [], true, $patron); } return $holding; } /** * Set Status * * @param array $id id for record * @param array $holding associative array with options to specify * number, barcode, availability, status, location, * reserve, callnumber, duedate, is_holdable, and addLink * @param bool $append add another record or replace current record * @param array $patron Patron data * * @return array */ protected function setStatus($id, $holding = [], $append = true, $patron = null) { $id = (string)$id; $session = $this->getSession($patron['id'] ?? null); $i = isset($session->statuses[$id]) ? count($session->statuses[$id]) + 1 : 1; $holding = array_merge($this->getRandomHolding($id, $i, $patron), $holding); // if statuses is already stored if ($session->statuses) { // and this id is part of it if ($append && isset($session->statuses[$id])) { // add to the array $session->statuses[$id][] = $holding; } else { // if we're over-writing or if there's nothing stored for this id $session->statuses[$id] = [$holding]; } } else { // brand new status storage! $session->statuses = [$id => [$holding]]; } return $holding; } /** * Get Statuses * * This is responsible for retrieving the status information for a * collection of records. 
* * @param array $ids The array of record ids to retrieve the status for * * @return array An array of getStatus() return values on success. */ public function getStatuses($ids) { $this->checkIntermittentFailure(); return array_map([$this, 'getStatus'], $ids); } /** * Get Holding * * This is responsible for retrieving the holding information of a certain * record. * * @param string $id The record id to retrieve the holdings for * @param array $patron Patron data * @param array $options Extra options * * @return array On success, an associative array with the following keys: * id, availability (boolean), status, location, reserve, callnumber, * duedate, number, barcode. */ public function getHolding($id, array $patron = null, array $options = []) { $this->checkIntermittentFailure(); // Get basic status info: $status = $this->getSimulatedStatus($id, $patron); $issue = 1; // Add notes and summary: foreach (array_keys($status) as $i) { $itemNum = $i + 1; $noteCount = rand(1, 3); $status[$i]['holdings_notes'] = []; $status[$i]['item_notes'] = []; for ($j = 1; $j <= $noteCount; $j++) { $status[$i]['holdings_notes'][] = "Item $itemNum holdings note $j" . ($j === 1 ? ' https://vufind.org/?f=1&b=2#sample_link' : ''); $status[$i]['item_notes'][] = "Item $itemNum note $j"; } $summCount = rand(1, 3); $status[$i]['summary'] = []; for ($j = 1; $j <= $summCount; $j++) { $status[$i]['summary'][] = "Item $itemNum summary $j"; } $volume = intdiv($issue, 4) + 1; $seriesIssue = $issue % 4; $issue = $issue + 1; $status[$i]['enumchron'] = "volume $volume, issue $seriesIssue"; } // Filter out electronic holdings from the normal holdings list: $status = array_filter( $status, function ($a) { return !($a['__electronic__'] ?? false); } ); // Slice out a chunk if pagination is enabled. $slice = null; if ($options['itemLimit'] ?? null) { // For sensible pagination, we need to sort by location: $callback = function ($a, $b) { return strcmp($a['location'], $b['location']); }; usort($status, $callback); $slice = array_slice( $status, $options['offset'] ?? 0, $options['itemLimit'] ); } // Electronic holdings: $statuses = $this->getStatus($id); $electronic = []; foreach ($statuses as $item) { if ($item['__electronic__'] ?? false) { // Don't expose internal __electronic__ flag upstream: unset($item['__electronic__']); $electronic[] = $item; } } // Send back final value: return [ 'total' => count($status), 'holdings' => $slice ?: $status, 'electronic_holdings' => $electronic ]; } /** * Get Purchase History * * This is responsible for retrieving the acquisitions history data for the * specific record (usually recently received issues of a serial). * * @param string $id The record id to retrieve the info for * * @return array An array with the acquisitions data on success. */ public function getPurchaseHistory($id) { $this->checkIntermittentFailure(); $issues = rand(0, 3); $retval = []; for ($i = 0; $i < $issues; $i++) { $retval[] = ['issue' => 'issue ' . ($i + 1)]; } return $retval; } /** * Patron Login * * This is responsible for authenticating a patron against the catalog. * * @param string $username The patron username * @param string $password The patron password * * @throws ILSException * @return mixed Associative array of patron info on successful login, * null on unsuccessful login. 
*/ public function patronLogin($username, $password) { $this->checkIntermittentFailure(); $user = [ 'id' => trim($username), 'firstname' => 'Lib', 'lastname' => 'Rarian', 'cat_username' => trim($username), 'cat_password' => trim($password), 'email' => '[email protected]', 'major' => null, 'college' => null ]; $loginMethod = $this->config['Catalog']['loginMethod'] ?? 'password'; if ('email' === $loginMethod) { $user['email'] = $username; $user['cat_password'] = ''; return $user; } if (isset($this->config['Users'])) { if (!isset($this->config['Users'][$username]) || $password !== $this->config['Users'][$username] ) { return null; } } return $user; } /** * Get Patron Profile * * This is responsible for retrieving the profile for a specific patron. * * @param array $patron The patron array * * @return array Array of the patron's profile data on success. */ public function getMyProfile($patron) { $this->checkIntermittentFailure(); $patron = [ 'firstname' => 'Lib-' . $patron['cat_username'], 'lastname' => 'Rarian', 'address1' => 'Somewhere...', 'address2' => 'Over the Rainbow', 'zip' => '12345', 'city' => 'City', 'country' => 'Country', 'phone' => '1900 CALL ME', 'mobile_phone' => '1234567890', 'group' => 'Library Staff', 'expiration_date' => 'Someday' ]; return $patron; } /** * Get Patron Fines * * This is responsible for retrieving all fines by a specific patron. * * @param array $patron The patron array from patronLogin * * @return mixed Array of the patron's fines on success. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getMyFines($patron) { $this->checkIntermittentFailure(); $session = $this->getSession($patron['id'] ?? null); if (!isset($session->fines)) { // How many items are there? %20 - 2 = 10% chance of none, // 90% of 1-18 (give or take some odd maths) $fines = rand() % 20 - 2; $fineList = []; for ($i = 0; $i < $fines; $i++) { // How many days overdue is the item? $day_overdue = rand() % 30 + 5; // Calculate checkout date: $checkout = strtotime("now - " . ($day_overdue + 14) . " days"); // 50c a day fine? $fine = $day_overdue * 0.50; $fineList[] = [ "amount" => $fine * 100, "checkout" => $this->dateConverter ->convertToDisplayDate('U', $checkout), 'createdate' => $this->dateConverter ->convertToDisplayDate('U', time()), // After 20 days it becomes 'Long Overdue' "fine" => $day_overdue > 20 ? "Long Overdue" : "Overdue", // 50% chance they've paid half of it "balance" => (rand() % 100 > 49 ? $fine / 2 : $fine) * 100, "duedate" => $this->dateConverter->convertToDisplayDate( 'U', strtotime("now - $day_overdue days") ) ]; // Some fines will have no id or title: if (rand() % 3 != 1) { if ($this->idsInMyResearch) { [$fineList[$i]['id'], $fineList[$i]['title']] = $this->getRandomBibIdAndTitle(); $fineList[$i]['source'] = $this->getRecordSource(); } else { $fineList[$i]['title'] = 'Demo Title ' . $i; } } } $session->fines = $fineList; } return $session->fines; } /** * Get Patron Holds * * This is responsible for retrieving all holds by a specific patron. * * @param array $patron The patron array from patronLogin * * @return mixed Array of the patron's holds on success. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getMyHolds($patron) { $this->checkIntermittentFailure(); $session = $this->getSession($patron['id'] ?? 
null); if (!isset($session->holds)) { $session->holds = $this->createRequestList('Holds'); } return $session->holds; } /** * Get Patron Storage Retrieval Requests * * This is responsible for retrieving all call slips by a specific patron. * * @param array $patron The patron array from patronLogin * * @return mixed Array of the patron's holds * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getMyStorageRetrievalRequests($patron) { $this->checkIntermittentFailure(); $session = $this->getSession($patron['id'] ?? null); if (!isset($session->storageRetrievalRequests)) { $session->storageRetrievalRequests = $this->createRequestList('StorageRetrievalRequests'); } return $session->storageRetrievalRequests; } /** * Get Patron ILL Requests * * This is responsible for retrieving all ILL requests by a specific patron. * * @param array $patron The patron array from patronLogin * * @return mixed Array of the patron's ILL requests * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getMyILLRequests($patron) { $this->checkIntermittentFailure(); $session = $this->getSession($patron['id'] ?? null); if (!isset($session->ILLRequests)) { $session->ILLRequests = $this->createRequestList('ILLRequests'); } return $session->ILLRequests; } /** * Construct a transaction list for getMyTransactions; may be random or * pre-set depending on Demo.ini settings. * * @return array */ protected function getTransactionList() { $this->checkIntermittentFailure(); // If Demo.ini includes a fixed set of transactions, load those; otherwise // build some random ones. return isset($this->config['Records']['transactions']) ? json_decode($this->config['Records']['transactions'], true) : $this->getRandomTransactionList(); } /** * Calculate the due status for a due date. * * @param int $due Due date as Unix timestamp * * @return string */ protected function calculateDueStatus($due) { $dueRelative = $due - time(); if ($dueRelative < 0) { return 'overdue'; } elseif ($dueRelative < 24 * 60 * 60) { return 'due'; } return false; } /** * Construct a random set of transactions for getMyTransactions(). * * @return array */ protected function getRandomTransactionList() { // How many items are there? %10 - 1 = 10% chance of none, // 90% of 1-9 (give or take some odd maths) $trans = rand() % 10 - 1; $transList = []; for ($i = 0; $i < $trans; $i++) { // When is it due? +/- up to 15 days $due_relative = rand() % 30 - 15; // Due date $rawDueDate = strtotime( 'now ' . ($due_relative >= 0 ? '+' : '') . $due_relative . ' days' ); // Times renewed : 0,0,0,0,0,1,2,3,4,5 $renew = rand() % 10 - 5; if ($renew < 0) { $renew = 0; } // Renewal limit $renewLimit = $renew + rand() % 3; // Pending requests : 0,0,0,0,0,1,2,3,4,5 $req = rand() % 10 - 5; if ($req < 0) { $req = 0; } // Create a generic transaction: $transList[] = $this->getRandomItemIdentifier() + [ // maintain separate display vs. raw due dates (the raw // one is used for renewals, in case the user display // format is incompatible with date math). 
'duedate' => $this->dateConverter->convertToDisplayDate( 'U', $rawDueDate ), 'rawduedate' => $rawDueDate, 'dueStatus' => $this->calculateDueStatus($rawDueDate), 'barcode' => sprintf("%08d", rand() % 50000), 'renew' => $renew, 'renewLimit' => $renewLimit, 'request' => $req, 'item_id' => $i, 'renewable' => $renew < $renewLimit, ]; if ($i == 2 || rand() % 5 == 1) { // Mimic an ILL loan $transList[$i] += [ 'id' => "ill_institution_$i", 'title' => "ILL Loan Title $i", 'institution_id' => 'ill_institution', 'institution_name' => 'ILL Library', 'institution_dbkey' => 'ill_institution', 'borrowingLocation' => 'ILL Service Desk' ]; } else { $transList[$i]['borrowingLocation'] = $this->getFakeLoc(); if ($this->idsInMyResearch) { [$transList[$i]['id'], $transList[$i]['title']] = $this->getRandomBibIdAndTitle(); $transList[$i]['source'] = $this->getRecordSource(); } else { $transList[$i]['title'] = 'Demo Title ' . $i; } } } return $transList; } /** * Get Patron Transactions * * This is responsible for retrieving all transactions (i.e. checked out items) * by a specific patron. * * @param array $patron The patron array from patronLogin * @param array $params Parameters * * @return mixed Array of the patron's transactions on success. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getMyTransactions($patron, $params = []) { $this->checkIntermittentFailure(); $session = $this->getSession($patron['id'] ?? null); if (!isset($session->transactions)) { $session->transactions = $this->getTransactionList(); } // Order $transactions = $session->transactions; if (!empty($params['sort'])) { $sort = explode( ' ', !empty($params['sort']) ? $params['sort'] : 'date_due desc', 2 ); $descending = isset($sort[1]) && 'desc' === $sort[1]; usort( $transactions, function ($a, $b) use ($sort, $descending) { if ('title' === $sort[0]) { $cmp = strcmp($a['title'] ?? '', $b['title'] ?? ''); } else { $cmp = $a['rawduedate'] - $b['rawduedate']; } return $descending ? -$cmp : $cmp; } ); } if (isset($params['limit'])) { $limit = $params['limit'] ?? 50; $offset = isset($params['page']) ? ($params['page'] - 1) * $limit : 0; $transactions = array_slice($transactions, $offset, $limit); } return [ 'count' => count($session->transactions), 'records' => $transactions ]; } /** * Construct a historic transaction list for getMyTransactionHistory; may be * random or pre-set depending on Demo.ini settings. * * @return array */ protected function getHistoricTransactionList() { $this->checkIntermittentFailure(); // If Demo.ini includes a fixed set of transactions, load those; otherwise // build some random ones. return isset($this->config['Records']['historicTransactions']) ? json_decode($this->config['Records']['historicTransactions'], true) : $this->getRandomHistoricTransactionList(); } /** * Construct a random set of transactions for getMyTransactionHistory(). * * @return array */ protected function getRandomHistoricTransactionList() { // How many items are there? %10 - 1 = 10% chance of none, // 90% of 1-150 (give or take some odd maths) $trans = rand() % 10 - 1 > 0 ? 
rand() % 15 : 0; $transList = []; for ($i = 0; $i < $trans; $i++) { // Checkout date $relative = rand() % 300; $checkoutDate = strtotime("now -$relative days"); // Due date (7-30 days from checkout) $dueDate = $checkoutDate + 60 * 60 * 24 * (rand() % 23 + 7); // Return date (1-40 days from checkout and < now) $returnDate = min( [$checkoutDate + 60 * 60 * 24 * (rand() % 39 + 1), time()] ); // Create a generic transaction: $transList[] = $this->getRandomItemIdentifier() + [ 'checkoutDate' => $this->dateConverter->convertToDisplayDate( 'U', $checkoutDate ), 'dueDate' => $this->dateConverter->convertToDisplayDate( 'U', $dueDate ), 'returnDate' => $this->dateConverter->convertToDisplayDate( 'U', $returnDate ), // Raw dates for sorting '_checkoutDate' => $checkoutDate, '_dueDate' => $dueDate, '_returnDate' => $returnDate, 'barcode' => sprintf("%08d", rand() % 50000), 'item_id' => $i, ]; if ($this->idsInMyResearch) { [$transList[$i]['id'], $transList[$i]['title']] = $this->getRandomBibIdAndTitle(); $transList[$i]['source'] = $this->getRecordSource(); } else { $transList[$i]['title'] = 'Demo Title ' . $i; } } return $transList; } /** * Get Patron Loan History * * This is responsible for retrieving all historic transactions for a specific * patron. * * @param array $patron The patron array from patronLogin * @param array $params Parameters * * @return mixed Array of the patron's historic transactions on success. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getMyTransactionHistory($patron, $params) { $this->checkIntermittentFailure(); $session = $this->getSession($patron['id'] ?? null); if (!isset($session->historicLoans)) { $session->historicLoans = $this->getHistoricTransactionList(); } // Sort and splice the list $historicLoans = $session->historicLoans; if (isset($params['sort'])) { switch ($params['sort']) { case 'checkout asc': $sorter = function ($a, $b) { return strcmp($a['_checkoutDate'], $b['_checkoutDate']); }; break; case 'return desc': $sorter = function ($a, $b) { return strcmp($b['_returnDate'], $a['_returnDate']); }; break; case 'return asc': $sorter = function ($a, $b) { return strcmp($a['_returnDate'], $b['_returnDate']); }; break; case 'due desc': $sorter = function ($a, $b) { return strcmp($b['_dueDate'], $a['_dueDate']); }; break; case 'due asc': $sorter = function ($a, $b) { return strcmp($a['_dueDate'], $b['_dueDate']); }; break; default: $sorter = function ($a, $b) { return strcmp($b['_checkoutDate'], $a['_checkoutDate']); }; break; } usort($historicLoans, $sorter); } $limit = isset($params['limit']) ? (int)$params['limit'] : 50; $start = isset($params['page']) ? ((int)$params['page'] - 1) * $limit : 0; $historicLoans = array_splice($historicLoans, $start, $limit); return [ 'count' => count($session->historicLoans), 'transactions' => $historicLoans ]; } /** * Get Pick Up Locations * * This is responsible get a list of valid library locations for holds / recall * retrieval * * @param array $patron Patron information returned by the patronLogin * method. * @param array $holdDetails Optional array, only passed in when getting a list * in the context of placing a hold; contains most of the same values passed to * placeHold, minus the patron data. May be used to limit the pickup options * or may be ignored. The driver must not add new options to the return array * based on this data or other areas of VuFind may behave incorrectly. 
* * @return array An array of associative arrays with locationID and * locationDisplay keys * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getPickUpLocations($patron = false, $holdDetails = null) { $this->checkIntermittentFailure(); return [ [ 'locationID' => 'A', 'locationDisplay' => 'Campus A' ], [ 'locationID' => 'B', 'locationDisplay' => 'Campus B' ], [ 'locationID' => 'C', 'locationDisplay' => 'Campus C' ] ]; } /** * Get Default "Hold Required By" Date (as Unix timestamp) or null if unsupported * * @param array $patron Patron information returned by the patronLogin method. * @param array $holdInfo Contains most of the same values passed to * placeHold, minus the patron data. * * @return int * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getHoldDefaultRequiredDate($patron, $holdInfo) { $this->checkIntermittentFailure(); // 5 years in the future (but similate intermittent failure): return !$this->isFailing(__METHOD__, 50) ? mktime(0, 0, 0, date('m'), date('d'), date('Y') + 5) : null; } /** * Get Default Pick Up Location * * Returns the default pick up location set in HorizonXMLAPI.ini * * @param array $patron Patron information returned by the patronLogin * method. * @param array $holdDetails Optional array, only passed in when getting a list * in the context of placing a hold; contains most of the same values passed to * placeHold, minus the patron data. May be used to limit the pickup options * or may be ignored. * * @return string A location ID * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getDefaultPickUpLocation($patron = false, $holdDetails = null) { $this->checkIntermittentFailure(); $locations = $this->getPickUpLocations($patron); return $locations[0]['locationID']; } /** * Get Default Request Group * * Returns the default request group * * @param array $patron Patron information returned by the patronLogin * method. * @param array $holdDetails Optional array, only passed in when getting a list * in the context of placing a hold; contains most of the same values passed to * placeHold, minus the patron data. May be used to limit the request group * options or may be ignored. * * @return false|string The default request group for the patron. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getDefaultRequestGroup($patron = false, $holdDetails = null) { $this->checkIntermittentFailure(); if ($this->isFailing(__METHOD__, 50)) { return false; } $requestGroups = $this->getRequestGroups(0, 0); return $requestGroups[0]['id']; } /** * Get request groups * * @param int $bibId BIB ID * @param array $patron Patron information returned by the patronLogin * method. * @param array $holdDetails Optional array, only passed in when getting a list * in the context of placing a hold; contains most of the same values passed to * placeHold, minus the patron data. May be used to limit the request group * options or may be ignored. * * @return array False if request groups not in use or an array of * associative arrays with id and name keys * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getRequestGroups($bibId = null, $patron = null, $holdDetails = null ) { $this->checkIntermittentFailure(); return [ [ 'id' => 1, 'name' => 'Main Library' ], [ 'id' => 2, 'name' => 'Branch Library' ] ]; } /** * Get Funds * * Return a list of funds which may be used to limit the getNewItems list. * * @return array An associative array with key = fund ID, value = fund name. 
*/ public function getFunds() { $this->checkIntermittentFailure(); return ["Fund A", "Fund B", "Fund C"]; } /** * Get Departments * * Obtain a list of departments for use in limiting the reserves list. * * @return array An associative array with key = dept. ID, value = dept. name. */ public function getDepartments() { $this->checkIntermittentFailure(); return ["Dept. A", "Dept. B", "Dept. C"]; } /** * Get Instructors * * Obtain a list of instructors for use in limiting the reserves list. * * @return array An associative array with key = ID, value = name. */ public function getInstructors() { $this->checkIntermittentFailure(); return ["Instructor A", "Instructor B", "Instructor C"]; } /** * Get Courses * * Obtain a list of courses for use in limiting the reserves list. * * @return array An associative array with key = ID, value = name. */ public function getCourses() { $this->checkIntermittentFailure(); return ["Course A", "Course B", "Course C"]; } /** * Get New Items * * Retrieve the IDs of items recently added to the catalog. * * @param int $page Page number of results to retrieve (counting starts at 1) * @param int $limit The size of each page of results to retrieve * @param int $daysOld The maximum age of records to retrieve in days (max. 30) * @param int $fundId optional fund ID to use for limiting results (use a value * returned by getFunds, or exclude for no limit); note that "fund" may be a * misnomer - if funds are not an appropriate way to limit your new item * results, you can return a different set of values from getFunds. The * important thing is that this parameter supports an ID returned by getFunds, * whatever that may mean. * * @return array Associative array with 'count' and 'results' keys * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getNewItems($page, $limit, $daysOld, $fundId = null) { $this->checkIntermittentFailure(); // Pick a random number of results to return -- don't exceed limit or 30, // whichever is smaller (this can be pretty slow due to the random ID code). $count = rand(0, $limit > 30 ? 30 : $limit); $results = []; for ($x = 0; $x < $count; $x++) { $randomId = $this->getRandomBibId(); // avoid duplicate entries in array: if (!in_array($randomId, $results)) { $results[] = $randomId; } } $retVal = ['count' => count($results), 'results' => []]; foreach ($results as $result) { $retVal['results'][] = ['id' => $result]; } return $retVal; } /** * Find Reserves * * Obtain information on course reserves. * * @param string $course ID from getCourses (empty string to match all) * @param string $inst ID from getInstructors (empty string to match all) * @param string $dept ID from getDepartments (empty string to match all) * * @return mixed An array of associative arrays representing reserve items. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function findReserves($course, $inst, $dept) { $this->checkIntermittentFailure(); // Pick a random number of results to return -- don't exceed 30. $count = rand(0, 30); $results = []; for ($x = 0; $x < $count; $x++) { $randomId = $this->getRandomBibId(); // avoid duplicate entries in array: if (!in_array($randomId, $results)) { $results[] = $randomId; } } $retVal = []; foreach ($results as $current) { $retVal[] = ['BIB_ID' => $current]; } return $retVal; } /** * Cancel Holds * * Attempts to Cancel a hold or recall on a particular item. The * data in $cancelDetails['details'] is determined by getCancelHoldDetails(). 
* * @param array $cancelDetails An array of item and patron data * * @return array An array of data on each request including * whether or not it was successful and a system message (if available) */ public function cancelHolds($cancelDetails) { $this->checkIntermittentFailure(); // Rewrite the holds in the session, removing those the user wants to // cancel. $newHolds = new ArrayObject(); $retVal = ['count' => 0, 'items' => []]; $session = $this->getSession($cancelDetails['patron']['id'] ?? null); foreach ($session->holds as $current) { if (!in_array($current['reqnum'], $cancelDetails['details'])) { $newHolds->append($current); } else { if (!$this->isFailing(__METHOD__, 50)) { $retVal['count']++; $retVal['items'][$current['item_id']] = [ 'success' => true, 'status' => 'hold_cancel_success' ]; } else { $newHolds->append($current); $retVal['items'][$current['item_id']] = [ 'success' => false, 'status' => 'hold_cancel_fail', 'sysMessage' => 'Demonstrating failure; keep trying and ' . 'it will work eventually.' ]; } } } $session->holds = $newHolds; return $retVal; } /** * Get Cancel Hold Details * * In order to cancel a hold, Voyager requires the patron details an item ID * and a recall ID. This function returns the item id and recall id as a string * separated by a pipe, which is then submitted as form data in Hold.php. This * value is then extracted by the CancelHolds function. * * @param array $holdDetails An array of item data * * @return string Data for use in a form field */ public function getCancelHoldDetails($holdDetails) { return $holdDetails['reqnum']; } /** * Cancel Storage Retrieval Request * * Attempts to Cancel a Storage Retrieval Request on a particular item. The * data in $cancelDetails['details'] is determined by * getCancelStorageRetrievalRequestDetails(). * * @param array $cancelDetails An array of item and patron data * * @return array An array of data on each request including * whether or not it was successful and a system message (if available) */ public function cancelStorageRetrievalRequests($cancelDetails) { $this->checkIntermittentFailure(); // Rewrite the items in the session, removing those the user wants to // cancel. $newRequests = new ArrayObject(); $retVal = ['count' => 0, 'items' => []]; $session = $this->getSession($cancelDetails['patron']['id'] ?? null); foreach ($session->storageRetrievalRequests as $current) { if (!in_array($current['reqnum'], $cancelDetails['details'])) { $newRequests->append($current); } else { if (!$this->isFailing(__METHOD__, 50)) { $retVal['count']++; $retVal['items'][$current['item_id']] = [ 'success' => true, 'status' => 'storage_retrieval_request_cancel_success' ]; } else { $newRequests->append($current); $retVal['items'][$current['item_id']] = [ 'success' => false, 'status' => 'storage_retrieval_request_cancel_fail', 'sysMessage' => 'Demonstrating failure; keep trying and ' . 'it will work eventually.' ]; } } } $session->storageRetrievalRequests = $newRequests; return $retVal; } /** * Get Cancel Storage Retrieval Request Details * * In order to cancel a hold, Voyager requires the patron details an item ID * and a recall ID. This function returns the item id and recall id as a string * separated by a pipe, which is then submitted as form data in Hold.php. This * value is then extracted by the CancelHolds function. 
* * @param array $details An array of item data * * @return string Data for use in a form field */ public function getCancelStorageRetrievalRequestDetails($details) { return $details['reqnum']; } /** * Renew My Items * * Function for attempting to renew a patron's items. The data in * $renewDetails['details'] is determined by getRenewDetails(). * * @param array $renewDetails An array of data required for renewing items * including the Patron ID and an array of renewal IDS * * @return array An array of renewal information keyed by item ID */ public function renewMyItems($renewDetails) { $this->checkIntermittentFailure(); // Simulate an account block at random. if ($this->checkRenewBlock()) { return [ 'blocks' => [ 'Simulated account block; try again and it will work eventually.' ], 'details' => [] ]; } // Set up successful return value. $finalResult = ['blocks' => false, 'details' => []]; // Grab transactions from session so we can modify them: $session = $this->getSession($renewDetails['patron']['id'] ?? null); $transactions = $session->transactions; foreach ($transactions as $i => $current) { // Only renew requested items: if (in_array($current['item_id'], $renewDetails['details'])) { if (!$this->isFailing(__METHOD__, 50)) { $transactions[$i]['rawduedate'] += 21 * 24 * 60 * 60; $transactions[$i]['dueStatus'] = $this->calculateDueStatus($transactions[$i]['rawduedate']); $transactions[$i]['duedate'] = $this->dateConverter->convertToDisplayDate( 'U', $transactions[$i]['rawduedate'] ); $transactions[$i]['renew'] = $transactions[$i]['renew'] + 1; $transactions[$i]['renewable'] = $transactions[$i]['renew'] < $transactions[$i]['renewLimit']; $finalResult['details'][$current['item_id']] = [ "success" => true, "new_date" => $transactions[$i]['duedate'], "new_time" => '', "item_id" => $current['item_id'], ]; } else { $finalResult['details'][$current['item_id']] = [ "success" => false, "new_date" => false, "item_id" => $current['item_id'], "sysMessage" => 'Demonstrating failure; keep trying and ' . 'it will work eventually.' ]; } } } // Write modified transactions back to session; in-place changes do not // work due to ArrayObject eccentricities: $session->transactions = $transactions; return $finalResult; } /** * Get Renew Details * * In order to renew an item, Voyager requires the patron details and an item * id. This function returns the item id as a string which is then used * as submitted form data in checkedOut.php. This value is then extracted by * the RenewMyItems function. * * @param array $checkOutDetails An array of item data * * @return string Data for use in a form field */ public function getRenewDetails($checkOutDetails) { return $checkOutDetails['item_id']; } /** * Check if hold or recall available * * This is responsible for determining if an item is requestable * * @param string $id The Bib ID * @param array $data An Array of item data * @param patron $patron An array of patron data * * @return mixed An array of data on the request including * whether or not it is valid and a status message. Alternatively a boolean * true if request is valid, false if not. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function checkRequestIsValid($id, $data, $patron) { $this->checkIntermittentFailure(); if ($this->isFailing(__METHOD__, 10)) { return [ 'valid' => false, 'status' => rand() % 3 != 0 ? 
'hold_error_blocked' : 'Demonstrating a custom failure' ]; } return [ 'valid' => true, 'status' => 'request_place_text' ]; } /** * Place Hold * * Attempts to place a hold or recall on a particular item and returns * an array with result details. * * @param array $holdDetails An array of item and patron data * * @return mixed An array of data on the request including * whether or not it was successful and a system message (if available) */ public function placeHold($holdDetails) { $this->checkIntermittentFailure(); // Simulate failure: if ($this->isFailing(__METHOD__, 50)) { return [ "success" => false, "sysMessage" => 'Demonstrating failure; keep trying and ' . 'it will work eventually.' ]; } $session = $this->getSession($holdDetails['patron']['id'] ?? null); if (!isset($session->holds)) { $session->holds = new ArrayObject(); } $lastHold = count($session->holds) - 1; $nextId = $lastHold >= 0 ? $session->holds[$lastHold]['item_id'] + 1 : 0; // Figure out appropriate expiration date: if (!isset($holdDetails['requiredBy']) || empty($holdDetails['requiredBy']) ) { $expire = strtotime("now + 30 days"); } else { try { $expire = $this->dateConverter->convertFromDisplayDate( "U", $holdDetails['requiredBy'] ); } catch (DateException $e) { // Expiration date is invalid return [ 'success' => false, 'sysMessage' => 'hold_date_invalid' ]; } } if ($expire <= time()) { return [ 'success' => false, 'sysMessage' => 'hold_date_past' ]; } $requestGroup = ''; foreach ($this->getRequestGroups(null, null) as $group) { if (isset($holdDetails['requestGroupId']) && $group['id'] == $holdDetails['requestGroupId'] ) { $requestGroup = $group['name']; break; } } $session->holds->append( [ 'id' => $holdDetails['id'], 'source' => $this->getRecordSource(), 'location' => $holdDetails['pickUpLocation'], 'expire' => $this->dateConverter->convertToDisplayDate('U', $expire), 'create' => $this->dateConverter->convertToDisplayDate('U', time()), 'reqnum' => sprintf('%06d', $nextId), 'item_id' => $nextId, 'volume' => '', 'processed' => '', 'requestGroup' => $requestGroup ] ); return ['success' => true]; } /** * Check if storage retrieval request available * * This is responsible for determining if an item is requestable * * @param string $id The Bib ID * @param array $data An Array of item data * @param patron $patron An array of patron data * * @return mixed An array of data on the request including * whether or not it is valid and a status message. Alternatively a boolean * true if request is valid, false if not. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function checkStorageRetrievalRequestIsValid($id, $data, $patron) { $this->checkIntermittentFailure(); if (!$this->storageRetrievalRequests || $this->isFailing(__METHOD__, 10)) { return [ 'valid' => false, 'status' => rand() % 3 != 0 ? 'storage_retrieval_request_error_blocked' : 'Demonstrating a custom failure' ]; } return [ 'valid' => true, 'status' => 'storage_retrieval_request_place_text' ]; } /** * Place a Storage Retrieval Request * * Attempts to place a request on a particular item and returns * an array with result details. * * @param array $details An array of item and patron data * * @return mixed An array of data on the request including * whether or not it was successful and a system message (if available) */ public function placeStorageRetrievalRequest($details) { $this->checkIntermittentFailure(); if (!$this->storageRetrievalRequests) { return [ "success" => false, "sysMessage" => 'Storage Retrieval Requests are disabled.' 
]; } // Simulate failure: if ($this->isFailing(__METHOD__, 50)) { return [ "success" => false, "sysMessage" => 'Demonstrating failure; keep trying and ' . 'it will work eventually.' ]; } $session = $this->getSession($details['patron']['id'] ?? null); if (!isset($session->storageRetrievalRequests)) { $session->storageRetrievalRequests = new ArrayObject(); } $lastRequest = count($session->storageRetrievalRequests) - 1; $nextId = $lastRequest >= 0 ? $session->storageRetrievalRequests[$lastRequest]['item_id'] + 1 : 0; // Figure out appropriate expiration date: if (!isset($details['requiredBy']) || empty($details['requiredBy']) ) { $expire = strtotime("now + 30 days"); } else { try { $expire = $this->dateConverter->convertFromDisplayDate( "U", $details['requiredBy'] ); } catch (DateException $e) { // Expiration date is invalid return [ 'success' => false, 'sysMessage' => 'storage_retrieval_request_date_invalid' ]; } } if ($expire <= time()) { return [ 'success' => false, 'sysMessage' => 'storage_retrieval_request_date_past' ]; } $session->storageRetrievalRequests->append( [ 'id' => $details['id'], 'source' => $this->getRecordSource(), 'location' => $details['pickUpLocation'], 'expire' => $this->dateConverter->convertToDisplayDate('U', $expire), 'create' => $this->dateConverter->convertToDisplayDate('U', time()), 'processed' => rand() % 3 == 0 ? $this->dateConverter->convertToDisplayDate('U', $expire) : '', 'reqnum' => sprintf('%06d', $nextId), 'item_id' => $nextId ] ); return ['success' => true]; } /** * Check if ILL request available * * This is responsible for determining if an item is requestable * * @param string $id The Bib ID * @param array $data An Array of item data * @param patron $patron An array of patron data * * @return mixed An array of data on the request including * whether or not it is valid and a status message. Alternatively a boolean * true if request is valid, false if not. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function checkILLRequestIsValid($id, $data, $patron) { $this->checkIntermittentFailure(); if (!$this->ILLRequests || $this->isFailing(__METHOD__, 10)) { return [ 'valid' => false, 'status' => rand() % 3 != 0 ? 'ill_request_error_blocked' : 'Demonstrating a custom failure' ]; } return [ 'valid' => true, 'status' => 'ill_request_place_text' ]; } /** * Place ILL Request * * Attempts to place an ILL request on a particular item and returns * an array with result details * * @param array $details An array of item and patron data * * @return mixed An array of data on the request including * whether or not it was successful and a system message (if available) */ public function placeILLRequest($details) { $this->checkIntermittentFailure(); if (!$this->ILLRequests) { return [ 'success' => false, 'sysMessage' => 'ILL requests are disabled.' ]; } // Simulate failure: if ($this->isFailing(__METHOD__, 50)) { return [ 'success' => false, 'sysMessage' => 'Demonstrating failure; keep trying and ' . 'it will work eventually.' ]; } $session = $this->getSession($details['patron']['id'] ?? null); if (!isset($session->ILLRequests)) { $session->ILLRequests = new ArrayObject(); } $lastRequest = count($session->ILLRequests) - 1; $nextId = $lastRequest >= 0 ? 
$session->ILLRequests[$lastRequest]['item_id'] + 1 : 0; // Figure out appropriate expiration date: if (!isset($details['requiredBy']) || empty($details['requiredBy']) ) { $expire = strtotime('now + 30 days'); } else { try { $expire = $this->dateConverter->convertFromDisplayDate( 'U', $details['requiredBy'] ); } catch (DateException $e) { // Expiration Date is invalid return [ 'success' => false, 'sysMessage' => 'ill_request_date_invalid' ]; } } if ($expire <= time()) { return [ 'success' => false, 'sysMessage' => 'ill_request_date_past' ]; } // Verify pickup library and location $pickupLocation = ''; $pickupLocations = $this->getILLPickupLocations( $details['id'], $details['pickUpLibrary'], $details['patron'] ); foreach ($pickupLocations as $location) { if ($location['id'] == $details['pickUpLibraryLocation']) { $pickupLocation = $location['name']; break; } } if (!$pickupLocation) { return [ 'success' => false, 'sysMessage' => 'ill_request_place_fail_missing' ]; } $session->ILLRequests->append( [ 'id' => $details['id'], 'source' => $this->getRecordSource(), 'location' => $pickupLocation, 'expire' => $this->dateConverter->convertToDisplayDate('U', $expire), 'create' => $this->dateConverter->convertToDisplayDate('U', time()), 'processed' => rand() % 3 == 0 ? $this->dateConverter->convertToDisplayDate('U', $expire) : '', 'reqnum' => sprintf('%06d', $nextId), 'item_id' => $nextId ] ); return ['success' => true]; } /** * Get ILL Pickup Libraries * * This is responsible for getting information on the possible pickup libraries * * @param string $id Record ID * @param array $patron Patron * * @return bool|array False if request not allowed, or an array of associative * arrays with libraries. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getILLPickupLibraries($id, $patron) { $this->checkIntermittentFailure(); if (!$this->ILLRequests) { return false; } $details = [ [ 'id' => 1, 'name' => 'Main Library', 'isDefault' => true ], [ 'id' => 2, 'name' => 'Branch Library', 'isDefault' => false ] ]; return $details; } /** * Get ILL Pickup Locations * * This is responsible for getting a list of possible pickup locations for a * library * * @param string $id Record ID * @param string $pickupLib Pickup library ID * @param array $patron Patron * * @return bool|array False if request not allowed, or an array of locations. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getILLPickupLocations($id, $pickupLib, $patron) { $this->checkIntermittentFailure(); switch ($pickupLib) { case 1: return [ [ 'id' => 1, 'name' => 'Circulation Desk', 'isDefault' => true ], [ 'id' => 2, 'name' => 'Reference Desk', 'isDefault' => false ] ]; case 2: return [ [ 'id' => 3, 'name' => 'Main Desk', 'isDefault' => false ], [ 'id' => 4, 'name' => 'Library Bus', 'isDefault' => true ] ]; } return []; } /** * Cancel ILL Request * * Attempts to Cancel an ILL request on a particular item. The * data in $cancelDetails['details'] is determined by * getCancelILLRequestDetails(). * * @param array $cancelDetails An array of item and patron data * * @return array An array of data on each request including * whether or not it was successful and a system message (if available) */ public function cancelILLRequests($cancelDetails) { $this->checkIntermittentFailure(); // Rewrite the items in the session, removing those the user wants to // cancel. $newRequests = new ArrayObject(); $retVal = ['count' => 0, 'items' => []]; $session = $this->getSession($cancelDetails['patron']['id'] ?? 
null); foreach ($session->ILLRequests as $current) { if (!in_array($current['reqnum'], $cancelDetails['details'])) { $newRequests->append($current); } else { if (!$this->isFailing(__METHOD__, 50)) { $retVal['count']++; $retVal['items'][$current['item_id']] = [ 'success' => true, 'status' => 'ill_request_cancel_success' ]; } else { $newRequests->append($current); $retVal['items'][$current['item_id']] = [ 'success' => false, 'status' => 'ill_request_cancel_fail', 'sysMessage' => 'Demonstrating failure; keep trying and ' . 'it will work eventually.' ]; } } } $session->ILLRequests = $newRequests; return $retVal; } /** * Get Cancel ILL Request Details * * @param array $details An array of item data * * @return string Data for use in a form field */ public function getCancelILLRequestDetails($details) { return $details['reqnum']; } /** * Change Password * * Attempts to change patron password (PIN code) * * @param array $details An array of patron id and old and new password: * * 'patron' The patron array from patronLogin * 'oldPassword' Old password * 'newPassword' New password * * @return array An array of data on the request including * whether or not it was successful and a system message (if available) * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function changePassword($details) { $this->checkIntermittentFailure(); if (!$this->isFailing(__METHOD__, 33)) { return ['success' => true, 'status' => 'change_password_ok']; } return [ 'success' => false, 'status' => 'An error has occurred', 'sysMessage' => 'Demonstrating failure; keep trying and it will work eventually.' ]; } /** * Public Function which specifies renew, hold and cancel settings. * * @param string $function The name of the feature to be checked * @param array $params Optional feature-specific parameters (array) * * @return array An array with key-value pairs. * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getConfig($function, $params = null) { $this->checkIntermittentFailure(); if ($function == 'Holds') { return [ 'HMACKeys' => 'id:item_id:level', 'extraHoldFields' => 'comments:requestGroup:pickUpLocation:requiredByDate', 'defaultRequiredDate' => 'driver:0:2:0', ]; } if ($function == 'Holdings') { return [ 'itemLimit' => $this->config['Holdings']['itemLimit'] ?? null, ]; } if ($function == 'StorageRetrievalRequests' && $this->storageRetrievalRequests ) { return [ 'HMACKeys' => 'id', 'extraFields' => 'comments:pickUpLocation:requiredByDate:item-issue', 'helpText' => 'This is a storage retrieval request help text' . ' with some <span style="color: red">styling</span>.' ]; } if ($function == 'ILLRequests' && $this->ILLRequests) { return [ 'enabled' => true, 'HMACKeys' => 'number', 'extraFields' => 'comments:pickUpLibrary:pickUpLibraryLocation:requiredByDate', 'defaultRequiredDate' => '0:1:0', 'helpText' => 'This is an ILL request help text' . ' with some <span style="color: red">styling</span>.' ]; } if ($function == 'changePassword') { return $this->config['changePassword'] ?? ['minLength' => 4, 'maxLength' => 20]; } if ($function == 'getMyTransactionHistory') { if (empty($this->config['TransactionHistory']['enabled'])) { return false; } $config = [ 'sort' => [ 'checkout desc' => 'sort_checkout_date_desc', 'checkout asc' => 'sort_checkout_date_asc', 'return desc' => 'sort_return_date_desc', 'return asc' => 'sort_return_date_asc', 'due desc' => 'sort_due_date_desc', 'due asc' => 'sort_due_date_asc' ], 'default_sort' => 'checkout desc' ]; if ($this->config['Loans']['paging'] ?? 
false) { $config['max_results'] = $this->config['Loans']['max_page_size'] ?? 100; } return $config; } if ('getMyTransactions' === $function) { if (empty($this->config['Loans']['paging'])) { return []; } return [ 'max_results' => $this->config['Loans']['max_page_size'] ?? 100, 'sort' => [ 'due desc' => 'sort_due_date_desc', 'due asc' => 'sort_due_date_asc', 'title asc' => 'sort_title' ], 'default_sort' => 'due asc' ]; } if ($function == 'patronLogin') { return [ 'loginMethod' => $this->config['Catalog']['loginMethod'] ?? 'password' ]; } return []; } /** * Get bib records for recently returned items. * * @param int $limit Maximum number of records to retrieve (default = 30) * @param int $maxage The maximum number of days to consider "recently * returned." * @param array $patron Patron Data * * @return array * * @SuppressWarnings(PHPMD.UnusedFormalParameter) */ public function getRecentlyReturnedBibs($limit = 30, $maxage = 30, $patron = null ) { // This is similar to getNewItems for demo purposes. $results = $this->getNewItems(1, $limit, $maxage); return $results['results']; } /** * Get bib records for "trending" items (recently returned with high usage). * * @param int $limit Maximum number of records to retrieve (default = 30) * @param int $maxage The maximum number of days' worth of data to examine. * @param array $patron Patron Data * * @return array */ public function getTrendingBibs($limit = 30, $maxage = 30, $patron = null) { // This is similar to getRecentlyReturnedBibs for demo purposes. return $this->getRecentlyReturnedBibs($limit, $maxage, $patron); } }
1
31,676
@demiankatz I'm not sure this is very useful; I just did it to get started. If we had a small handful of predetermined prefixes chosen at random, it would be easier to distinguish them from the rest of the call number. Would that be preferable?
vufind-org-vufind
php
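A minimal sketch of the "small handful of predetermined prefixes" idea discussed in the comment above. The class, method, and prefix pool are invented for illustration; nothing here comes from the actual VuFind Demo driver.

<?php
// Hypothetical helper: draw call number prefixes from a small fixed pool
// so they are easy to tell apart from the rest of the call number.
class CallNumberPrefixExample
{
    /** @var string[] Predetermined, easily recognizable prefixes (assumed values) */
    protected $prefixPool = ['DEMO', 'FAKE', 'TEST'];

    /**
     * Prepend a randomly chosen fixed prefix to a call number.
     *
     * @param string $callNumber Base call number
     *
     * @return string
     */
    public function addPrefix($callNumber)
    {
        // array_rand returns a random key from the pool
        $prefix = $this->prefixPool[array_rand($this->prefixPool)];
        return $prefix . ' ' . $callNumber;
    }
}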
@@ -313,16 +313,6 @@ func createUpstreamRequest(rw http.ResponseWriter, r *http.Request) (*http.Reque // Original incoming server request may be canceled by the // user or by std lib(e.g. too many idle connections). ctx, cancel := context.WithCancel(r.Context()) - if cn, ok := rw.(http.CloseNotifier); ok { - notifyChan := cn.CloseNotify() - go func() { - select { - case <-notifyChan: - cancel() - case <-ctx.Done(): - } - }() - } outreq := r.WithContext(ctx) // includes shallow copies of maps, but okay
1
// Copyright 2015 Light Code Labs, LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package proxy is middleware that proxies HTTP requests. package proxy import ( "context" "errors" "net" "net/http" "net/url" "strings" "sync/atomic" "time" "github.com/mholt/caddy/caddyhttp/httpserver" ) // Proxy represents a middleware instance that can proxy requests. type Proxy struct { Next httpserver.Handler Upstreams []Upstream } // Upstream manages a pool of proxy upstream hosts. type Upstream interface { // The path this upstream host should be routed on From() string // Selects an upstream host to be routed to. It // should return a suitable upstream host, or nil // if no such hosts are available. Select(*http.Request) *UpstreamHost // Checks if subpath is not an ignored path AllowedPath(string) bool // Gets the duration of the headstart the first // connection is given in the Go standard library's // implementation of "Happy Eyeballs" when DualStack // is enabled in net.Dialer. GetFallbackDelay() time.Duration // Gets how long to try selecting upstream hosts // in the case of cascading failures. GetTryDuration() time.Duration // Gets how long to wait between selecting upstream // hosts in the case of cascading failures. GetTryInterval() time.Duration // Gets the number of upstream hosts. GetHostCount() int // Gets how long to wait before timing out // the request GetTimeout() time.Duration // Stops the upstream from proxying requests to shutdown goroutines cleanly. Stop() error } // UpstreamHostDownFunc can be used to customize how Down behaves. type UpstreamHostDownFunc func(*UpstreamHost) bool // UpstreamHost represents a single proxy upstream type UpstreamHost struct { // This field is read & written to concurrently, so all access must use // atomic operations. Conns int64 // must be first field to be 64-bit aligned on 32-bit systems MaxConns int64 Name string // hostname of this upstream host UpstreamHeaders http.Header DownstreamHeaders http.Header FailTimeout time.Duration CheckDown UpstreamHostDownFunc WithoutPathPrefix string ReverseProxy *ReverseProxy Fails int32 // This is an int32 so that we can use atomic operations to do concurrent // reads & writes to this value. The default value of 0 indicates that it // is healthy and any non-zero value indicates unhealthy. Unhealthy int32 HealthCheckResult atomic.Value UpstreamHeaderReplacements headerReplacements DownstreamHeaderReplacements headerReplacements } // Down checks whether the upstream host is down or not. // Down will try to use uh.CheckDown first, and will fall // back to some default criteria if necessary. 
func (uh *UpstreamHost) Down() bool { if uh.CheckDown == nil { // Default settings return atomic.LoadInt32(&uh.Unhealthy) != 0 || atomic.LoadInt32(&uh.Fails) > 0 } return uh.CheckDown(uh) } // Full checks whether the upstream host has reached its maximum connections func (uh *UpstreamHost) Full() bool { return uh.MaxConns > 0 && atomic.LoadInt64(&uh.Conns) >= uh.MaxConns } // Available checks whether the upstream host is available for proxying to func (uh *UpstreamHost) Available() bool { return !uh.Down() && !uh.Full() } // ServeHTTP satisfies the httpserver.Handler interface. func (p Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { // start by selecting most specific matching upstream config upstream := p.match(r) if upstream == nil { return p.Next.ServeHTTP(w, r) } // this replacer is used to fill in header field values replacer := httpserver.NewReplacer(r, nil, "") // outreq is the request that makes a roundtrip to the backend outreq, cancel := createUpstreamRequest(w, r) defer cancel() // If we have more than one upstream host defined and if retrying is enabled // by setting try_duration to a non-zero value, caddy will try to // retry the request at a different host if the first one failed. // // This requires us to possibly rewind and replay the request body though, // which in turn requires us to buffer the request body first. // // An unbuffered request is usually preferrable, because it reduces latency // as well as memory usage. Furthermore it enables different kinds of // HTTP streaming applications like gRPC for instance. requiresBuffering := upstream.GetHostCount() > 1 && upstream.GetTryDuration() != 0 if requiresBuffering { body, err := newBufferedBody(outreq.Body) if err != nil { return http.StatusBadRequest, errors.New("failed to read downstream request body") } if body != nil { outreq.Body = body } } // The keepRetrying function will return true if we should // loop and try to select another host, or false if we // should break and stop retrying. start := time.Now() keepRetrying := func(backendErr error) bool { // if downstream has canceled the request, break if backendErr == context.Canceled { return false } // if we've tried long enough, break if time.Since(start) >= upstream.GetTryDuration() { return false } // otherwise, wait and try the next available host time.Sleep(upstream.GetTryInterval()) return true } var backendErr error for { // since Select() should give us "up" hosts, keep retrying // hosts until timeout (or until we get a nil host). host := upstream.Select(r) if host == nil { if backendErr == nil { backendErr = errors.New("no hosts available upstream") } if !keepRetrying(backendErr) { break } continue } if rr, ok := w.(*httpserver.ResponseRecorder); ok && rr.Replacer != nil { rr.Replacer.Set("upstream", host.Name) } proxy := host.ReverseProxy // a backend's name may contain more than just the host, // so we parse it as a URL to try to isolate the host. 
if nameURL, err := url.Parse(host.Name); err == nil { outreq.Host = nameURL.Host if proxy == nil { proxy = NewSingleHostReverseProxy(nameURL, host.WithoutPathPrefix, http.DefaultMaxIdleConnsPerHost, upstream.GetTimeout(), upstream.GetFallbackDelay(), ) } // use upstream credentials by default if outreq.Header.Get("Authorization") == "" && nameURL.User != nil { pwd, _ := nameURL.User.Password() outreq.SetBasicAuth(nameURL.User.Username(), pwd) } } else { outreq.Host = host.Name } if proxy == nil { return http.StatusInternalServerError, errors.New("proxy for host '" + host.Name + "' is nil") } // set headers for request going upstream if host.UpstreamHeaders != nil { // modify headers for request that will be sent to the upstream host mutateHeadersByRules(outreq.Header, host.UpstreamHeaders, replacer, host.UpstreamHeaderReplacements) if hostHeaders, ok := outreq.Header["Host"]; ok && len(hostHeaders) > 0 { outreq.Host = hostHeaders[len(hostHeaders)-1] } } // prepare a function that will update response // headers coming back downstream var downHeaderUpdateFn respUpdateFn if host.DownstreamHeaders != nil { downHeaderUpdateFn = createRespHeaderUpdateFn(host.DownstreamHeaders, replacer, host.DownstreamHeaderReplacements) } // Before we retry the request we have to make sure // that the body is rewound to it's beginning. if bb, ok := outreq.Body.(*bufferedBody); ok { if err := bb.rewind(); err != nil { return http.StatusInternalServerError, errors.New("unable to rewind downstream request body") } } // tell the proxy to serve the request // // NOTE: // The call to proxy.ServeHTTP can theoretically panic. // To prevent host.Conns from getting out-of-sync we thus have to // make sure that it's _always_ correctly decremented afterwards. func() { atomic.AddInt64(&host.Conns, 1) defer atomic.AddInt64(&host.Conns, -1) backendErr = proxy.ServeHTTP(w, outreq, downHeaderUpdateFn) }() // if no errors, we're done here if backendErr == nil { return 0, nil } if backendErr == httpserver.ErrMaxBytesExceeded { return http.StatusRequestEntityTooLarge, backendErr } if backendErr == context.Canceled { return CustomStatusContextCancelled, backendErr } // failover; remember this failure for some time if // request failure counting is enabled timeout := host.FailTimeout if timeout > 0 { atomic.AddInt32(&host.Fails, 1) go func(host *UpstreamHost, timeout time.Duration) { time.Sleep(timeout) atomic.AddInt32(&host.Fails, -1) }(host, timeout) } // if we've tried long enough, break if !keepRetrying(backendErr) { break } } return http.StatusBadGateway, backendErr } // match finds the best match for a proxy config based on r. func (p Proxy) match(r *http.Request) Upstream { var u Upstream var longestMatch int for _, upstream := range p.Upstreams { basePath := upstream.From() if !httpserver.Path(r.URL.Path).Matches(basePath) || !upstream.AllowedPath(r.URL.Path) { continue } if len(basePath) > longestMatch { longestMatch = len(basePath) u = upstream } } return u } // createUpstreamRequest shallow-copies r into a new request // that can be sent upstream. // // Derived from reverseproxy.go in the standard Go httputil package. func createUpstreamRequest(rw http.ResponseWriter, r *http.Request) (*http.Request, context.CancelFunc) { // Original incoming server request may be canceled by the // user or by std lib(e.g. too many idle connections). 
ctx, cancel := context.WithCancel(r.Context()) if cn, ok := rw.(http.CloseNotifier); ok { notifyChan := cn.CloseNotify() go func() { select { case <-notifyChan: cancel() case <-ctx.Done(): } }() } outreq := r.WithContext(ctx) // includes shallow copies of maps, but okay // We should set body to nil explicitly if request body is empty. // For server requests the Request Body is always non-nil. if r.ContentLength == 0 { outreq.Body = nil } // We are modifying the same underlying map from req (shallow // copied above) so we only copy it if necessary. copiedHeaders := false // Remove hop-by-hop headers listed in the "Connection" header. // See RFC 2616, section 14.10. if c := outreq.Header.Get("Connection"); c != "" { for _, f := range strings.Split(c, ",") { if f = strings.TrimSpace(f); f != "" { if !copiedHeaders { outreq.Header = make(http.Header) copyHeader(outreq.Header, r.Header) copiedHeaders = true } outreq.Header.Del(f) } } } // Remove hop-by-hop headers to the backend. Especially // important is "Connection" because we want a persistent // connection, regardless of what the client sent to us. for _, h := range hopHeaders { if outreq.Header.Get(h) != "" { if !copiedHeaders { outreq.Header = make(http.Header) copyHeader(outreq.Header, r.Header) copiedHeaders = true } outreq.Header.Del(h) } } if clientIP, _, err := net.SplitHostPort(r.RemoteAddr); err == nil { // If we aren't the first proxy, retain prior // X-Forwarded-For information as a comma+space // separated list and fold multiple headers into one. if prior, ok := outreq.Header["X-Forwarded-For"]; ok { clientIP = strings.Join(prior, ", ") + ", " + clientIP } outreq.Header.Set("X-Forwarded-For", clientIP) } return outreq, cancel } func createRespHeaderUpdateFn(rules http.Header, replacer httpserver.Replacer, replacements headerReplacements) respUpdateFn { return func(resp *http.Response) { mutateHeadersByRules(resp.Header, rules, replacer, replacements) } } func mutateHeadersByRules(headers, rules http.Header, repl httpserver.Replacer, replacements headerReplacements) { for ruleField, ruleValues := range rules { if strings.HasPrefix(ruleField, "+") { for _, ruleValue := range ruleValues { replacement := repl.Replace(ruleValue) if len(replacement) > 0 { headers.Add(strings.TrimPrefix(ruleField, "+"), replacement) } } } else if strings.HasPrefix(ruleField, "-") { headers.Del(strings.TrimPrefix(ruleField, "-")) } else if len(ruleValues) > 0 { replacement := repl.Replace(ruleValues[len(ruleValues)-1]) if len(replacement) > 0 { headers.Set(ruleField, replacement) } } } for ruleField, ruleValues := range replacements { for _, ruleValue := range ruleValues { // Replace variables in replacement string replacement := repl.Replace(ruleValue.to) original := headers.Get(ruleField) if len(replacement) > 0 && len(original) > 0 { // Replace matches in original string with replacement string replaced := ruleValue.regexp.ReplaceAllString(original, replacement) headers.Set(ruleField, replaced) } } } } const CustomStatusContextCancelled = 499
1
13,255
Why is this deleted too?
caddyserver-caddy
go
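The deleted hunk above removes the http.CloseNotifier plumbing, and the likely rationale is that since Go 1.7 the HTTP server cancels the request's context itself when the client disconnects, so a context derived from r.Context() already observes disconnects. A small standalone handler sketching that behavior (illustrative only, not Caddy code):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// handler relies on the server-managed request context instead of a
// CloseNotifier goroutine: r.Context() is canceled on client disconnect.
func handler(w http.ResponseWriter, r *http.Request) {
	select {
	case <-time.After(5 * time.Second):
		fmt.Fprintln(w, "slow work finished")
	case <-r.Context().Done():
		// Fires when the client goes away, with no extra plumbing.
		fmt.Println("request canceled:", r.Context().Err())
	}
}

func main() {
	http.HandleFunc("/", handler)
	http.ListenAndServe(":8080", nil)
}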
@@ -8,9 +8,9 @@ using System.Threading.Tasks; namespace Datadog.Trace.Agent { - internal class ApiWebResponse : IApiResponse + internal class ApiWebResponse : IApiResponse, IDisposable { - private HttpWebResponse _response; + private readonly HttpWebResponse _response; public ApiWebResponse(HttpWebResponse response) {
1
using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Net; using System.Text; using System.Threading.Tasks; namespace Datadog.Trace.Agent { internal class ApiWebResponse : IApiResponse { private HttpWebResponse _response; public ApiWebResponse(HttpWebResponse response) { _response = response; } public int StatusCode => (int)_response.StatusCode; public long ContentLength => _response.ContentLength; public async Task<string> ReadAsStringAsync() { using (var responseStream = _response.GetResponseStream()) { var reader = new StreamReader(responseStream); return await reader.ReadToEndAsync().ConfigureAwait(false); } } } }
1
17,494
Given that IApiResponse is already made disposable, this is not necessary.
DataDog-dd-trace-dotnet
.cs
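A sketch of the reviewer's point: when the interface itself extends IDisposable, implementations need not redeclare it. The names mirror the diff, but the interface body here is an assumption.

using System;

// If IApiResponse already extends IDisposable...
internal interface IApiResponse : IDisposable
{
    int StatusCode { get; }
}

// ...then listing ", IDisposable" on the class is redundant;
// the member is inherited through the interface.
internal class ApiWebResponse : IApiResponse
{
    public int StatusCode => 200;

    public void Dispose()
    {
        // Release the underlying response here.
    }
}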
@@ -97,7 +97,10 @@ class ItemPricesOverlay extends Overlay switch (action) { case ITEM_USE_ON_WIDGET: - if (!menuEntry.getTarget().contains("High Level Alchemy") || !plugin.isShowAlchProfit()) return null; + if (!menuEntry.getTarget().contains("High Level Alchemy") || !plugin.isShowAlchProfit()) + { + return null; + } case WIDGET_DEFAULT: case ITEM_USE: case ITEM_FIRST_OPTION:
1
/* * Copyright (c) 2018, Charlie Waters * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package net.runelite.client.plugins.itemprices; import java.awt.Color; import java.awt.Dimension; import java.awt.Graphics2D; import javax.inject.Inject; import javax.inject.Singleton; import net.runelite.api.Client; import net.runelite.api.Constants; import net.runelite.api.InventoryID; import net.runelite.api.Item; import net.runelite.api.ItemDefinition; import net.runelite.api.ItemContainer; import net.runelite.api.ItemID; import net.runelite.api.MenuAction; import net.runelite.api.MenuEntry; import net.runelite.api.widgets.WidgetID; import net.runelite.api.widgets.WidgetInfo; import net.runelite.client.game.ItemManager; import net.runelite.client.ui.overlay.Overlay; import net.runelite.client.ui.overlay.OverlayPosition; import net.runelite.client.ui.overlay.tooltip.Tooltip; import net.runelite.client.ui.overlay.tooltip.TooltipManager; import net.runelite.client.util.ColorUtil; import net.runelite.client.util.StackFormatter; @Singleton class ItemPricesOverlay extends Overlay { private static final int INVENTORY_ITEM_WIDGETID = WidgetInfo.INVENTORY.getPackedId(); private static final int BANK_INVENTORY_ITEM_WIDGETID = WidgetInfo.BANK_INVENTORY_ITEMS_CONTAINER.getPackedId(); private static final int BANK_ITEM_WIDGETID = WidgetInfo.BANK_ITEM_CONTAINER.getPackedId(); private final Client client; private final ItemPricesPlugin plugin; private final TooltipManager tooltipManager; private final StringBuilder itemStringBuilder = new StringBuilder(); @Inject ItemManager itemManager; @Inject ItemPricesOverlay(final Client client, final ItemPricesPlugin plugin, final TooltipManager tooltipManager) { setPosition(OverlayPosition.DYNAMIC); this.client = client; this.plugin = plugin; this.tooltipManager = tooltipManager; } @Override public Dimension render(Graphics2D graphics) { if (client.isMenuOpen()) { return null; } final MenuEntry[] menuEntries = client.getMenuEntries(); final int last = menuEntries.length - 1; if (last < 0) { return null; } final MenuEntry menuEntry = menuEntries[last]; final MenuAction action = MenuAction.of(menuEntry.getType()); final int widgetId = menuEntry.getParam1(); final int groupId = WidgetInfo.TO_GROUP(widgetId); // Tooltip action type handling switch 
(action) { case ITEM_USE_ON_WIDGET: if (!menuEntry.getTarget().contains("High Level Alchemy") || !plugin.isShowAlchProfit()) return null; case WIDGET_DEFAULT: case ITEM_USE: case ITEM_FIRST_OPTION: case ITEM_SECOND_OPTION: case ITEM_THIRD_OPTION: case ITEM_FOURTH_OPTION: case ITEM_FIFTH_OPTION: // Item tooltip values switch (groupId) { case WidgetID.INVENTORY_GROUP_ID: if (plugin.isHideInventory()) { return null; } // intentional fallthrough case WidgetID.BANK_GROUP_ID: case WidgetID.BANK_INVENTORY_GROUP_ID: // Make tooltip final String text = makeValueTooltip(menuEntry); if (text != null) { tooltipManager.add(new Tooltip(ColorUtil.prependColorTag(text, new Color(238, 238, 238)))); } break; } break; } return null; } private String makeValueTooltip(MenuEntry menuEntry) { // Disabling both disables all value tooltips if (!plugin.isShowGEPrice() && !plugin.isShowHAValue()) { return null; } final int widgetId = menuEntry.getParam1(); ItemContainer container = null; // Inventory item if (widgetId == INVENTORY_ITEM_WIDGETID || widgetId == BANK_INVENTORY_ITEM_WIDGETID) { container = client.getItemContainer(InventoryID.INVENTORY); } // Bank item else if (widgetId == BANK_ITEM_WIDGETID) { container = client.getItemContainer(InventoryID.BANK); } if (container == null) { return null; } // Find the item in the container to get stack size final Item[] items = container.getItems(); final int index = menuEntry.getParam0(); if (index < items.length) { final Item item = items[index]; return getItemStackValueText(item); } return null; } private String getItemStackValueText(Item item) { int id = item.getId(); int qty = item.getQuantity(); // Special case for coins and platinum tokens if (id == ItemID.COINS_995) { return StackFormatter.formatNumber(qty) + " gp"; } else if (id == ItemID.PLATINUM_TOKEN) { return StackFormatter.formatNumber(qty * 1000) + " gp"; } ItemDefinition itemDef = itemManager.getItemDefinition(id); if (itemDef.getNote() != -1) { id = itemDef.getLinkedNoteId(); itemDef = itemManager.getItemDefinition(id); } // Only check prices for things with store prices if (itemDef.getPrice() <= 0) { return null; } int gePrice = 0; int haPrice = 0; int haProfit = 0; final int itemHaPrice = Math.round(itemDef.getPrice() * Constants.HIGH_ALCHEMY_MULTIPLIER); if (plugin.isShowGEPrice()) { gePrice = itemManager.getItemPrice(id); } if (plugin.isShowHAValue()) { haPrice = itemManager.getAlchValue(id); } if (gePrice > 0 && itemHaPrice > 0 && plugin.isShowAlchProfit()) { haProfit = calculateHAProfit(itemHaPrice, gePrice); } if (gePrice > 0 || haPrice > 0) { return stackValueText(qty, gePrice, haPrice, haProfit); } return null; } private String stackValueText(int qty, int gePrice, int haValue, int haProfit) { if (gePrice > 0) { itemStringBuilder.append("EX: ") .append(StackFormatter.quantityToStackSize(gePrice * qty)) .append(" gp"); if (plugin.isShowEA() && qty > 1) { itemStringBuilder.append(" (") .append(StackFormatter.quantityToStackSize(gePrice)) .append(" ea)"); } } if (haValue > 0) { if (gePrice > 0) { itemStringBuilder.append("</br>"); } itemStringBuilder.append("HA: ") .append(StackFormatter.quantityToStackSize(haValue * qty)) .append(" gp"); if (plugin.isShowEA() && qty > 1) { itemStringBuilder.append(" (") .append(StackFormatter.quantityToStackSize(haValue)) .append(" ea)"); } } if (haProfit != 0) { Color haColor = haProfitColor(haProfit); itemStringBuilder.append("</br>"); itemStringBuilder.append("HA Profit: ") .append(ColorUtil.wrapWithColorTag(String.valueOf(haProfit * qty), haColor)) .append(" 
gp"); if (plugin.isShowEA() && qty > 1) { itemStringBuilder.append(" (") .append(ColorUtil.wrapWithColorTag(String.valueOf(haProfit), haColor)) .append(" ea)"); } } // Build string and reset builder final String text = itemStringBuilder.toString(); itemStringBuilder.setLength(0); return text; } private int calculateHAProfit(int haPrice, int gePrice) { int natureRunePrice = itemManager.getItemPrice(ItemID.NATURE_RUNE); return haPrice - gePrice - natureRunePrice; } private static Color haProfitColor(int haProfit) { return haProfit >= 0 ? Color.GREEN : Color.RED; } }
1
15,213
client.getSelectedSpellName() can be used for this.
open-osrs-runelite
java
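A sketch of how the reviewer's suggestion might read: query the selected spell directly instead of parsing the menu target string. The exact format of the string returned by getSelectedSpellName() is an assumption, and the helper name is invented; this fragment depends on the RuneLite Client and plugin types already in scope in the file above.

// Hypothetical helper using the reviewer's suggested API instead of
// menuEntry.getTarget().contains(...).
private boolean isHighAlchSelected(Client client, ItemPricesPlugin plugin)
{
	final String spell = client.getSelectedSpellName();
	return spell != null
		&& spell.contains("High Level Alchemy")
		&& plugin.isShowAlchProfit();
}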
@@ -14,6 +14,12 @@ """Common formatting methods.""" +# Filename patterns. +VIOLATION_JSON_FMT = 'violations.{}.{}.{}.json' +SCANNER_OUTPUT_CSV_FMT = 'scanner_output_base.{}.csv' +FINDINGS_FILENAME = 'forseti_findings_{}.json' + +# Timestamps. # Example: '2018-03-01T21:31:52' TIMESTAMP_UTC_OFFSET = '%Y-%m-%dT%H:%M:%S%z'
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Common formatting methods.""" # Example: '2018-03-01T21:31:52' TIMESTAMP_UTC_OFFSET = '%Y-%m-%dT%H:%M:%S%z' # Example: '2018-03-01T21:32:24.491644' TIMESTAMP_MICROS = '%Y-%m-%dT%H:%M:%S.%f' # Example: '2018-03-01T21:33:59Z' TIMESTAMP_TIMEZONE_NAME = '%Y-%m-%dT%H:%M:%SZ' # Example: '01 March 2018 - 21:38:12' TIMESTAMP_HUMAN_READABLE = '%d %B %Y - %H:%M:%S'
1
29,257
Please alpha sort these filenames.
forseti-security-forseti-security
py
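The requested change applied to the constants from the diff above, alphabetized by constant name:

# Filename patterns.
FINDINGS_FILENAME = 'forseti_findings_{}.json'
SCANNER_OUTPUT_CSV_FMT = 'scanner_output_base.{}.csv'
VIOLATION_JSON_FMT = 'violations.{}.{}.{}.json'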
@@ -147,17 +147,6 @@ func (p *Protocol) validateTransfer(_ context.Context, act action.Action) error if tsf.TotalSize() > TransferSizeLimit { return errors.Wrap(action.ErrActPool, "oversized data") } - // Reject transfer of negative amount - if tsf.Amount().Sign() < 0 { - return errors.Wrap(action.ErrBalance, "negative value") - } - // Reject transfer of negative gas price - if tsf.GasPrice().Sign() < 0 { - return errors.Wrap(action.ErrGasPrice, "negative value") - } - // check if recipient's address is valid - if _, err := address.FromString(tsf.Recipient()); err != nil { - return errors.Wrapf(err, "error when validating recipient's address %s", tsf.Recipient()) - } - return nil + + return tsf.SelfCheck() }
y: 1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package account import ( "context" "math/big" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" "github.com/pkg/errors" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/state" "github.com/iotexproject/iotex-proto/golang/iotextypes" ) // TransferSizeLimit is the maximum size of transfer allowed const TransferSizeLimit = 32 * 1024 // handleTransfer handles a transfer func (p *Protocol) handleTransfer(ctx context.Context, act action.Action, sm protocol.StateManager) (*action.Receipt, error) { actionCtx := protocol.MustGetActionCtx(ctx) bcCtx := protocol.MustGetBlockchainCtx(ctx) blkCtx := protocol.MustGetBlockCtx(ctx) tsf, ok := act.(*action.Transfer) if !ok { return nil, nil } // check sender sender, err := accountutil.LoadOrCreateAccount(sm, actionCtx.Caller.String()) if err != nil { return nil, errors.Wrapf(err, "failed to load or create the account of sender %s", actionCtx.Caller.String()) } if blkCtx.GasLimit < actionCtx.IntrinsicGas { return nil, action.ErrHitGasLimit } gasFee := big.NewInt(0).Mul(tsf.GasPrice(), big.NewInt(0).SetUint64(actionCtx.IntrinsicGas)) if big.NewInt(0).Add(tsf.Amount(), gasFee).Cmp(sender.Balance) == 1 { return nil, errors.Wrapf( state.ErrNotEnoughBalance, "sender %s balance %s, required amount %s", actionCtx.Caller.String(), sender.Balance, big.NewInt(0).Add(tsf.Amount(), gasFee), ) } hu := config.NewHeightUpgrade(&bcCtx.Genesis) if hu.IsPre(config.Pacific, blkCtx.BlockHeight) { // charge sender gas if err := sender.SubBalance(gasFee); err != nil { return nil, errors.Wrapf(err, "failed to charge the gas for sender %s", actionCtx.Caller.String()) } if p.depositGas != nil { if err := p.depositGas(ctx, sm, gasFee); err != nil { return nil, err } } } recipientAddr, err := address.FromString(tsf.Recipient()) if err != nil { return nil, errors.Wrapf(err, "failed to decode recipient address %s", tsf.Recipient()) } recipientAcct, err := accountutil.LoadAccount(sm, hash.BytesToHash160(recipientAddr.Bytes())) if err == nil && recipientAcct.IsContract() { // update sender Nonce accountutil.SetNonce(tsf, sender) // put updated sender's state to trie if err := accountutil.StoreAccount(sm, actionCtx.Caller.String(), sender); err != nil { return nil, errors.Wrap(err, "failed to update pending account changes to trie") } if hu.IsPost(config.Pacific, blkCtx.BlockHeight) { if p.depositGas != nil { if err := p.depositGas(ctx, sm, gasFee); err != nil { return nil, err } } } return &action.Receipt{ Status: uint64(iotextypes.ReceiptStatus_Failure), BlockHeight: blkCtx.BlockHeight, ActionHash: actionCtx.ActionHash, GasConsumed: actionCtx.IntrinsicGas, ContractAddress: p.addr.String(), }, nil } // update sender Balance if err := sender.SubBalance(tsf.Amount()); err != nil { return nil, errors.Wrapf(err, "failed to update the Balance of sender %s", actionCtx.Caller.String()) } // update sender Nonce 
accountutil.SetNonce(tsf, sender) // put updated sender's state to trie if err := accountutil.StoreAccount(sm, actionCtx.Caller.String(), sender); err != nil { return nil, errors.Wrap(err, "failed to update pending account changes to trie") } // check recipient recipient, err := accountutil.LoadOrCreateAccount(sm, tsf.Recipient()) if err != nil { return nil, errors.Wrapf(err, "failed to load or create the account of recipient %s", tsf.Recipient()) } if err := recipient.AddBalance(tsf.Amount()); err != nil { return nil, errors.Wrapf(err, "failed to update the Balance of recipient %s", tsf.Recipient()) } // put updated recipient's state to trie if err := accountutil.StoreAccount(sm, tsf.Recipient(), recipient); err != nil { return nil, errors.Wrap(err, "failed to update pending account changes to trie") } if hu.IsPost(config.Pacific, blkCtx.BlockHeight) { if p.depositGas != nil { if err := p.depositGas(ctx, sm, gasFee); err != nil { return nil, err } } } return &action.Receipt{ Status: uint64(iotextypes.ReceiptStatus_Success), BlockHeight: blkCtx.BlockHeight, ActionHash: actionCtx.ActionHash, GasConsumed: actionCtx.IntrinsicGas, ContractAddress: p.addr.String(), }, nil } // validateTransfer validates a transfer func (p *Protocol) validateTransfer(_ context.Context, act action.Action) error { tsf, ok := act.(*action.Transfer) if !ok { return nil } // Reject oversized transfer if tsf.TotalSize() > TransferSizeLimit { return errors.Wrap(action.ErrActPool, "oversized data") } // Reject transfer of negative amount if tsf.Amount().Sign() < 0 { return errors.Wrap(action.ErrBalance, "negative value") } // Reject transfer of negative gas price if tsf.GasPrice().Sign() < 0 { return errors.Wrap(action.ErrGasPrice, "negative value") } // check if recipient's address is valid if _, err := address.FromString(tsf.Recipient()); err != nil { return errors.Wrapf(err, "error when validating recipient's address %s", tsf.Recipient()) } return nil }
idx: 1
id: 21,466
msg: is this already called by actPool.validate()?
proj: iotexproject-iotex-core
lang: go
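The patch above collapses three inline checks into tsf.SelfCheck(), and the reviewer asks whether the same validation already runs in actPool.validate(). For the deletion to be safe, SelfCheck must subsume the removed checks. Below is a minimal sketch of what that method would need to cover, reusing the removed code's error values; the real method lives in the action package, so the unqualified names here are assumptions.

    // Sketch only: mirrors the three checks deleted in the patch above.
    func (tsf *Transfer) SelfCheck() error {
    	// Reject transfer of negative amount
    	if tsf.Amount().Sign() < 0 {
    		return errors.Wrap(ErrBalance, "negative value")
    	}
    	// Reject transfer of negative gas price
    	if tsf.GasPrice().Sign() < 0 {
    		return errors.Wrap(ErrGasPrice, "negative value")
    	}
    	// Reject recipient addresses that fail to decode
    	if _, err := address.FromString(tsf.Recipient()); err != nil {
    		return errors.Wrapf(err, "error when validating recipient's address %s", tsf.Recipient())
    	}
    	return nil
    }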
@@ -707,6 +707,16 @@ void updateStereoBonds(RWMOL_SPTR product, const ROMol &reactant, if (pStart->getTotalDegree() < 3 || pEnd->getTotalDegree() < 3) { pBond->setStereo(Bond::BondStereo::STEREONONE); + } else { + // Ring bonds shouldn't be marked as STEREOANY + + if (!product->getRingInfo()->isInitialized()) { + MolOps::findSSSR(*product); + } + + if (product->getRingInfo()->numBondRings(pBond->getIdx()) > 0) { + pBond->setStereo(Bond::BondStereo::STEREONONE); + } } continue;
y: 1
// // Copyright (c) 2014-2017, Novartis Institutes for BioMedical Research Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // * Neither the name of Novartis Institutes for BioMedical Research Inc. // nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written // permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <GraphMol/ChemReactions/Reaction.h> #include <GraphMol/Substruct/SubstructMatch.h> #include <GraphMol/QueryOps.h> #include <boost/dynamic_bitset.hpp> #include <boost/foreach.hpp> #include <map> #include <algorithm> #include <GraphMol/ChemTransforms/ChemTransforms.h> #include <GraphMol/Descriptors/MolDescriptors.h> #include <GraphMol/SmilesParse/SmilesWrite.h> #include "GraphMol/ChemReactions/ReactionRunner.h" #include <RDGeneral/Invariant.h> #include <GraphMol/MonomerInfo.h> #include <GraphMol/Chirality.h> namespace RDKit { typedef std::vector<MatchVectType> VectMatchVectType; typedef std::vector<VectMatchVectType> VectVectMatchVectType; namespace { const std::string WAS_DUMMY = "was_dummy"; // was the atom originally a dummy in product } // namespace namespace ReactionRunnerUtils { struct ReactantProductAtomMapping { ReactantProductAtomMapping(unsigned lenghtBitSet) { mappedAtoms.resize(lenghtBitSet); skippedAtoms.resize(lenghtBitSet); } boost::dynamic_bitset<> mappedAtoms; boost::dynamic_bitset<> skippedAtoms; std::map<unsigned int, std::vector<unsigned int>> reactProdAtomMap; std::map<unsigned int, unsigned int> prodReactAtomMap; std::map<unsigned int, unsigned int> prodAtomBondMap; // maps (atom map number,atom map number) pairs in the reactant template // to whether or not they are bonded in the template. std::map<std::pair<unsigned int, unsigned int>, unsigned int> reactantTemplateAtomBonds; }; namespace { //! returns whether or not all reactants matched const unsigned int MatchAll = UINT_MAX; /** * A storage class to find and store a StereoBond End Atom's * corresponding anchor and non-anchor neighbors. 
* * The class is agnostic about the stereo type of the bond (E/Z or CIS/TRANS) */ class StereoBondEndCap { private: unsigned m_anchor; const Atom *mp_nonAnchor = nullptr; StereoBondEndCap() = delete; StereoBondEndCap(const StereoBondEndCap &) = delete; StereoBondEndCap &operator=(const StereoBondEndCap &) = delete; public: StereoBondEndCap(const ROMol &mol, const Atom *atom, const Atom *otherDblBndAtom, const unsigned stereoAtomIdx) : m_anchor(stereoAtomIdx) { PRECONDITION(atom, "no atom"); PRECONDITION(otherDblBndAtom, "no atom"); PRECONDITION(atom->getTotalDegree() <= 3, "Stereo Bond extremes must have less than four neighbors"); const auto nbrIdxItr = mol.getAtomNeighbors(atom); const unsigned otherIdx = otherDblBndAtom->getIdx(); auto isNonAnchor = [&otherIdx, &stereoAtomIdx](const unsigned &nbrIdx) { return nbrIdx != otherIdx && nbrIdx != stereoAtomIdx; }; auto nonAnchorItr = std::find_if(nbrIdxItr.first, nbrIdxItr.second, isNonAnchor); if (nonAnchorItr != nbrIdxItr.second) { mp_nonAnchor = mol.getAtomWithIdx(*nonAnchorItr); } } StereoBondEndCap(StereoBondEndCap &&) = default; StereoBondEndCap &operator=(StereoBondEndCap &&) = default; bool hasNonAnchor() const { return mp_nonAnchor != nullptr; } unsigned getAnchorIdx() const { return m_anchor; } unsigned getNonAnchorIdx() const { return mp_nonAnchor->getIdx(); } std::pair<UINT_VECT, bool> getProductAnchorCandidates( ReactantProductAtomMapping *mapping) { auto &react2Prod = mapping->reactProdAtomMap; bool swapStereo = false; auto newAnchorMatches = react2Prod.find(getAnchorIdx()); if (newAnchorMatches != react2Prod.end()) { // The corresponding StereoAtom exists in the product return {newAnchorMatches->second, swapStereo}; } else if (hasNonAnchor()) { // The non-StereoAtom neighbor exists in the product newAnchorMatches = react2Prod.find(getNonAnchorIdx()); if (newAnchorMatches != react2Prod.end()) { swapStereo = true; return {newAnchorMatches->second, swapStereo}; } } // None of the neighbors survived the reaction return {{}, swapStereo}; } }; const Atom *findHighestCIPNeighbor(const Atom *atom, const Atom *skipAtom) { PRECONDITION(atom, "bad atom"); unsigned bestCipRank = 0; const Atom *bestCipRankedAtom = nullptr; const auto &mol = atom->getOwningMol(); for (const auto &index : boost::make_iterator_range(mol.getAtomNeighbors(atom))) { const auto neighbor = mol[index]; if (neighbor == skipAtom) { continue; } unsigned cip = 0; if (!neighbor->getPropIfPresent(common_properties::_CIPRank, cip)) { // If at least one of the atoms doesn't have a CIP rank, the highest rank // does not make sense, so return a nullptr. return nullptr; } else if (cip > bestCipRank || bestCipRankedAtom == nullptr) { bestCipRank = cip; bestCipRankedAtom = neighbor; } else if (cip == bestCipRank) { // This also doesn't make sense if there is a tie (if that's possible). // We still keep the best CIP rank in case something better comes around // (also not sure if that's possible). 
BOOST_LOG(rdWarningLog) << "Warning: duplicate CIP ranks found in findHighestCIPNeighbor()" << std::endl; bestCipRankedAtom = nullptr; } } return bestCipRankedAtom; } INT_VECT findStereoAtoms(const Bond *bond) { PRECONDITION(bond, "bad bond"); PRECONDITION(bond->hasOwningMol(), "no mol"); PRECONDITION(bond->getBondType() == Bond::DOUBLE, "not double bond"); PRECONDITION(bond->getStereo() > Bond::BondStereo::STEREOANY, "no defined stereo"); if (!bond->getStereoAtoms().empty()) { return bond->getStereoAtoms(); } if (bond->getStereo() == Bond::BondStereo::STEREOE || bond->getStereo() == Bond::BondStereo::STEREOZ) { const Atom *startStereoAtom = findHighestCIPNeighbor(bond->getBeginAtom(), bond->getEndAtom()); const Atom *endStereoAtom = findHighestCIPNeighbor(bond->getEndAtom(), bond->getBeginAtom()); if (startStereoAtom == nullptr || endStereoAtom == nullptr) { return {}; } int startStereoAtomIdx = static_cast<int>(startStereoAtom->getIdx()); int endStereoAtomIdx = static_cast<int>(endStereoAtom->getIdx()); return {startStereoAtomIdx, endStereoAtomIdx}; } else { BOOST_LOG(rdWarningLog) << "Unable to assign stereo atoms for bond " << bond->getIdx() << std::endl; return {}; } } } // namespace bool getReactantMatches(const MOL_SPTR_VECT &reactants, const ChemicalReaction &rxn, VectVectMatchVectType &matchesByReactant, unsigned int maxMatches, unsigned int matchSingleReactant = MatchAll) { PRECONDITION(reactants.size() == rxn.getNumReactantTemplates(), "reactant size mismatch"); matchesByReactant.clear(); matchesByReactant.resize(reactants.size()); bool res = true; unsigned int i = 0; for (auto iter = rxn.beginReactantTemplates(); iter != rxn.endReactantTemplates(); ++iter, i++) { if (matchSingleReactant == MatchAll || matchSingleReactant == i) { // NOTE that we are *not* uniquifying the results. // This is because we need multiple matches in reactions. For example, // The ring-closure coded as: // [C:1]=[C:2] + [C:3]=[C:4][C:5]=[C:6] -> // [C:1]1[C:2][C:3][C:4]=[C:5][C:6]1 // should give 4 products here: // [Cl]C=C + [Br]C=CC=C -> // [Cl]C1C([Br])C=CCC1 // [Cl]C1CC(Br)C=CC1 // C1C([Br])C=CCC1[Cl] // C1CC([Br])C=CC1[Cl] // Yes, in this case there are only 2 unique products, but that's // a factor of the reactants' symmetry. // // There's no particularly straightforward way of solving this problem // of recognizing cases // where we should give all matches and cases where we shouldn't; it's // safer to just // produce everything and let the client deal with uniquifying their // results. SubstructMatchParameters ssps; ssps.uniquify = false; ssps.maxMatches = maxMatches; auto matchesHere = SubstructMatch(*(reactants[i]), *iter->get(), ssps); auto matchCount = matchesHere.size(); for (const auto &match : matchesHere) { bool keep = true; for (const auto &pr : match) { if (reactants[i]->getAtomWithIdx(pr.second)->hasProp( common_properties::_protected)) { keep = false; break; } } if (keep) { matchesByReactant[i].push_back(match); } else { --matchCount; } } if (!matchCount) { // no point continuing if we don't match one of the reactants: res = false; break; } } } return res; } // end of getReactantMatches() // Return false if maxProducts has been hit... // Otherwise we can't tell if we were stopped exactly // or were terminated. 
bool recurseOverReactantCombinations( const VectVectMatchVectType &matchesByReactant, VectVectMatchVectType &matchesPerProduct, unsigned int level, VectMatchVectType combination, unsigned int maxProducts) { unsigned int nReactants = matchesByReactant.size(); URANGE_CHECK(level, nReactants); PRECONDITION(combination.size() == nReactants, "bad combination size"); if (maxProducts && matchesPerProduct.size() >= maxProducts) { return false; } bool keepGoing = true; for (auto reactIt = matchesByReactant[level].begin(); reactIt != matchesByReactant[level].end(); ++reactIt) { VectMatchVectType prod = combination; prod[level] = *reactIt; if (level == nReactants - 1) { // this is the bottom of the recursion: if (maxProducts && matchesPerProduct.size() >= maxProducts) { keepGoing = false; break; } matchesPerProduct.push_back(prod); } else { keepGoing = recurseOverReactantCombinations( matchesByReactant, matchesPerProduct, level + 1, prod, maxProducts); } } return keepGoing; } // end of recurseOverReactantCombinations void updateImplicitAtomProperties(Atom *prodAtom, const Atom *reactAtom) { PRECONDITION(prodAtom, "no product atom"); PRECONDITION(reactAtom, "no reactant atom"); if (prodAtom->getAtomicNum() != reactAtom->getAtomicNum()) { // if we changed atom identity all bets are off, just // return return; } if (!prodAtom->hasProp(common_properties::_QueryFormalCharge)) { prodAtom->setFormalCharge(reactAtom->getFormalCharge()); } if (!prodAtom->hasProp(common_properties::_QueryIsotope)) { prodAtom->setIsotope(reactAtom->getIsotope()); } if (!prodAtom->hasProp(common_properties::_ReactionDegreeChanged)) { if (!prodAtom->hasProp(common_properties::_QueryHCount)) { prodAtom->setNumExplicitHs(reactAtom->getNumExplicitHs()); prodAtom->setNoImplicit(reactAtom->getNoImplicit()); } } } void generateReactantCombinations( const VectVectMatchVectType &matchesByReactant, VectVectMatchVectType &matchesPerProduct, unsigned int maxProducts) { matchesPerProduct.clear(); VectMatchVectType tmp; tmp.clear(); tmp.resize(matchesByReactant.size()); if (!recurseOverReactantCombinations(matchesByReactant, matchesPerProduct, 0, tmp, maxProducts)) { BOOST_LOG(rdWarningLog) << "Maximum product count hit " << maxProducts << ", stopping reaction early...\n"; } } // end of generateReactantCombinations() RWMOL_SPTR convertTemplateToMol(const ROMOL_SPTR prodTemplateSptr) { const ROMol *prodTemplate = prodTemplateSptr.get(); auto *res = new RWMol(); // --------- --------- --------- --------- --------- --------- // Initialize by making a copy of the product template as a normal molecule. // NOTE that we can't just use a normal copy because we do not want to end up // with query atoms or bonds in the product. 
// copy in the atoms: ROMol::ATOM_ITER_PAIR atItP = prodTemplate->getVertices(); while (atItP.first != atItP.second) { const Atom *oAtom = (*prodTemplate)[*(atItP.first++)]; auto *newAtom = new Atom(*oAtom); res->addAtom(newAtom, false, true); int mapNum; if (newAtom->getPropIfPresent(common_properties::molAtomMapNumber, mapNum)) { // set bookmarks for the mapped atoms: res->setAtomBookmark(newAtom, mapNum); // now clear the molAtomMapNumber property so that it doesn't // end up in the products (this was bug 3140490): newAtom->clearProp(common_properties::molAtomMapNumber); newAtom->setProp<int>(common_properties::reactionMapNum, mapNum); } newAtom->setChiralTag(Atom::CHI_UNSPECIFIED); // if the product-template atom has the inversion flag set // to 4 (=SET), then bring its stereochem over, otherwise we'll // ignore it: int iFlag; if (oAtom->getPropIfPresent(common_properties::molInversionFlag, iFlag)) { if (iFlag == 4) { newAtom->setChiralTag(oAtom->getChiralTag()); } } // check for properties we need to set: int val; if (newAtom->getPropIfPresent(common_properties::_QueryFormalCharge, val)) { newAtom->setFormalCharge(val); } if (newAtom->getPropIfPresent(common_properties::_QueryHCount, val)) { newAtom->setNumExplicitHs(val); newAtom->setNoImplicit(true); // this was github #1544 } if (newAtom->getPropIfPresent(common_properties::_QueryMass, val)) { // FIX: technically should do something with this // newAtom->setMass(val); } if (newAtom->getPropIfPresent(common_properties::_QueryIsotope, val)) { newAtom->setIsotope(val); } } // and the bonds: ROMol::BOND_ITER_PAIR bondItP = prodTemplate->getEdges(); while (bondItP.first != bondItP.second) { const Bond *oldB = (*prodTemplate)[*(bondItP.first++)]; unsigned int bondIdx; bondIdx = res->addBond(oldB->getBeginAtomIdx(), oldB->getEndAtomIdx(), oldB->getBondType()) - 1; // make sure we don't lose the bond dir information: Bond *newB = res->getBondWithIdx(bondIdx); newB->setBondDir(oldB->getBondDir()); // Special case/hack: // The product has been processed by the SMARTS parser. // The SMARTS parser tags unspecified bonds as single, but then adds // a query so that they match single or double // This caused Issue 1748846 // http://sourceforge.net/tracker/index.php?func=detail&aid=1748846&group_id=160139&atid=814650 // We need to fix that little problem now: if (oldB->hasQuery()) { // remember that the product has been processed by the SMARTS parser. std::string queryDescription = oldB->getQuery()->getDescription(); if (queryDescription == "BondOr" && oldB->getBondType() == Bond::SINGLE) { // We need to fix that little problem now: if (newB->getBeginAtom()->getIsAromatic() && newB->getEndAtom()->getIsAromatic()) { newB->setBondType(Bond::AROMATIC); newB->setIsAromatic(true); } else { newB->setBondType(Bond::SINGLE); newB->setIsAromatic(false); } } else if (queryDescription == "BondNull") { newB->setProp(common_properties::NullBond, 1); } } // Double bond stereo: if a double bond has at least one bond on each side, // and none of those has a direction, then we temporarily set STEREOANY. // This has to be done before the reactant atoms are added, and will be // reviewed later on. 
if (oldB->getBondType() == Bond::BondType::DOUBLE) { const Atom *startAtom = oldB->getBeginAtom(); const Atom *endAtom = oldB->getEndAtom(); if (startAtom->getDegree() > 1 && endAtom->getDegree() > 1 && (Chirality::getNeighboringDirectedBond(*prodTemplate, startAtom) == nullptr || Chirality::getNeighboringDirectedBond(*prodTemplate, endAtom) == nullptr)) { newB->setStereo(Bond::BondStereo::STEREOANY); } } // copy properties over: bool preserveExisting = true; newB->updateProps(*static_cast<const RDProps *>(oldB), preserveExisting); } return RWMOL_SPTR(res); } // end of convertTemplateToMol() ReactantProductAtomMapping *getAtomMappingsReactantProduct( const MatchVectType &match, const ROMol &reactantTemplate, RWMOL_SPTR product, unsigned numReactAtoms) { auto *mapping = new ReactantProductAtomMapping(numReactAtoms); // keep track of which mapped atoms in the reactant template are bonded to // each other. // This is part of the fix for #1387 { ROMol::EDGE_ITER firstB, lastB; boost::tie(firstB, lastB) = reactantTemplate.getEdges(); while (firstB != lastB) { const Bond *bond = reactantTemplate[*firstB]; // this will put in pairs with 0s for things that aren't mapped, but we // don't care about that int a1mapidx = bond->getBeginAtom()->getAtomMapNum(); int a2mapidx = bond->getEndAtom()->getAtomMapNum(); if (a1mapidx > a2mapidx) { std::swap(a1mapidx, a2mapidx); } mapping->reactantTemplateAtomBonds[std::make_pair(a1mapidx, a2mapidx)] = 1; ++firstB; } } for (const auto &i : match) { const Atom *templateAtom = reactantTemplate.getAtomWithIdx(i.first); int molAtomMapNumber; if (templateAtom->getPropIfPresent(common_properties::molAtomMapNumber, molAtomMapNumber)) { if (product->hasAtomBookmark(molAtomMapNumber)) { RWMol::ATOM_PTR_LIST atomIdxs = product->getAllAtomsWithBookmark(molAtomMapNumber); for (auto a : atomIdxs) { unsigned int pIdx = a->getIdx(); mapping->reactProdAtomMap[i.second].push_back(pIdx); mapping->mappedAtoms[i.second] = 1; CHECK_INVARIANT(pIdx < product->getNumAtoms(), "yikes!"); mapping->prodReactAtomMap[pIdx] = i.second; } } else { // this skippedAtom has an atomMapNumber, but it's not in this product // (it's either in another product or it's not mapped at all). 
mapping->skippedAtoms[i.second] = 1; } } else { // This skippedAtom appears in the match, but not in a product: mapping->skippedAtoms[i.second] = 1; } } return mapping; } namespace { unsigned reactProdMapAnchorIdx(Atom *atom, const RDKit::UINT_VECT &pMatches) { PRECONDITION(atom, "no atom"); if (pMatches.size() == 1) { return pMatches[0]; } const auto &pMol = atom->getOwningMol(); const unsigned atomIdx = atom->getIdx(); auto areAtomsBonded = [&pMol, &atomIdx](const unsigned &pAnchor) { return pMol.getBondBetweenAtoms(atomIdx, pAnchor) != nullptr; }; auto match = std::find_if(pMatches.begin(), pMatches.end(), areAtomsBonded); CHECK_INVARIANT(match != pMatches.end(), "match not found"); return *match; } void forwardReactantBondStereo(ReactantProductAtomMapping *mapping, Bond *pBond, const ROMol &reactant, const Bond *rBond) { PRECONDITION(mapping, "no mapping"); PRECONDITION(pBond, "no bond"); PRECONDITION(rBond, "no bond"); PRECONDITION(rBond->getStereo() > Bond::BondStereo::STEREOANY, "bond in reactant must have defined stereo"); auto &prod2React = mapping->prodReactAtomMap; const Atom *rStart = rBond->getBeginAtom(); const Atom *rEnd = rBond->getEndAtom(); const auto rStereoAtoms = findStereoAtoms(rBond); if (rStereoAtoms.size() != 2) { BOOST_LOG(rdWarningLog) << "WARNING: neither stereo atoms nor CIP codes found for double bond. " "Stereochemistry info will not be propagated to product." << std::endl; pBond->setStereo(Bond::BondStereo::STEREONONE); return; } StereoBondEndCap start(reactant, rStart, rEnd, rStereoAtoms[0]); StereoBondEndCap end(reactant, rEnd, rStart, rStereoAtoms[1]); // The bond might be matched backwards in the reaction if (prod2React[pBond->getBeginAtomIdx()] == rEnd->getIdx()) { std::swap(start, end); } else if (prod2React[pBond->getBeginAtomIdx()] != rStart->getIdx()) { throw std::logic_error("Reactant and Product bond ends do not match"); } /** * The reactants stereo can be transmitted in three similar ways: * * 1. Survival of both stereoatoms: direct forwarding happens, i.e., * * C/C=C/[Br] in reaction [C:1]=[C:2]>>[Si:1]=[C:2]: * * C/C=C/[Br] >> C/Si=C/[Br], C/C=Si/[Br] (2 product sets) * * Both stereoatoms exist unaltered in both product sets, so we can forward * the same bond stereochemistry (trans) and set the stereoatoms in the * product to the mapped indexes of the stereoatoms in the reactant. * * 2. Survival of both anti-stereoatoms: as this pair is symmetric to the * stereoatoms, direct forwarding also happens in this case, i.e., * * Cl/C(C)=C(/Br)F in reaction * [Cl:4][C:1]=[C:2][Br:3]>>[C:1]=[C:2].[Br:3].[Cl:4]: * Cl/C(C)=C(/Br)F >> C/C=C/F + Br + Cl * * Both stereoatoms in the reactant are split from the molecule, * but the anti-stereoatoms remain in it. Since these have symmetrical * orientation to the stereoatoms, we can use these (their mapped * equivalents) as stereoatoms in the product and use the same * stereochemistry label (trans). * * 3. Survival of a mixed pair stereoatom-anti-stereoatom: such a pair * defines the opposite stereochemistry to the one labeled on the * reactant, but it is also valid, as long ase we use the properly mapped * indexes: * * Cl/C(C)=C(/Br)F in reaction [Cl:4][C:1]=[C:2][Br:3]>>[C:1]=[C:2].[Br:3]: * * Cl/C(C)=C(/Br)F >> C/C=C/F + Br * * In this case, one of the stereoatoms is conserved, and the other one is * switched to the other neighbor at the same end of the bond as the * non-conserved stereoatom. 
Since the reference changed, the * stereochemistry label needs to be flipped too: in this case, the * reactant was trans, and the product will be cis. * * Reaction [Cl:4][C:1]=[C:2][Br:3]>>[C:1]=[C:2].[Cl:4] would have the same * effect, with the only difference that the non-conserved stereoatom would * be the one at the opposite end of the reactant. */ auto pStartAnchorCandidates = start.getProductAnchorCandidates(mapping); auto pEndAnchorCandidates = end.getProductAnchorCandidates(mapping); // The reaction has invalidated the reactant's stereochemistry if (pStartAnchorCandidates.first.empty() || pEndAnchorCandidates.first.empty()) { return; } unsigned pStartAnchorIdx = reactProdMapAnchorIdx( pBond->getBeginAtom(), pStartAnchorCandidates.first); unsigned pEndAnchorIdx = reactProdMapAnchorIdx(pBond->getEndAtom(), pEndAnchorCandidates.first); pBond->setStereoAtoms(pStartAnchorIdx, pEndAnchorIdx); bool flipStereo = (pStartAnchorCandidates.second + pEndAnchorCandidates.second) % 2; if (rBond->getStereo() == Bond::BondStereo::STEREOCIS || rBond->getStereo() == Bond::BondStereo::STEREOZ) { if (flipStereo) { pBond->setStereo(Bond::BondStereo::STEREOTRANS); } else { pBond->setStereo(Bond::BondStereo::STEREOCIS); } } else { if (flipStereo) { pBond->setStereo(Bond::BondStereo::STEREOCIS); } else { pBond->setStereo(Bond::BondStereo::STEREOTRANS); } } } void translateProductStereoBondDirections(Bond *pBond, const Bond *start, const Bond *end) { PRECONDITION(pBond, "no bond"); PRECONDITION(start && end && Chirality::hasStereoBondDir(start) && Chirality::hasStereoBondDir(end), "Both neighboring bonds must have bond directions"); unsigned pStartAnchorIdx = start->getOtherAtomIdx(pBond->getBeginAtomIdx()); unsigned pEndAnchorIdx = end->getOtherAtomIdx(pBond->getEndAtomIdx()); pBond->setStereoAtoms(pStartAnchorIdx, pEndAnchorIdx); if (start->getBondDir() == end->getBondDir()) { pBond->setStereo(Bond::BondStereo::STEREOTRANS); } else { pBond->setStereo(Bond::BondStereo::STEREOCIS); } } /** * Core of the double bond stereochemistry handling (the first stereo check on * the product template does actually happen in convertTemplateToMol()). * * Stereo in the product templates (defined by bond directions) will override * the one in the reactants. * * Each double bond will be checked against the following rules: * 1- if product bond is marked as STEREOANY, check if stereo is possible * on the bond, and eventually, keep the STEREOANY label or reset it to * STEREONONE if not. * 2- if the product has bond directions set, deduce the final stereochemistry * from them. * 3- if there are no bond directions, check the atom mapping in the reaction to * see if the reactant's stereochemistry is preserved. * 4- in any other case, keep the STEREONONE label. 
*/ void updateStereoBonds(RWMOL_SPTR product, const ROMol &reactant, ReactantProductAtomMapping *mapping) { for (Bond *pBond : product->bonds()) { // We are only interested in double bonds if (pBond->getBondType() != Bond::BondType::DOUBLE) { continue; } // If the product bond was previously marked as STEREOANY, check if it can // actually sustain stereo (this could not be checked until we had all the // atoms in the product) if (Bond::BondStereo::STEREOANY == pBond->getStereo()) { Atom *pStart = pBond->getBeginAtom(); Atom *pEnd = pBond->getEndAtom(); pStart->calcImplicitValence(true); pEnd->calcImplicitValence(true); if (pStart->getTotalDegree() < 3 || pEnd->getTotalDegree() < 3) { pBond->setStereo(Bond::BondStereo::STEREONONE); } continue; } // Check if the reaction defined the stereo for the bond: SMARTS can only // use bond directions for this, and both sides of the double bond must have // them, else they will be ignored, as there is no reference to decide the // stereo. const auto *pBondStartDirBond = Chirality::getNeighboringDirectedBond(*product, pBond->getBeginAtom()); const auto *pBondEndDirBond = Chirality::getNeighboringDirectedBond(*product, pBond->getEndAtom()); if (pBondStartDirBond != nullptr && pBondEndDirBond != nullptr) { translateProductStereoBondDirections(pBond, pBondStartDirBond, pBondEndDirBond); } else { // If the reaction did not specify the stereo, then we need to rely on the // atom mapping and use the reactant's stereo. // The atoms and the bond might have been added in the reaction const auto begIdxItr = mapping->prodReactAtomMap.find(pBond->getBeginAtomIdx()); if (begIdxItr == mapping->prodReactAtomMap.end()) { continue; } const auto endIdxItr = mapping->prodReactAtomMap.find(pBond->getEndAtomIdx()); if (endIdxItr == mapping->prodReactAtomMap.end()) { continue; } const Bond *rBond = reactant.getBondBetweenAtoms(begIdxItr->second, endIdxItr->second); if (rBond && rBond->getBondType() == Bond::BondType::DOUBLE) { // The bond might not have been present in the reactant, or its order // might have changed if (rBond->getStereo() > Bond::BondStereo::STEREOANY) { // If the bond had stereo, forward it forwardReactantBondStereo(mapping, pBond, reactant, rBond); } else if (rBond->getStereo() == Bond::BondStereo::STEREOANY) { pBond->setStereo(Bond::BondStereo::STEREOANY); } } // No stereo: Bond::BondStereo::STEREONONE } } } } // namespace void setReactantBondPropertiesToProduct(RWMOL_SPTR product, const ROMol &reactant, ReactantProductAtomMapping *mapping) { ROMol::BOND_ITER_PAIR bondItP = product->getEdges(); while (bondItP.first != bondItP.second) { Bond *pBond = (*product)[*(bondItP.first)]; ++bondItP.first; if (!pBond->hasProp(common_properties::NullBond) && !pBond->hasProp(common_properties::_MolFileBondQuery)) { continue; } auto rBondBegin = mapping->prodReactAtomMap.find(pBond->getBeginAtomIdx()); auto rBondEnd = mapping->prodReactAtomMap.find(pBond->getEndAtomIdx()); if (rBondBegin == mapping->prodReactAtomMap.end() || rBondEnd == mapping->prodReactAtomMap.end()) { continue; } // the bond is between two mapped atoms from this reactant: const Bond *rBond = reactant.getBondBetweenAtoms(rBondBegin->second, rBondEnd->second); if (!rBond) { continue; } pBond->setBondType(rBond->getBondType()); pBond->setIsAromatic(rBond->getIsAromatic()); if (pBond->hasProp(common_properties::NullBond)) { pBond->clearProp(common_properties::NullBond); } } } void checkProductChirality(Atom::ChiralType reactantChirality, Atom *productAtom) { int flagVal; 
productAtom->getProp(common_properties::molInversionFlag, flagVal); switch (flagVal) { case 0: // reaction doesn't have anything to say about the chirality // FIX: should we clear the chirality or leave it alone? for now we leave // it alone productAtom->setChiralTag(reactantChirality); break; case 1: // reaction inverts chirality if (reactantChirality != Atom::CHI_TETRAHEDRAL_CW && reactantChirality != Atom::CHI_TETRAHEDRAL_CCW) { BOOST_LOG(rdWarningLog) << "unsupported chiral type on reactant atom ignored\n"; } else { productAtom->setChiralTag(reactantChirality); productAtom->invertChirality(); } break; case 2: // reaction retains chirality: // retention: just set to the reactant productAtom->setChiralTag(reactantChirality); break; case 3: // reaction destroys chirality: // remove stereo productAtom->setChiralTag(Atom::CHI_UNSPECIFIED); break; case 4: // reaction creates chirality. // set stereo, so leave it the way it was in the product template break; default: BOOST_LOG(rdWarningLog) << "unrecognized chiral inversion/retention flag " "on product atom ignored\n"; } } void setReactantAtomPropertiesToProduct(Atom *productAtom, const Atom &reactantAtom, bool setImplicitProperties) { // which properties need to be set from the reactant? if (productAtom->getAtomicNum() <= 0 || productAtom->hasProp(common_properties::_MolFileAtomQuery)) { productAtom->setAtomicNum(reactantAtom.getAtomicNum()); productAtom->setIsAromatic(reactantAtom.getIsAromatic()); // don't copy isotope information over from dummy atoms // (part of github #243) unless we're setting implicit properties, // in which case we do need to copy them in (github #1269) if (!setImplicitProperties) { productAtom->setIsotope(reactantAtom.getIsotope()); } // remove dummy labels (if present) if (productAtom->hasProp(common_properties::dummyLabel)) { productAtom->clearProp(common_properties::dummyLabel); } if (productAtom->hasProp(common_properties::_MolFileRLabel)) { productAtom->clearProp(common_properties::_MolFileRLabel); } productAtom->setProp<unsigned int>(common_properties::reactantAtomIdx, reactantAtom.getIdx()); productAtom->setProp(WAS_DUMMY, true); } else { // remove bookkeeping labels (if present) if (productAtom->hasProp(WAS_DUMMY)) { productAtom->clearProp(WAS_DUMMY); } } productAtom->setProp<unsigned int>(common_properties::reactantAtomIdx, reactantAtom.getIdx()); if (setImplicitProperties) { updateImplicitAtomProperties(productAtom, &reactantAtom); } // One might be tempted to copy over the reactant atom's chirality into the // product atom if chirality is not specified on the product. This would be a // very bad idea because the order of bonds will almost certainly change on // the atom and the chirality is referenced to bond order. // --------- --------- --------- --------- --------- --------- // While we're here, set the stereochemistry // FIX: this should be free-standing, not in this function. if (reactantAtom.getChiralTag() != Atom::CHI_UNSPECIFIED && reactantAtom.getChiralTag() != Atom::CHI_OTHER && productAtom->hasProp(common_properties::molInversionFlag)) { checkProductChirality(reactantAtom.getChiralTag(), productAtom); } // copy over residue information if it's there. 
This was github #1632 if (reactantAtom.getMonomerInfo()) { productAtom->setMonomerInfo(reactantAtom.getMonomerInfo()->copy()); } } void addMissingProductBonds(const Bond &origB, RWMOL_SPTR product, ReactantProductAtomMapping *mapping) { unsigned int begIdx = origB.getBeginAtomIdx(); unsigned int endIdx = origB.getEndAtomIdx(); std::vector<unsigned> prodBeginIdxs = mapping->reactProdAtomMap[begIdx]; std::vector<unsigned> prodEndIdxs = mapping->reactProdAtomMap[endIdx]; CHECK_INVARIANT(prodBeginIdxs.size() == prodEndIdxs.size(), "Different number of start-end points for product bonds."); for (unsigned i = 0; i < prodBeginIdxs.size(); i++) { product->addBond(prodBeginIdxs.at(i), prodEndIdxs.at(i), origB.getBondType()); } } void addMissingProductAtom(const Atom &reactAtom, unsigned reactNeighborIdx, unsigned prodNeighborIdx, RWMOL_SPTR product, const ROMol &reactant, ReactantProductAtomMapping *mapping) { auto *newAtom = new Atom(reactAtom); unsigned reactAtomIdx = reactAtom.getIdx(); newAtom->setProp<unsigned int>(common_properties::reactantAtomIdx, reactAtomIdx); unsigned productIdx = product->addAtom(newAtom, false, true); mapping->reactProdAtomMap[reactAtomIdx].push_back(productIdx); mapping->prodReactAtomMap[productIdx] = reactAtomIdx; // add the bonds const Bond *origB = reactant.getBondBetweenAtoms(reactNeighborIdx, reactAtomIdx); unsigned int begIdx = origB->getBeginAtomIdx(); if (begIdx == reactNeighborIdx) { product->addBond(prodNeighborIdx, productIdx, origB->getBondType()); } else { product->addBond(productIdx, prodNeighborIdx, origB->getBondType()); } } void addReactantNeighborsToProduct( const ROMol &reactant, const Atom &reactantAtom, RWMOL_SPTR product, boost::dynamic_bitset<> &visitedAtoms, std::vector<const Atom *> &chiralAtomsToCheck, ReactantProductAtomMapping *mapping) { std::list<const Atom *> atomStack; atomStack.push_back(&reactantAtom); // std::cerr << "-------------------" << std::endl; // std::cerr << " add reactant neighbors from: " << reactantAtom.getIdx() // << std::endl; // #if 1 // product->updatePropertyCache(false); // product->debugMol(std::cerr); // std::cerr << "-------------------" << std::endl; // #endif while (!atomStack.empty()) { const Atom *lReactantAtom = atomStack.front(); // std::cerr << " front: " << lReactantAtom->getIdx() << std::endl; atomStack.pop_front(); // each atom in the stack is guaranteed to already be in the product: CHECK_INVARIANT(mapping->reactProdAtomMap.find(lReactantAtom->getIdx()) != mapping->reactProdAtomMap.end(), "reactant atom on traversal stack not present in product."); std::vector<unsigned> lReactantAtomProductIndex = mapping->reactProdAtomMap[lReactantAtom->getIdx()]; unsigned lreactIdx = lReactantAtom->getIdx(); visitedAtoms[lreactIdx] = 1; // Check our neighbors: ROMol::ADJ_ITER nbrIdx, endNbrs; boost::tie(nbrIdx, endNbrs) = reactant.getAtomNeighbors(lReactantAtom); while (nbrIdx != endNbrs) { // Four possibilities here. 
The neighbor: // 0) has been visited already: do nothing // 1) is part of the match (thus already in the product): set a bond to // it // 2) has been added: set a bond to it // 3) has not yet been added: add it, set a bond to it, and push it // onto the stack // std::cerr << " nbr: " << *nbrIdx << std::endl; // std::cerr << " visited: " << visitedAtoms[*nbrIdx] // << " skipped: " << mapping->skippedAtoms[*nbrIdx] // << " mapped: " << mapping->mappedAtoms[*nbrIdx] // << " mappedO: " << mapping->mappedAtoms[lreactIdx] << // std::endl; if (!visitedAtoms[*nbrIdx] && !mapping->skippedAtoms[*nbrIdx]) { if (mapping->mappedAtoms[*nbrIdx]) { // this is case 1 (neighbor in match); set a bond to the neighbor if // this atom // is not also in the match (match-match bonds were set when the // product template was // copied in to start things off).; if (!mapping->mappedAtoms[lreactIdx]) { CHECK_INVARIANT(mapping->reactProdAtomMap.find(*nbrIdx) != mapping->reactProdAtomMap.end(), "reactant atom not present in product."); const Bond *origB = reactant.getBondBetweenAtoms(lreactIdx, *nbrIdx); addMissingProductBonds(*origB, product, mapping); } else { // both mapped atoms are in the match. // they are bonded in the reactant (otherwise we wouldn't be here), // // If they do not have already have a bond in the product and did // not have one in the reactant template then set one here // If they do have a bond in the reactant template, then we // assume that this is an intentional bond break, so we don't do // anything // // this was github #1387 unsigned prodBeginIdx = mapping->reactProdAtomMap[lreactIdx][0]; unsigned prodEndIdx = mapping->reactProdAtomMap[*nbrIdx][0]; if (!product->getBondBetweenAtoms(prodBeginIdx, prodEndIdx)) { // They must be mapped CHECK_INVARIANT( product->getAtomWithIdx(prodBeginIdx) ->hasProp(common_properties::reactionMapNum) && product->getAtomWithIdx(prodEndIdx) ->hasProp(common_properties::reactionMapNum), "atoms should be mapped in product"); int a1mapidx = product->getAtomWithIdx(prodBeginIdx) ->getProp<int>(common_properties::reactionMapNum); int a2mapidx = product->getAtomWithIdx(prodEndIdx) ->getProp<int>(common_properties::reactionMapNum); if (a1mapidx > a2mapidx) { std::swap(a1mapidx, a2mapidx); } if (mapping->reactantTemplateAtomBonds.find( std::make_pair(a1mapidx, a2mapidx)) == mapping->reactantTemplateAtomBonds.end()) { const Bond *origB = reactant.getBondBetweenAtoms(lreactIdx, *nbrIdx); addMissingProductBonds(*origB, product, mapping); } } } } else if (mapping->reactProdAtomMap.find(*nbrIdx) != mapping->reactProdAtomMap.end()) { // case 2, the neighbor has been added and we just need to set a bond // to it: const Bond *origB = reactant.getBondBetweenAtoms(lreactIdx, *nbrIdx); addMissingProductBonds(*origB, product, mapping); } else { // case 3, add the atom, a bond to it, and push the atom onto the // stack const Atom *neighbor = reactant.getAtomWithIdx(*nbrIdx); for (unsigned int i : lReactantAtomProductIndex) { addMissingProductAtom(*neighbor, lreactIdx, i, product, reactant, mapping); } // update the stack: atomStack.push_back(neighbor); // if the atom is chiral, we need to check its bond ordering later: if (neighbor->getChiralTag() != Atom::CHI_UNSPECIFIED) { chiralAtomsToCheck.push_back(neighbor); } } } nbrIdx++; } } // end of atomStack traversal } void checkAndCorrectChiralityOfMatchingAtomsInProduct( const ROMol &reactant, unsigned reactantAtomIdx, const Atom &reactantAtom, RWMOL_SPTR product, ReactantProductAtomMapping *mapping) { for (unsigned i = 0; i < 
mapping->reactProdAtomMap[reactantAtomIdx].size(); i++) { unsigned productAtomIdx = mapping->reactProdAtomMap[reactantAtomIdx][i]; Atom *productAtom = product->getAtomWithIdx(productAtomIdx); if (productAtom->getChiralTag() != Atom::CHI_UNSPECIFIED || reactantAtom.getChiralTag() == Atom::CHI_UNSPECIFIED || reactantAtom.getChiralTag() == Atom::CHI_OTHER || productAtom->hasProp(common_properties::molInversionFlag)) { continue; } // we can only do something sensible here if we have the same number of // bonds in the reactants and the products: if (reactantAtom.getDegree() != productAtom->getDegree()) { continue; } unsigned int nUnknown = 0; INT_LIST pOrder; ROMol::ADJ_ITER nbrIdx, endNbrs; boost::tie(nbrIdx, endNbrs) = product->getAtomNeighbors(productAtom); while (nbrIdx != endNbrs) { if (mapping->prodReactAtomMap.find(*nbrIdx) == mapping->prodReactAtomMap.end() || !reactant.getBondBetweenAtoms(reactantAtom.getIdx(), mapping->prodReactAtomMap[*nbrIdx])) { ++nUnknown; // if there's more than one bond in the product that doesn't correspond // to anything in the reactant, we're also doomed if (nUnknown > 1) { break; } // otherwise, add a -1 to the bond order that we'll fill in later pOrder.push_back(-1); } else { const Bond *rBond = reactant.getBondBetweenAtoms( reactantAtom.getIdx(), mapping->prodReactAtomMap[*nbrIdx]); CHECK_INVARIANT(rBond, "expected reactant bond not found"); pOrder.push_back(rBond->getIdx()); } ++nbrIdx; } if (nUnknown == 1) { // find the reactant bond that hasn't yet been accounted for: int unmatchedBond = -1; boost::tie(nbrIdx, endNbrs) = reactant.getAtomNeighbors(&reactantAtom); while (nbrIdx != endNbrs) { const Bond *rBond = reactant.getBondBetweenAtoms(reactantAtom.getIdx(), *nbrIdx); if (std::find(pOrder.begin(), pOrder.end(), rBond->getIdx()) == pOrder.end()) { unmatchedBond = rBond->getIdx(); break; } ++nbrIdx; } // what must be true at this point: // 1) there's a -1 in pOrder that we'll substitute for // 2) unmatchedBond contains the index of the substitution auto bPos = std::find(pOrder.begin(), pOrder.end(), -1); if (unmatchedBond >= 0 && bPos != pOrder.end()) { *bPos = unmatchedBond; } if (std::find(pOrder.begin(), pOrder.end(), -1) == pOrder.end()) { nUnknown = 0; } } if (!nUnknown) { productAtom->setChiralTag(reactantAtom.getChiralTag()); int nSwaps = reactantAtom.getPerturbationOrder(pOrder); if (nSwaps % 2) { productAtom->invertChirality(); } } } } // Check the chirality of atoms not directly involved in the reaction void checkAndCorrectChiralityOfProduct( const std::vector<const Atom *> &chiralAtomsToCheck, RWMOL_SPTR product, ReactantProductAtomMapping *mapping) { for (auto reactantAtom : chiralAtomsToCheck) { CHECK_INVARIANT(reactantAtom->getChiralTag() != Atom::CHI_UNSPECIFIED, "missing atom chirality."); const auto reactAtomDegree = reactantAtom->getOwningMol().getAtomDegree(reactantAtom); for (unsigned i = 0; i < mapping->reactProdAtomMap[reactantAtom->getIdx()].size(); i++) { unsigned productAtomIdx = mapping->reactProdAtomMap[reactantAtom->getIdx()][i]; Atom *productAtom = product->getAtomWithIdx(productAtomIdx); CHECK_INVARIANT( reactantAtom->getChiralTag() == productAtom->getChiralTag(), "invalid product chirality."); if (reactAtomDegree != product->getAtomDegree(productAtom)) { // If the number of bonds to the atom has changed in the course of the // reaction we're lost, so remove chirality. 
// A word of explanation here: the atoms in the chiralAtomsToCheck set // are not explicitly mapped atoms of the reaction, so we really have // no idea what to do with this case. At the moment I'm not even really // sure how this could happen, but better safe than sorry. productAtom->setChiralTag(Atom::CHI_UNSPECIFIED); } else if (reactantAtom->getChiralTag() == Atom::CHI_TETRAHEDRAL_CW || reactantAtom->getChiralTag() == Atom::CHI_TETRAHEDRAL_CCW) { // this will contain the indices of product bonds in the // reactant order: INT_LIST newOrder; ROMol::OEDGE_ITER beg, end; boost::tie(beg, end) = reactantAtom->getOwningMol().getAtomBonds(reactantAtom); while (beg != end) { const Bond *reactantBond = reactantAtom->getOwningMol()[*beg]; unsigned int oAtomIdx = reactantBond->getOtherAtomIdx(reactantAtom->getIdx()); CHECK_INVARIANT(mapping->reactProdAtomMap.find(oAtomIdx) != mapping->reactProdAtomMap.end(), "other atom from bond not mapped."); const Bond *productBond; unsigned neighborBondIdx = mapping->reactProdAtomMap[oAtomIdx][i]; productBond = product->getBondBetweenAtoms(productAtom->getIdx(), neighborBondIdx); CHECK_INVARIANT(productBond, "no matching bond found in product"); newOrder.push_back(productBond->getIdx()); ++beg; } int nSwaps = productAtom->getPerturbationOrder(newOrder); if (nSwaps % 2) { productAtom->invertChirality(); } } else { // not tetrahedral chirality, don't do anything. } } } // end of loop over chiralAtomsToCheck } /// // Copy enhanced stereo groups from one reactant to the product // stereo groups are copied if any atoms are in the product with // the stereochemical information from the reactant preserved. void copyEnhancedStereoGroups(const ROMol &reactant, RWMOL_SPTR product, const ReactantProductAtomMapping &mapping) { std::vector<StereoGroup> new_stereo_groups; for (const auto &sg : reactant.getStereoGroups()) { std::vector<Atom *> atoms; for (auto &&reactantAtom : sg.getAtoms()) { auto productAtoms = mapping.reactProdAtomMap.find(reactantAtom->getIdx()); if (productAtoms == mapping.reactProdAtomMap.end()) { continue; } for (auto &&productAtomIdx : productAtoms->second) { auto productAtom = product->getAtomWithIdx(productAtomIdx); // If chirality destroyed by the reaction, skip the atom if (productAtom->getChiralTag() == Atom::CHI_UNSPECIFIED) { continue; } // If chirality defined explicitly by the reaction, skip the atom int flagVal = 0; productAtom->getPropIfPresent(common_properties::molInversionFlag, flagVal); if (flagVal == 4) { continue; } atoms.push_back(productAtom); } } if (!atoms.empty()) { new_stereo_groups.emplace_back(sg.getGroupType(), std::move(atoms)); } } if (!new_stereo_groups.empty()) { auto &existing_sg = product->getStereoGroups(); new_stereo_groups.insert(new_stereo_groups.end(), existing_sg.begin(), existing_sg.end()); product->setStereoGroups(std::move(new_stereo_groups)); } } void generateProductConformers(Conformer *productConf, const ROMol &reactant, ReactantProductAtomMapping *mapping) { if (!reactant.getNumConformers()) { return; } const Conformer &reactConf = reactant.getConformer(); if (reactConf.is3D()) { productConf->set3D(true); } for (std::map<unsigned int, std::vector<unsigned int>>::const_iterator pr = mapping->reactProdAtomMap.begin(); pr != mapping->reactProdAtomMap.end(); ++pr) { std::vector<unsigned> prodIdxs = pr->second; if (prodIdxs.size() > 1) { BOOST_LOG(rdWarningLog) << "reactant atom match more than one product " "atom, coordinates need to be revised\n"; } // is this reliable when multiple product atom mapping 
occurs???? for (unsigned int prodIdx : prodIdxs) { productConf->setAtomPos(prodIdx, reactConf.getAtomPos(pr->first)); } } } void addReactantAtomsAndBonds(const ChemicalReaction &rxn, RWMOL_SPTR product, const ROMOL_SPTR reactantSptr, const MatchVectType &match, const ROMOL_SPTR reactantTemplate, Conformer *productConf) { // start by looping over all matches and marking the reactant atoms that // have already been "added" by virtue of being in the product. We'll also // mark "skipped" atoms: those that are in the match, but not in this // particular product (or, perhaps, not in any product) // At the same time we'll set up a map between the indices of those // atoms and their index in the product. ReactantProductAtomMapping *mapping = getAtomMappingsReactantProduct( match, *reactantTemplate, product, reactantSptr->getNumAtoms()); boost::dynamic_bitset<> visitedAtoms(reactantSptr->getNumAtoms()); const ROMol *reactant = reactantSptr.get(); // ---------- ---------- ---------- ---------- ---------- ---------- // Loop over the bonds in the product and look for those that have // the NullBond property set. These are bonds for which no information // (other than their existence) was provided in the template: setReactantBondPropertiesToProduct(product, *reactant, mapping); // ---------- ---------- ---------- ---------- ---------- ---------- // Loop over the atoms in the match that were added to the product // From the corresponding atom in the reactant, do a graph traversal // to find other connected atoms that should be added: std::vector<const Atom *> chiralAtomsToCheck; for (const auto &matchIdx : match) { int reactantAtomIdx = matchIdx.second; if (mapping->mappedAtoms[reactantAtomIdx]) { CHECK_INVARIANT(mapping->reactProdAtomMap.find(reactantAtomIdx) != mapping->reactProdAtomMap.end(), "mapped reactant atom not present in product."); const Atom *reactantAtom = reactant->getAtomWithIdx(reactantAtomIdx); for (unsigned i = 0; i < mapping->reactProdAtomMap[reactantAtomIdx].size(); i++) { // here's a pointer to the atom in the product: unsigned productAtomIdx = mapping->reactProdAtomMap[reactantAtomIdx][i]; Atom *productAtom = product->getAtomWithIdx(productAtomIdx); setReactantAtomPropertiesToProduct(productAtom, *reactantAtom, rxn.getImplicitPropertiesFlag()); } // now traverse: addReactantNeighborsToProduct(*reactant, *reactantAtom, product, visitedAtoms, chiralAtomsToCheck, mapping); // now that we've added all the reactant's neighbors, check to see if // it is chiral in the reactant but is not in the reaction. If so // we need to worry about its chirality checkAndCorrectChiralityOfMatchingAtomsInProduct( *reactant, reactantAtomIdx, *reactantAtom, product, mapping); } } // end of loop over matched atoms // ---------- ---------- ---------- ---------- ---------- ---------- // now we need to loop over atoms from the reactants that were chiral but not // directly involved in the reaction in order to make sure their chirality // hasn't been disturbed checkAndCorrectChiralityOfProduct(chiralAtomsToCheck, product, mapping); updateStereoBonds(product, *reactant, mapping); // ---------- ---------- ---------- ---------- ---------- ---------- // Copy enhanced StereoGroup data from reactant to product if it is // still valid. Uses ChiralTag checks above. 
copyEnhancedStereoGroups(*reactant, product, *mapping); // ---------- ---------- ---------- ---------- ---------- ---------- // finally we may need to set the coordinates in the product conformer: if (productConf) { productConf->resize(product->getNumAtoms()); generateProductConformers(productConf, *reactant, mapping); } delete (mapping); } // end of addReactantAtomsAndBonds MOL_SPTR_VECT generateOneProductSet(const ChemicalReaction &rxn, const MOL_SPTR_VECT &reactants, const std::vector<MatchVectType> &reactantsMatch) { PRECONDITION(reactants.size() == reactantsMatch.size(), "vector size mismatch"); // if any of the reactants have a conformer, we'll go ahead and // generate conformers for the products: bool doConfs = false; // if any of the reactants have a single bond with directionality specified, // we will make sure that the output molecules have directionality specified. bool doBondDirs = false; for (const auto &reactant : reactants) { if (reactant->getNumConformers()) { doConfs = true; } for (const auto bnd : reactant->bonds()) { if (bnd->getBondType() == Bond::SINGLE && bnd->getBondDir() > Bond::NONE) { doBondDirs = true; break; } } if (doConfs && doBondDirs) { break; } } MOL_SPTR_VECT res; res.resize(rxn.getNumProductTemplates()); unsigned int prodId = 0; for (auto pTemplIt = rxn.beginProductTemplates(); pTemplIt != rxn.endProductTemplates(); ++pTemplIt) { // copy product template and its properties to a new product RWMol RWMOL_SPTR product = convertTemplateToMol(*pTemplIt); Conformer *conf = nullptr; if (doConfs) { conf = new Conformer(); conf->set3D(false); } unsigned int reactantId = 0; for (auto iter = rxn.beginReactantTemplates(); iter != rxn.endReactantTemplates(); ++iter, reactantId++) { addReactantAtomsAndBonds(rxn, product, reactants.at(reactantId), reactantsMatch.at(reactantId), *iter, conf); } if (doConfs) { product->addConformer(conf, true); } // if there was bond direction information in any reactant, it has been // lost, add it back. 
if (doBondDirs) { MolOps::setDoubleBondNeighborDirections(*product); } res[prodId] = product; ++prodId; } return res; } } // namespace ReactionRunnerUtils std::vector<MOL_SPTR_VECT> run_Reactants(const ChemicalReaction &rxn, const MOL_SPTR_VECT &reactants, unsigned int maxProducts) { if (!rxn.isInitialized()) { throw ChemicalReactionException( "initMatchers() must be called before runReactants()"); } if (reactants.size() != rxn.getNumReactantTemplates()) { throw ChemicalReactionException( "Number of reactants provided does not match number of reactant " "templates."); } BOOST_FOREACH (ROMOL_SPTR msptr, reactants) { CHECK_INVARIANT(msptr, "bad molecule in reactants"); msptr->clearAllAtomBookmarks(); // we use this as scratch space } std::vector<MOL_SPTR_VECT> productMols; productMols.clear(); // if we have no products, return now: if (!rxn.getNumProductTemplates()) { return productMols; } // find the matches for each reactant: VectVectMatchVectType matchesByReactant; if (!ReactionRunnerUtils::getReactantMatches( reactants, rxn, matchesByReactant, maxProducts)) { // some reactants didn't find a match, return an empty product list: return productMols; } // ------------------------------------------------------- // we now have matches for each reactant, so we can start creating products: // start by doing the combinatorics on the matches: VectVectMatchVectType reactantMatchesPerProduct; ReactionRunnerUtils::generateReactantCombinations( matchesByReactant, reactantMatchesPerProduct, maxProducts); productMols.resize(reactantMatchesPerProduct.size()); for (unsigned int productId = 0; productId != productMols.size(); ++productId) { MOL_SPTR_VECT lProds = ReactionRunnerUtils::generateOneProductSet( rxn, reactants, reactantMatchesPerProduct[productId]); productMols[productId] = lProds; } return productMols; } // end of ChemicalReaction::runReactants() // Generate the product set based on a SINGLE reactant std::vector<MOL_SPTR_VECT> run_Reactant(const ChemicalReaction &rxn, const ROMOL_SPTR &reactant, unsigned int reactantIdx) { if (!rxn.isInitialized()) { throw ChemicalReactionException( "initMatchers() must be called before runReactants()"); } CHECK_INVARIANT(reactant, "bad molecule in reactants"); reactant->clearAllAtomBookmarks(); // we use this as scratch space std::vector<MOL_SPTR_VECT> productMols; // if we have no products, return now: if (!rxn.getNumProductTemplates()) { return productMols; } CHECK_INVARIANT(static_cast<size_t>(reactantIdx) < rxn.getReactants().size(), "reactantIdx out of bounds"); // find the matches for each reactant: VectVectMatchVectType matchesByReactant; // assemble the reactants (use an empty mol for missing reactants) MOL_SPTR_VECT reactants(rxn.getNumReactantTemplates()); for (size_t i = 0; i < rxn.getNumReactantTemplates(); ++i) { if (i == reactantIdx) { reactants[i] = reactant; } else { reactants[i] = ROMOL_SPTR(new ROMol); } } if (!ReactionRunnerUtils::getReactantMatches( reactants, rxn, matchesByReactant, 1000, reactantIdx)) { return productMols; } VectMatchVectType &matches = matchesByReactant[reactantIdx]; // each match on a reactant is a separate product VectVectMatchVectType matchesAtReactants(matches.size()); for (size_t i = 0; i < matches.size(); ++i) { matchesAtReactants[i].resize(rxn.getReactants().size()); matchesAtReactants[i][reactantIdx] = matches[i]; } productMols.resize(matches.size()); for (unsigned int productId = 0; productId != productMols.size(); ++productId) { MOL_SPTR_VECT lProds = ReactionRunnerUtils::generateOneProductSet( rxn, reactants, 
matchesAtReactants[productId]); productMols[productId] = lProds; } return productMols; } // end of ChemicalReaction::runReactants() namespace { int getAtomMapNo(ROMol::ATOM_BOOKMARK_MAP *map, Atom *atom) { if (map) { for (ROMol::ATOM_BOOKMARK_MAP::const_iterator it = map->begin(); it != map->end(); ++it) { for (auto ait = it->second.begin(); ait != it->second.end(); ++ait) { if (*ait == atom) { return it->first; } } } } return -1; } } // namespace namespace { struct RGroup { Atom *rAtom; Bond::BondType bond_type; int mapno; RGroup(Atom *atom, Bond::BondType type, int curmapno = -1) : rAtom(atom), bond_type(type), mapno(curmapno) {} RGroup(const RGroup &rhs) : rAtom(rhs.rAtom), bond_type(rhs.bond_type), mapno(rhs.mapno) {} }; } // namespace ROMol *reduceProductToSideChains(const ROMOL_SPTR &product, bool addDummyAtoms) { CHECK_INVARIANT(product, "bad molecule"); auto *mol = new RWMol(*product.get()); // CHECK_INVARIANT(productID < rxn.getProducts().size()); // Remove all atoms belonging to the product UNLESS // they are attached to the reactant (inverse r-group) const unsigned int numAtoms = mol->getNumAtoms(); // Go backwards through the atoms so that removing atoms doesn't // muck up the next atom in the loops index. std::vector<unsigned int> atomsToRemove; for (int scaffold_atom_idx = numAtoms - 1; scaffold_atom_idx >= 0; --scaffold_atom_idx) { Atom *scaffold_atom = mol->getAtomWithIdx(rdcast<unsigned int>(scaffold_atom_idx)); // add map no's here from dummy atoms // was this atom in one of the reactant templates? if (scaffold_atom->hasProp(common_properties::reactionMapNum) || !scaffold_atom->hasProp(common_properties::reactantAtomIdx)) { // are we attached to a reactant atom? ROMol::ADJ_ITER nbrIdx, endNbrs; boost::tie(nbrIdx, endNbrs) = mol->getAtomNeighbors(scaffold_atom); std::vector<RGroup> bonds_to_product; while (nbrIdx != endNbrs) { Atom *nbr = mol->getAtomWithIdx(*nbrIdx); if (!nbr->hasProp(common_properties::reactionMapNum) && nbr->hasProp(common_properties::reactantAtomIdx)) { if (nbr->hasProp(WAS_DUMMY)) { bonds_to_product.push_back(RGroup( nbr, mol->getBondBetweenAtoms(scaffold_atom->getIdx(), *nbrIdx) ->getBondType(), nbr->getProp<int>(common_properties::reactionMapNum))); } else { bonds_to_product.push_back(RGroup( nbr, mol->getBondBetweenAtoms(scaffold_atom->getIdx(), *nbrIdx) ->getBondType())); } } ++nbrIdx; } // Search the atom bookmark to see if we can find the original // reaction mapping number to the scaffold_atom // sometimes this is a proper rgroup, so use that mapno // C-C:12 >> C:12 # will probably work // C-C:12-C >> C:12 # probably won't int mapno = -1; if (bonds_to_product.size()) { mapno = getAtomMapNo(mol->getAtomBookmarks(), scaffold_atom); } atomsToRemove.push_back(rdcast<unsigned int>(scaffold_atom_idx)); if (bonds_to_product.size()) { if (addDummyAtoms) { // add dummy atom where the reaction scaffold would have been unsigned int idx = mol->addAtom(); for (const auto &bi : bonds_to_product) { mol->addBond(idx, bi.rAtom->getIdx(), bi.bond_type); int atommapno = bi.mapno == -1 ? mapno : bi.mapno; if (atommapno) { Atom *at = mol->getAtomWithIdx(idx); at->setProp(common_properties::molAtomMapNumber, atommapno); } } } else { for (const auto &bi : bonds_to_product) { int atommapno = bi.mapno == -1 ? 
mapno : bi.mapno; if (mapno != -1) { std::vector<int> rgroups; std::vector<int> bonds; bi.rAtom->getPropIfPresent(common_properties::_rgroupAtomMaps, rgroups); bi.rAtom->getPropIfPresent(common_properties::_rgroupBonds, bonds); rgroups.push_back(atommapno); // XXX THIS MAY NOT BE SAFE bonds.push_back(static_cast<int>(bi.bond_type)); bi.rAtom->setProp(common_properties::_rgroupAtomMaps, rgroups); bi.rAtom->setProp(common_properties::_rgroupBonds, bonds); } } } } } } for (unsigned int ai : atomsToRemove) { mol->removeAtom(ai); } return mol; } } // namespace RDKit
1
20,801
It is relatively cheap to tell whether a bond is a ring bond; we don't have to perform an SSSR (see the sketch after this row).
rdkit-rdkit
cpp
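A minimal C++ sketch of the reviewer's point, assuming current RDKit public APIs: MolOps::fastFindRings() populates RingInfo from simple connectivity without computing an SSSR, which is all that is needed to answer "is this bond in a ring?". The SMILES string is illustrative, and the unsanitized parse is only there so that ring perception has not already run (sanitized molecules normally carry ring info already).

#include <GraphMol/GraphMol.h>
#include <GraphMol/MolOps.h>
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <iostream>
#include <memory>

int main() {
  // Parse without sanitization so ring perception has not run yet.
  std::unique_ptr<RDKit::RWMol> mol(RDKit::SmilesToMol("C1CC1CC", 0, false));
  // Cheap ring perception: enough for ring-membership queries, no SSSR.
  RDKit::MolOps::fastFindRings(*mol);
  for (unsigned int i = 0; i < mol->getNumBonds(); ++i) {
    const bool inRing = mol->getRingInfo()->numBondRings(i) > 0;
    std::cout << "bond " << i << (inRing ? " is" : " is not")
              << " a ring bond\n";
  }
  return 0;
}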
@@ -18,7 +18,7 @@ const ( nameFlag = "name" svcFlag = "svc" envFlag = "env" - appTypeFlag = "app-type" + svcTypeFlag = "svc-type" profileFlag = "profile" yesFlag = "yes" jsonFlag = "json"
1
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "fmt" "strconv" "strings" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/manifest" ) // Long flag names. const ( // Common flags. appFlag = "app" nameFlag = "name" svcFlag = "svc" envFlag = "env" appTypeFlag = "app-type" profileFlag = "profile" yesFlag = "yes" jsonFlag = "json" // Command specific flags. dockerFileFlag = "dockerfile" imageTagFlag = "tag" resourceTagsFlag = "resource-tags" stackOutputDirFlag = "output-dir" limitFlag = "limit" followFlag = "follow" sinceFlag = "since" startTimeFlag = "start-time" endTimeFlag = "end-time" envProfilesFlag = "env-profiles" prodEnvFlag = "prod" deployFlag = "deploy" resourcesFlag = "resources" githubURLFlag = "github-url" githubAccessTokenFlag = "github-access-token" gitBranchFlag = "git-branch" envsFlag = "environments" domainNameFlag = "domain" localAppFlag = "local" deleteSecretFlag = "delete-secret" appPortFlag = "port" ) // Short flag names. // A short flag only exists if the flag is mandatory by the command. const ( appFlagShort = "a" nameFlagShort = "n" svcFlagShort = "s" envFlagShort = "e" appTypeFlagShort = "t" dockerFileFlagShort = "d" githubURLFlagShort = "u" githubAccessTokenFlagShort = "t" gitBranchFlagShort = "b" envsFlagShort = "e" ) // Descriptions for flags. var ( appTypeFlagDescription = fmt.Sprintf(`Type of application to create. Must be one of: %s`, strings.Join(quoteAll(manifest.ServiceTypes), ", ")) ) const ( appFlagDescription = "Name of the application." svcFlagDescription = "Name of the service." envFlagDescription = "Name of the environment." pipelineFlagDescription = "Name of the pipeline." profileFlagDescription = "Name of the profile." yesFlagDescription = "Skips confirmation prompt." jsonFlagDescription = "Optional. Outputs in JSON format." dockerFileFlagDescription = "Path to the Dockerfile." imageTagFlagDescription = `Optional. The application's image tag.` resourceTagsFlagDescription = `Optional. Labels with a key and value separated with commas. Allows you to categorize resources.` stackOutputDirFlagDescription = "Optional. Writes the stack template and template configuration to a directory." prodEnvFlagDescription = "If the environment contains production services." limitFlagDescription = "Optional. The maximum number of log events returned." followFlagDescription = "Optional. Specifies if the logs should be streamed." sinceFlagDescription = `Optional. Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of start-time / since may be used.` startTimeFlagDescription = `Optional. Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of start-time / since may be used.` endTimeFlagDescription = `Optional. Only return logs before a specific date (RFC3339). Defaults to all logs. Only one of end-time / follow may be used.` deployTestFlagDescription = `Deploy your application to a "test" environment.` githubURLFlagDescription = "GitHub repository URL for your application." githubAccessTokenFlagDescription = "GitHub personal access token for your repository." gitBranchFlagDescription = "Branch used to trigger your pipeline." pipelineEnvsFlagDescription = "Environments to add to the pipeline." domainNameFlagDescription = "Optional. Your existing custom domain name." resourcesFlagDescription = "Optional. Show the resources of your application." localAppFlagDescription = "Only show applications in the current directory." 
envProfilesFlagDescription = "Optional. Environments and the profile to use to delete the environment." deleteSecretFlagDescription = "Deletes AWS Secrets Manager secret associated with a pipeline source repository." appPortFlagDescription = "Optional. The port on which your Dockerfile listens." ) func quoteAll(elems []string) []string { quotedElems := make([]string, len(elems)) for i, el := range elems { quotedElems[i] = strconv.Quote(el) } return quotedElems }
1
13,111
Maybe we should add flag aliases for the service flag as well (see the sketch after this row).
aws-copilot-cli
go
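A hedged Go sketch of one way to honor that suggestion with spf13/pflag, which the CLI's cobra commands build on: register a normalization function so a long "service" alias resolves to the existing svc flag. The alias name, the svcFlagName constant (renamed here to avoid clashing with the package's existing const), and the wiring are assumptions, not the project's actual change.

package cli

import "github.com/spf13/pflag"

const (
	svcFlagName  = "svc"
	svcFlagAlias = "service" // hypothetical long-form alias
)

// normalizeSvcFlag maps the alias onto the canonical flag name, so that
// `--service frontend` behaves exactly like `--svc frontend`.
func normalizeSvcFlag(f *pflag.FlagSet, name string) pflag.NormalizedName {
	if name == svcFlagAlias {
		name = svcFlagName
	}
	return pflag.NormalizedName(name)
}

Wiring it up would then be a one-liner on each command, e.g. cmd.Flags().SetNormalizeFunc(normalizeSvcFlag).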
@@ -31,10 +31,7 @@ type Block struct { Ticket Ticket `json:"ticket"` // ElectionProof is the vrf proof giving this block's miner authoring rights - ElectionProof crypto.VRFPi - - // EPoStInfo wraps all data for verifying this block's Election PoSt - EPoStInfo EPoStInfo `json:"ePoStInfo"` + ElectionProof crypto.ElectionProof // DrandEntries contain the verifiable oracle randomness used to elect // this block's author leader
1
package block import ( "encoding/json" "fmt" "github.com/filecoin-project/go-address" "github.com/filecoin-project/specs-actors/actors/abi" fbig "github.com/filecoin-project/specs-actors/actors/abi/big" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" node "github.com/ipfs/go-ipld-format" "github.com/filecoin-project/go-filecoin/internal/pkg/constants" "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" "github.com/filecoin-project/go-filecoin/internal/pkg/drand" e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" ) // Block is a block in the blockchain. type Block struct { // control field for encoding struct as an array _ struct{} `cbor:",toarray"` // Miner is the address of the miner actor that mined this block. Miner address.Address `json:"miner"` // Ticket is the ticket submitted with this block. Ticket Ticket `json:"ticket"` // ElectionProof is the vrf proof giving this block's miner authoring rights ElectionProof crypto.VRFPi // EPoStInfo wraps all data for verifying this block's Election PoSt EPoStInfo EPoStInfo `json:"ePoStInfo"` // DrandEntries contain the verifiable oracle randomness used to elect // this block's author leader DrandEntries []*drand.Entry // Parents is the set of parents this block was based on. Typically one, // but can be several in the case where there were multiple winning ticket- // holders for an epoch. Parents TipSetKey `json:"parents"` // ParentWeight is the aggregate chain weight of the parent set. ParentWeight fbig.Int `json:"parentWeight"` // Height is the chain height of this block. Height abi.ChainEpoch `json:"height"` // StateRoot is the CID of the root of the state tree after application of the messages in the parent tipset // to the parent tipset's state root. StateRoot e.Cid `json:"stateRoot,omitempty"` // MessageReceipts is a list of receipts corresponding to the application of the messages in the parent tipset // to the parent tipset's state root (corresponding to this block's StateRoot). MessageReceipts e.Cid `json:"messageReceipts,omitempty"` // Messages is the set of messages included in this block Messages e.Cid `json:"messages,omitempty"` // The aggregate signature of all BLS signed messages in the block BLSAggregateSig *crypto.Signature `json:"blsAggregateSig"` // The timestamp, in seconds since the Unix epoch, at which this block was created. Timestamp uint64 `json:"timestamp"` // The signature of the miner's worker key over the block BlockSig *crypto.Signature `json:"blocksig"` // ForkSignaling is extra data used by miners to communicate ForkSignaling uint64 cachedCid cid.Cid cachedBytes []byte } // IndexMessagesField is the message field position in the encoded block const IndexMessagesField = 10 // IndexParentsField is the parents field position in the encoded block const IndexParentsField = 5 // Cid returns the content id of this block. func (b *Block) Cid() cid.Cid { if b.cachedCid == cid.Undef { if b.cachedBytes == nil { bytes, err := encoding.Encode(b) if err != nil { panic(err) } b.cachedBytes = bytes } c, err := constants.DefaultCidBuilder.Sum(b.cachedBytes) if err != nil { panic(err) } b.cachedCid = c } return b.cachedCid } // ToNode converts the Block to an IPLD node. 
func (b *Block) ToNode() node.Node { data, err := encoding.Encode(b) if err != nil { panic(err) } c, err := constants.DefaultCidBuilder.Sum(data) if err != nil { panic(err) } blk, err := blocks.NewBlockWithCid(data, c) if err != nil { panic(err) } node, err := cbor.DecodeBlock(blk) if err != nil { panic(err) } return node } func (b *Block) String() string { errStr := "(error encoding Block)" cid := b.Cid() js, err := json.MarshalIndent(b, "", " ") if err != nil { return errStr } return fmt.Sprintf("Block cid=[%v]: %s", cid, string(js)) } // DecodeBlock decodes raw cbor bytes into a Block. func DecodeBlock(b []byte) (*Block, error) { var out Block if err := encoding.Decode(b, &out); err != nil { return nil, err } out.cachedBytes = b return &out, nil } // Equals returns true if the Block is equal to other. func (b *Block) Equals(other *Block) bool { return b.Cid().Equals(other.Cid()) } // SignatureData returns the block's bytes with a null signature field for // signature creation and verification func (b *Block) SignatureData() []byte { tmp := &Block{ Miner: b.Miner, Ticket: b.Ticket, ElectionProof: b.ElectionProof, Parents: b.Parents, ParentWeight: b.ParentWeight, Height: b.Height, Messages: b.Messages, StateRoot: b.StateRoot, MessageReceipts: b.MessageReceipts, EPoStInfo: b.EPoStInfo, DrandEntries: b.DrandEntries, Timestamp: b.Timestamp, BLSAggregateSig: b.BLSAggregateSig, ForkSignaling: b.ForkSignaling, // BlockSig omitted } return tmp.ToNode().RawData() }
1
23,585
This LGTM, but don't we need a winning PoSt proofs field to fully implement the protocol? No need to add it here, as I'm working on this in another PR, but I'm curious how we can interoperate with lotus blocks without PoSt proofs (see the sketch after this row).
filecoin-project-venus
go
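A hedged Go sketch of the field the reviewer is asking about. Lotus block headers carry the winning PoSt proofs next to the election proof, so interop would need something along these lines; the field name mirrors lotus's BlockHeader.WinPoStProof, and both the name and its placement are assumptions, not part of this PR.

package block

import "github.com/filecoin-project/specs-actors/actors/abi"

// blockWithWinPoSt sketches only the hypothetical addition; all of the
// existing Block fields (Miner, Ticket, ElectionProof, ...) are elided.
type blockWithWinPoSt struct {
	// WinPoStProof would hold the winning PoSt proofs that verifiers need
	// to check the miner's election, mirroring lotus's header field.
	WinPoStProof []abi.PoStProof
}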
@@ -12,3 +12,10 @@ import "github.com/iotexproject/iotex-core/blockchain/block" type BlockCreationSubscriber interface { ReceiveBlock(*block.Block) error } + +// pubSub includes Subscriber, buffered channel for storing the pending blocks and cancel channel to end the handler thread +type pubSub struct { + Blocklistener BlockCreationSubscriber + BlocklistenerBuffer chan *block.Block + BlocklistenerCancel chan interface{} +}
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package blockchain import "github.com/iotexproject/iotex-core/blockchain/block" // BlockCreationSubscriber is an interface which will get notified when a block is created type BlockCreationSubscriber interface { ReceiveBlock(*block.Block) error }
1
20,645
Can you move the [] into pubSub, so that Blockchain just contains a pubSub rather than a []? (See the sketch after this row.)
iotexproject-iotex-core
go
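A minimal Go sketch of the restructuring being requested: the slice moves inside pubSub, so the blockchain struct holds one *pubSub value instead of a []pubSub. The entry type and method names are assumptions; only the ownership change is the point.

package blockchain

import "github.com/iotexproject/iotex-core/blockchain/block"

// subscriberEntry pairs one subscriber with its pending-block buffer and
// the channel used to stop its handler goroutine.
type subscriberEntry struct {
	listener BlockCreationSubscriber
	buffer   chan *block.Block
	cancel   chan interface{}
}

// pubSub owns the whole subscriber list; the slice lives here, not in the
// blockchain struct.
type pubSub struct {
	entries []subscriberEntry
}

// AddSubscriber registers a listener with a buffered channel of the given
// size and a fresh cancel channel.
func (ps *pubSub) AddSubscriber(s BlockCreationSubscriber, bufSize int) {
	ps.entries = append(ps.entries, subscriberEntry{
		listener: s,
		buffer:   make(chan *block.Block, bufSize),
		cancel:   make(chan interface{}),
	})
}

The blockchain type would then embed a single field, e.g. pubSubManager *pubSub, and delegate subscription management to it.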
@@ -224,6 +224,11 @@ public class SolrConfig extends XmlConfigFile implements MapSerializable { queryResultWindowSize = Math.max(1, getInt("query/queryResultWindowSize", 1)); queryResultMaxDocsCached = getInt("query/queryResultMaxDocsCached", Integer.MAX_VALUE); enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false); + + useCircuitBreakers = getBool("circuitBreaker/useCircuitBreakers", false); + memoryCircuitBreakerThresholdPct = getInt("circuitBreaker/memoryCircuitBreakerThresholdPct", 100); + + validateMemoryBreakerThreshold(); useRangeVersionsForPeerSync = getBool("peerSync/useRangeVersions", true);
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.core; import javax.xml.parsers.ParserConfigurationException; import javax.xml.xpath.XPathConstants; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.lang.invoke.MethodHandles; import java.net.MalformedURLException; import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.text.ParseException; import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.google.common.collect.ImmutableList; import org.apache.commons.io.FileUtils; import org.apache.lucene.index.IndexDeletionPolicy; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.Version; import org.apache.solr.client.solrj.io.stream.expr.Expressible; import org.apache.solr.cloud.RecoveryStrategy; import org.apache.solr.cloud.ZkSolrResourceLoader; import org.apache.solr.common.MapSerializable; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.util.IOUtils; import org.apache.solr.handler.component.SearchComponent; import org.apache.solr.request.SolrRequestHandler; import org.apache.solr.response.QueryResponseWriter; import org.apache.solr.response.transform.TransformerFactory; import org.apache.solr.rest.RestManager; import org.apache.solr.schema.IndexSchema; import org.apache.solr.schema.IndexSchemaFactory; import org.apache.solr.search.CacheConfig; import org.apache.solr.search.CaffeineCache; import org.apache.solr.search.QParserPlugin; import org.apache.solr.search.SolrCache; import org.apache.solr.search.ValueSourceParser; import org.apache.solr.search.stats.StatsCache; import org.apache.solr.servlet.SolrRequestParsers; import org.apache.solr.spelling.QueryConverter; import org.apache.solr.update.SolrIndexConfig; import org.apache.solr.update.UpdateLog; import org.apache.solr.update.processor.UpdateRequestProcessorChain; import org.apache.solr.update.processor.UpdateRequestProcessorFactory; import org.apache.solr.util.DOMUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.xml.sax.SAXException; import static org.apache.solr.common.params.CommonParams.NAME; import static org.apache.solr.common.params.CommonParams.PATH; import static org.apache.solr.common.util.Utils.fromJSON; import static 
org.apache.solr.common.util.Utils.makeMap; import static org.apache.solr.core.ConfigOverlay.ZNODEVER; import static org.apache.solr.core.SolrConfig.PluginOpts.LAZY; import static org.apache.solr.core.SolrConfig.PluginOpts.MULTI_OK; import static org.apache.solr.core.SolrConfig.PluginOpts.NOOP; import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_CLASS; import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME; import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME_IN_OVERLAY; /** * Provides a static reference to a Config object modeling the main * configuration data for a Solr instance -- typically found in * "solrconfig.xml". */ public class SolrConfig extends XmlConfigFile implements MapSerializable { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); public static final String DEFAULT_CONF_FILE = "solrconfig.xml"; private RequestParams requestParams; public enum PluginOpts { MULTI_OK, REQUIRE_NAME, REQUIRE_NAME_IN_OVERLAY, REQUIRE_CLASS, LAZY, // EnumSet.of and/or EnumSet.copyOf(Collection) are annoying // because of type determination NOOP } private int multipartUploadLimitKB; private int formUploadLimitKB; private boolean enableRemoteStreams; private boolean enableStreamBody; private boolean handleSelect; private boolean addHttpRequestToContext; private final SolrRequestParsers solrRequestParsers; /** * TEST-ONLY: Creates a configuration instance from an instance directory and file name * @param instanceDir the directory used to create the resource loader * @param name the configuration name used by the loader if the stream is null */ public SolrConfig(Path instanceDir, String name) throws ParserConfigurationException, IOException, SAXException { this(new SolrResourceLoader(instanceDir), name, true, null); } public static SolrConfig readFromResourceLoader(SolrResourceLoader loader, String name, boolean isConfigsetTrusted, Properties substitutableProperties) { try { return new SolrConfig(loader, name, isConfigsetTrusted, substitutableProperties); } catch (Exception e) { String resource; if (loader instanceof ZkSolrResourceLoader) { resource = name; } else { resource = Paths.get(loader.getConfigDir()).resolve(name).toString(); } throw new SolrException(ErrorCode.SERVER_ERROR, "Error loading solr config from " + resource, e); } } /** * Creates a configuration instance from a resource loader, a configuration name and a stream. * If the stream is null, the resource loader will open the configuration stream. * If the stream is not null, no attempt to load the resource will occur (the name is not used). * @param loader the resource loader * @param name the configuration name * @param isConfigsetTrusted false if configset was uploaded using unsecured configset upload API, true otherwise * @param substitutableProperties optional properties to substitute into the XML */ private SolrConfig(SolrResourceLoader loader, String name, boolean isConfigsetTrusted, Properties substitutableProperties) throws ParserConfigurationException, IOException, SAXException { // insist we have non-null substituteProperties; it might get overlayed super(loader, name, null, "/config/", substitutableProperties == null ? 
new Properties() : substitutableProperties); getOverlay();//just in case it is not initialized getRequestParams(); initLibs(loader, isConfigsetTrusted); luceneMatchVersion = SolrConfig.parseLuceneVersionString(getVal(IndexSchema.LUCENE_MATCH_VERSION_PARAM, true)); log.info("Using Lucene MatchVersion: {}", luceneMatchVersion); String indexConfigPrefix; // Old indexDefaults and mainIndex sections are deprecated and fails fast for luceneMatchVersion=>LUCENE_4_0_0. // For older solrconfig.xml's we allow the old sections, but never mixed with the new <indexConfig> boolean hasDeprecatedIndexConfig = (getNode("indexDefaults", false) != null) || (getNode("mainIndex", false) != null); if (hasDeprecatedIndexConfig) { throw new SolrException(ErrorCode.FORBIDDEN, "<indexDefaults> and <mainIndex> configuration sections are discontinued. Use <indexConfig> instead."); } else { indexConfigPrefix = "indexConfig"; } assertWarnOrFail("The <nrtMode> config has been discontinued and NRT mode is always used by Solr." + " This config will be removed in future versions.", getNode(indexConfigPrefix + "/nrtMode", false) == null, true ); assertWarnOrFail("Solr no longer supports forceful unlocking via the 'unlockOnStartup' option. "+ "This is no longer necessary for the default lockType except in situations where "+ "it would be dangerous and should not be done. For other lockTypes and/or "+ "directoryFactory options it may also be dangerous and users must resolve "+ "problematic locks manually.", null == getNode(indexConfigPrefix + "/unlockOnStartup", false), true // 'fail' in trunk ); // Parse indexConfig section, using mainIndex as backup in case old config is used indexConfig = new SolrIndexConfig(this, "indexConfig", null); booleanQueryMaxClauseCount = getInt("query/maxBooleanClauses", IndexSearcher.getMaxClauseCount()); if (IndexSearcher.getMaxClauseCount() < booleanQueryMaxClauseCount) { log.warn("solrconfig.xml: <maxBooleanClauses> of {} is greater than global limit of {} {}" , booleanQueryMaxClauseCount, IndexSearcher.getMaxClauseCount() , "and will have no effect set 'maxBooleanClauses' in solr.xml to increase global limit"); } // Warn about deprecated / discontinued parameters // boolToFilterOptimizer has had no effect since 3.1 if (get("query/boolTofilterOptimizer", null) != null) log.warn("solrconfig.xml: <boolTofilterOptimizer> is currently not implemented and has no effect."); if (get("query/HashDocSet", null) != null) log.warn("solrconfig.xml: <HashDocSet> is deprecated and no longer used."); // TODO: Old code - in case somebody wants to re-enable. 
Also see SolrIndexSearcher#search() // filtOptEnabled = getBool("query/boolTofilterOptimizer/@enabled", false); // filtOptCacheSize = getInt("query/boolTofilterOptimizer/@cacheSize",32); // filtOptThreshold = getFloat("query/boolTofilterOptimizer/@threshold",.05f); useFilterForSortedQuery = getBool("query/useFilterForSortedQuery", false); queryResultWindowSize = Math.max(1, getInt("query/queryResultWindowSize", 1)); queryResultMaxDocsCached = getInt("query/queryResultMaxDocsCached", Integer.MAX_VALUE); enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false); useRangeVersionsForPeerSync = getBool("peerSync/useRangeVersions", true); filterCacheConfig = CacheConfig.getConfig(this, "query/filterCache"); queryResultCacheConfig = CacheConfig.getConfig(this, "query/queryResultCache"); documentCacheConfig = CacheConfig.getConfig(this, "query/documentCache"); CacheConfig conf = CacheConfig.getConfig(this, "query/fieldValueCache"); if (conf == null) { Map<String, String> args = new HashMap<>(); args.put(NAME, "fieldValueCache"); args.put("size", "10000"); args.put("initialSize", "10"); args.put("showItems", "-1"); conf = new CacheConfig(CaffeineCache.class, args, null); } fieldValueCacheConfig = conf; useColdSearcher = getBool("query/useColdSearcher", false); dataDir = get("dataDir", null); if (dataDir != null && dataDir.length() == 0) dataDir = null; org.apache.solr.search.SolrIndexSearcher.initRegenerators(this); if (get("jmx", null) != null) { log.warn("solrconfig.xml: <jmx> is no longer supported, use solr.xml:/metrics/reporter section instead"); } httpCachingConfig = new HttpCachingConfig(this); maxWarmingSearchers = getInt("query/maxWarmingSearchers", 1); slowQueryThresholdMillis = getInt("query/slowQueryThresholdMillis", -1); for (SolrPluginInfo plugin : plugins) loadPluginInfo(plugin); Map<String, CacheConfig> userCacheConfigs = CacheConfig.getMultipleConfigs(this, "query/cache"); List<PluginInfo> caches = getPluginInfos(SolrCache.class.getName()); if (!caches.isEmpty()) { for (PluginInfo c : caches) { userCacheConfigs.put(c.name, CacheConfig.getConfig(this, "cache", c.attributes, null)); } } this.userCacheConfigs = Collections.unmodifiableMap(userCacheConfigs); updateHandlerInfo = loadUpdatehandlerInfo(); multipartUploadLimitKB = getInt( "requestDispatcher/requestParsers/@multipartUploadLimitInKB", Integer.MAX_VALUE); if (multipartUploadLimitKB == -1) multipartUploadLimitKB = Integer.MAX_VALUE; formUploadLimitKB = getInt( "requestDispatcher/requestParsers/@formdataUploadLimitInKB", Integer.MAX_VALUE); if (formUploadLimitKB == -1) formUploadLimitKB = Integer.MAX_VALUE; enableRemoteStreams = getBool( "requestDispatcher/requestParsers/@enableRemoteStreaming", false); enableStreamBody = getBool( "requestDispatcher/requestParsers/@enableStreamBody", false); handleSelect = getBool( "requestDispatcher/@handleSelect", false); addHttpRequestToContext = getBool( "requestDispatcher/requestParsers/@addHttpRequestToContext", false); List<PluginInfo> argsInfos = getPluginInfos(InitParams.class.getName()); if (argsInfos != null) { Map<String, InitParams> argsMap = new HashMap<>(); for (PluginInfo p : argsInfos) { InitParams args = new InitParams(p); argsMap.put(args.name == null ? 
String.valueOf(args.hashCode()) : args.name, args); } this.initParams = Collections.unmodifiableMap(argsMap); } solrRequestParsers = new SolrRequestParsers(this); log.debug("Loaded SolrConfig: {}", name); } private static final AtomicBoolean versionWarningAlreadyLogged = new AtomicBoolean(false); public static final Version parseLuceneVersionString(final String matchVersion) { final Version version; try { version = Version.parseLeniently(matchVersion); } catch (ParseException pe) { throw new SolrException(ErrorCode.SERVER_ERROR, "Invalid luceneMatchVersion. Should be of the form 'V.V.V' (e.g. 4.8.0)", pe); } if (version == Version.LATEST && !versionWarningAlreadyLogged.getAndSet(true)) { log.warn("You should not use LATEST as luceneMatchVersion property: " + "if you use this setting, and then Solr upgrades to a newer release of Lucene, " + "sizable changes may happen. If precise back compatibility is important " + "then you should instead explicitly specify an actual Lucene version."); } return version; } public static final List<SolrPluginInfo> plugins = ImmutableList.<SolrPluginInfo>builder() .add(new SolrPluginInfo(SolrRequestHandler.class, SolrRequestHandler.TYPE, REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK, LAZY)) .add(new SolrPluginInfo(QParserPlugin.class, "queryParser", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(Expressible.class, "expressible", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(QueryResponseWriter.class, "queryResponseWriter", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK, LAZY)) .add(new SolrPluginInfo(ValueSourceParser.class, "valueSourceParser", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(TransformerFactory.class, "transformer", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(SearchComponent.class, "searchComponent", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(UpdateRequestProcessorFactory.class, "updateProcessor", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(SolrCache.class, "cache", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) // TODO: WTF is up with queryConverter??? // it apparently *only* works as a singleton? 
- SOLR-4304 // and even then -- only if there is a single SpellCheckComponent // because of queryConverter.setIndexAnalyzer .add(new SolrPluginInfo(QueryConverter.class, "queryConverter", REQUIRE_NAME, REQUIRE_CLASS)) .add(new SolrPluginInfo(PluginBag.RuntimeLib.class, "runtimeLib", REQUIRE_NAME, MULTI_OK)) // this is hackish, since it picks up all SolrEventListeners, // regardless of when/how/why they are used (or even if they are // declared outside of the appropriate context) but there's no nice // way around that in the PluginInfo framework .add(new SolrPluginInfo(InitParams.class, InitParams.TYPE, MULTI_OK, REQUIRE_NAME_IN_OVERLAY)) .add(new SolrPluginInfo(SolrEventListener.class, "//listener", REQUIRE_CLASS, MULTI_OK, REQUIRE_NAME_IN_OVERLAY)) .add(new SolrPluginInfo(DirectoryFactory.class, "directoryFactory", REQUIRE_CLASS)) .add(new SolrPluginInfo(RecoveryStrategy.Builder.class, "recoveryStrategy")) .add(new SolrPluginInfo(IndexDeletionPolicy.class, "indexConfig/deletionPolicy", REQUIRE_CLASS)) .add(new SolrPluginInfo(CodecFactory.class, "codecFactory", REQUIRE_CLASS)) .add(new SolrPluginInfo(IndexReaderFactory.class, "indexReaderFactory", REQUIRE_CLASS)) .add(new SolrPluginInfo(UpdateRequestProcessorChain.class, "updateRequestProcessorChain", MULTI_OK)) .add(new SolrPluginInfo(UpdateLog.class, "updateHandler/updateLog")) .add(new SolrPluginInfo(IndexSchemaFactory.class, "schemaFactory", REQUIRE_CLASS)) .add(new SolrPluginInfo(RestManager.class, "restManager")) .add(new SolrPluginInfo(StatsCache.class, "statsCache", REQUIRE_CLASS)) .build(); public static final Map<String, SolrPluginInfo> classVsSolrPluginInfo; static { Map<String, SolrPluginInfo> map = new HashMap<>(); for (SolrPluginInfo plugin : plugins) map.put(plugin.clazz.getName(), plugin); classVsSolrPluginInfo = Collections.unmodifiableMap(map); } public static class SolrPluginInfo { @SuppressWarnings({"rawtypes"}) public final Class clazz; public final String tag; public final Set<PluginOpts> options; @SuppressWarnings({"unchecked", "rawtypes"}) private SolrPluginInfo(Class clz, String tag, PluginOpts... opts) { this.clazz = clz; this.tag = tag; this.options = opts == null ? Collections.EMPTY_SET : EnumSet.of(NOOP, opts); } public String getCleanTag() { return tag.replaceAll("/", ""); } public String getTagCleanLower() { return getCleanTag().toLowerCase(Locale.ROOT); } } @SuppressWarnings({"unchecked", "rawtypes"}) public static ConfigOverlay getConfigOverlay(SolrResourceLoader loader) { InputStream in = null; InputStreamReader isr = null; try { try { in = loader.openResource(ConfigOverlay.RESOURCE_NAME); } catch (IOException e) { // TODO: we should be explicitly looking for file not found exceptions // and logging if it's not the expected IOException // hopefully no problem, assume no overlay.json file return new ConfigOverlay(Collections.EMPTY_MAP, -1); } int version = 0; // will be always 0 for file based resourceLoader if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) { version = ((ZkSolrResourceLoader.ZkByteArrayInputStream) in).getStat().getVersion(); log.debug("Config overlay loaded. 
version : {} ", version); } Map m = (Map) fromJSON(in); return new ConfigOverlay(m, version); } catch (Exception e) { throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading config overlay", e); } finally { IOUtils.closeQuietly(isr); IOUtils.closeQuietly(in); } } private Map<String, InitParams> initParams = Collections.emptyMap(); public Map<String, InitParams> getInitParams() { return initParams; } protected UpdateHandlerInfo loadUpdatehandlerInfo() { return new UpdateHandlerInfo(get("updateHandler/@class", null), getInt("updateHandler/autoCommit/maxDocs", -1), getInt("updateHandler/autoCommit/maxTime", -1), convertHeapOptionStyleConfigStringToBytes(get("updateHandler/autoCommit/maxSize", "")), getBool("updateHandler/indexWriter/closeWaitsForMerges", true), getBool("updateHandler/autoCommit/openSearcher", true), getInt("updateHandler/autoSoftCommit/maxDocs", -1), getInt("updateHandler/autoSoftCommit/maxTime", -1), getBool("updateHandler/commitWithin/softCommit", true)); } /** * Converts a Java heap option-like config string to bytes. Valid suffixes are: 'k', 'm', 'g' * (case insensitive). If there is no suffix, the default unit is bytes. * For example, 50k = 50KB, 20m = 20MB, 4g = 4GB, 300 = 300 bytes * @param configStr the config setting to parse * @return the size, in bytes. -1 if the given config string is empty */ protected static long convertHeapOptionStyleConfigStringToBytes(String configStr) { if (configStr.isEmpty()) { return -1; } long multiplier = 1; String numericValueStr = configStr; char suffix = Character.toLowerCase(configStr.charAt(configStr.length() - 1)); if (Character.isLetter(suffix)) { if (suffix == 'k') { multiplier = FileUtils.ONE_KB; } else if (suffix == 'm') { multiplier = FileUtils.ONE_MB; } else if (suffix == 'g') { multiplier = FileUtils.ONE_GB; } else { throw new RuntimeException("Invalid suffix. Valid suffixes are 'k' (KB), 'm' (MB), 'g' (G). " + "No suffix means the amount is in bytes. "); } numericValueStr = configStr.substring(0, configStr.length() - 1); } try { return Long.parseLong(numericValueStr) * multiplier; } catch (NumberFormatException e) { throw new RuntimeException("Invalid format. The config setting should be a long with an " + "optional letter suffix. Valid suffixes are 'k' (KB), 'm' (MB), 'g' (G). 
" + "No suffix means the amount is in bytes."); } } private void loadPluginInfo(SolrPluginInfo pluginInfo) { boolean requireName = pluginInfo.options.contains(REQUIRE_NAME); boolean requireClass = pluginInfo.options.contains(REQUIRE_CLASS); List<PluginInfo> result = readPluginInfos(pluginInfo.tag, requireName, requireClass); if (1 < result.size() && !pluginInfo.options.contains(MULTI_OK)) { throw new SolrException (SolrException.ErrorCode.SERVER_ERROR, "Found " + result.size() + " configuration sections when at most " + "1 is allowed matching expression: " + pluginInfo.getCleanTag()); } if (!result.isEmpty()) pluginStore.put(pluginInfo.clazz.getName(), result); } public List<PluginInfo> readPluginInfos(String tag, boolean requireName, boolean requireClass) { ArrayList<PluginInfo> result = new ArrayList<>(); NodeList nodes = (NodeList) evaluate(tag, XPathConstants.NODESET); for (int i = 0; i < nodes.getLength(); i++) { PluginInfo pluginInfo = new PluginInfo(nodes.item(i), "[solrconfig.xml] " + tag, requireName, requireClass); if (pluginInfo.isEnabled()) result.add(pluginInfo); } return result; } public SolrRequestParsers getRequestParsers() { return solrRequestParsers; } /* The set of materialized parameters: */ public final int booleanQueryMaxClauseCount; // SolrIndexSearcher - nutch optimizer -- Disabled since 3.1 // public final boolean filtOptEnabled; // public final int filtOptCacheSize; // public final float filtOptThreshold; // SolrIndexSearcher - caches configurations public final CacheConfig filterCacheConfig; public final CacheConfig queryResultCacheConfig; public final CacheConfig documentCacheConfig; public final CacheConfig fieldValueCacheConfig; public final Map<String, CacheConfig> userCacheConfigs; // SolrIndexSearcher - more... public final boolean useFilterForSortedQuery; public final int queryResultWindowSize; public final int queryResultMaxDocsCached; public final boolean enableLazyFieldLoading; public final boolean useRangeVersionsForPeerSync; // IndexConfig settings public final SolrIndexConfig indexConfig; protected UpdateHandlerInfo updateHandlerInfo; private Map<String, List<PluginInfo>> pluginStore = new LinkedHashMap<>(); public final int maxWarmingSearchers; public final boolean useColdSearcher; public final Version luceneMatchVersion; protected String dataDir; public final int slowQueryThresholdMillis; // threshold above which a query is considered slow private final HttpCachingConfig httpCachingConfig; public HttpCachingConfig getHttpCachingConfig() { return httpCachingConfig; } public static class HttpCachingConfig implements MapSerializable { /** * config xpath prefix for getting HTTP Caching options */ private final static String CACHE_PRE = "requestDispatcher/httpCaching/"; /** * For extracting Expires "ttl" from <cacheControl> config */ private final static Pattern MAX_AGE = Pattern.compile("\\bmax-age=(\\d+)"); @Override public Map<String, Object> toMap(Map<String, Object> map) { return makeMap("never304", never304, "etagSeed", etagSeed, "lastModFrom", lastModFrom.name().toLowerCase(Locale.ROOT), "cacheControl", cacheControlHeader); } public static enum LastModFrom { OPENTIME, DIRLASTMOD, BOGUS; /** * Input must not be null */ public static LastModFrom parse(final String s) { try { return valueOf(s.toUpperCase(Locale.ROOT)); } catch (Exception e) { log.warn("Unrecognized value for lastModFrom: {}", s, e); return BOGUS; } } } private final boolean never304; private final String etagSeed; private final String cacheControlHeader; private final Long maxAge; 
private final LastModFrom lastModFrom; private HttpCachingConfig(SolrConfig conf) { never304 = conf.getBool(CACHE_PRE + "@never304", false); etagSeed = conf.get(CACHE_PRE + "@etagSeed", "Solr"); lastModFrom = LastModFrom.parse(conf.get(CACHE_PRE + "@lastModFrom", "openTime")); cacheControlHeader = conf.get(CACHE_PRE + "cacheControl", null); Long tmp = null; // maxAge if (null != cacheControlHeader) { try { final Matcher ttlMatcher = MAX_AGE.matcher(cacheControlHeader); final String ttlStr = ttlMatcher.find() ? ttlMatcher.group(1) : null; tmp = (null != ttlStr && !"".equals(ttlStr)) ? Long.valueOf(ttlStr) : null; } catch (Exception e) { log.warn("Ignoring exception while attempting to extract max-age from cacheControl config: {}" , cacheControlHeader, e); } } maxAge = tmp; } public boolean isNever304() { return never304; } public String getEtagSeed() { return etagSeed; } /** * null if no Cache-Control header */ public String getCacheControlHeader() { return cacheControlHeader; } /** * null if no max age limitation */ public Long getMaxAge() { return maxAge; } public LastModFrom getLastModFrom() { return lastModFrom; } } public static class UpdateHandlerInfo implements MapSerializable { public final String className; public final int autoCommmitMaxDocs, autoCommmitMaxTime, autoSoftCommmitMaxDocs, autoSoftCommmitMaxTime; public final long autoCommitMaxSizeBytes; public final boolean indexWriterCloseWaitsForMerges; public final boolean openSearcher; // is opening a new searcher part of hard autocommit? public final boolean commitWithinSoftCommit; /** * @param autoCommmitMaxDocs set -1 as default * @param autoCommmitMaxTime set -1 as default * @param autoCommitMaxSize set -1 as default */ public UpdateHandlerInfo(String className, int autoCommmitMaxDocs, int autoCommmitMaxTime, long autoCommitMaxSize, boolean indexWriterCloseWaitsForMerges, boolean openSearcher, int autoSoftCommmitMaxDocs, int autoSoftCommmitMaxTime, boolean commitWithinSoftCommit) { this.className = className; this.autoCommmitMaxDocs = autoCommmitMaxDocs; this.autoCommmitMaxTime = autoCommmitMaxTime; this.autoCommitMaxSizeBytes = autoCommitMaxSize; this.indexWriterCloseWaitsForMerges = indexWriterCloseWaitsForMerges; this.openSearcher = openSearcher; this.autoSoftCommmitMaxDocs = autoSoftCommmitMaxDocs; this.autoSoftCommmitMaxTime = autoSoftCommmitMaxTime; this.commitWithinSoftCommit = commitWithinSoftCommit; } @Override @SuppressWarnings({"unchecked", "rawtypes"}) public Map<String, Object> toMap(Map<String, Object> map) { LinkedHashMap result = new LinkedHashMap(); result.put("indexWriter", makeMap("closeWaitsForMerges", indexWriterCloseWaitsForMerges)); result.put("commitWithin", makeMap("softCommit", commitWithinSoftCommit)); result.put("autoCommit", makeMap( "maxDocs", autoCommmitMaxDocs, "maxTime", autoCommmitMaxTime, "openSearcher", openSearcher )); result.put("autoSoftCommit", makeMap("maxDocs", autoSoftCommmitMaxDocs, "maxTime", autoSoftCommmitMaxTime)); return result; } } // public Map<String, List<PluginInfo>> getUpdateProcessorChainInfo() { return updateProcessorChainInfo; } public UpdateHandlerInfo getUpdateHandlerInfo() { return updateHandlerInfo; } public String getDataDir() { return dataDir; } /** * SolrConfig keeps a repository of plugins by the type. The known interfaces are the types. 
* * @param type The key is FQN of the plugin class there are a few known types : SolrFormatter, SolrFragmenter * SolrRequestHandler,QParserPlugin, QueryResponseWriter,ValueSourceParser, * SearchComponent, QueryConverter, SolrEventListener, DirectoryFactory, * IndexDeletionPolicy, IndexReaderFactory, {@link TransformerFactory} */ @SuppressWarnings({"unchecked", "rawtypes"}) public List<PluginInfo> getPluginInfos(String type) { List<PluginInfo> result = pluginStore.get(type); SolrPluginInfo info = classVsSolrPluginInfo.get(type); if (info != null && (info.options.contains(REQUIRE_NAME) || info.options.contains(REQUIRE_NAME_IN_OVERLAY))) { Map<String, Map> infos = overlay.getNamedPlugins(info.getCleanTag()); if (!infos.isEmpty()) { LinkedHashMap<String, PluginInfo> map = new LinkedHashMap<>(); if (result != null) for (PluginInfo pluginInfo : result) { //just create a UUID for the time being so that map key is not null String name = pluginInfo.name == null ? UUID.randomUUID().toString().toLowerCase(Locale.ROOT) : pluginInfo.name; map.put(name, pluginInfo); } for (Map.Entry<String, Map> e : infos.entrySet()) { map.put(e.getKey(), new PluginInfo(info.getCleanTag(), e.getValue())); } result = new ArrayList<>(map.values()); } } return result == null ? Collections.<PluginInfo>emptyList() : result; } public PluginInfo getPluginInfo(String type) { List<PluginInfo> result = pluginStore.get(type); if (result == null || result.isEmpty()) { return null; } if (1 == result.size()) { return result.get(0); } throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Multiple plugins configured for type: " + type); } private void initLibs(SolrResourceLoader loader, boolean isConfigsetTrusted) { // TODO Want to remove SolrResourceLoader.getInstancePath; it can be on a Standalone subclass. // For Zk subclass, it's needed for the time being as well. We could remove that one if we remove two things // in SolrCloud: (1) instancePath/lib and (2) solrconfig lib directives with relative paths. Can wait till 9.0. Path instancePath = loader.getInstancePath(); List<URL> urls = new ArrayList<>(); Path libPath = instancePath.resolve("lib"); if (Files.exists(libPath)) { try { urls.addAll(SolrResourceLoader.getURLs(libPath)); } catch (IOException e) { log.warn("Couldn't add files from {} to classpath: {}", libPath, e); } } NodeList nodes = (NodeList) evaluate("lib", XPathConstants.NODESET); if (nodes == null || nodes.getLength() == 0) return; if (!isConfigsetTrusted) { throw new SolrException(ErrorCode.UNAUTHORIZED, "The configset for this collection was uploaded without any authentication in place," + " and use of <lib> is not available for collections with untrusted configsets. 
To use this component, re-upload the configset" + " after enabling authentication and authorization."); } for (int i = 0; i < nodes.getLength(); i++) { Node node = nodes.item(i); String baseDir = DOMUtil.getAttr(node, "dir"); String path = DOMUtil.getAttr(node, PATH); if (null != baseDir) { // :TODO: add support for a simpler 'glob' mutually exclusive of regex Path dir = instancePath.resolve(baseDir); String regex = DOMUtil.getAttr(node, "regex"); try { if (regex == null) urls.addAll(SolrResourceLoader.getURLs(dir)); else urls.addAll(SolrResourceLoader.getFilteredURLs(dir, regex)); } catch (IOException e) { log.warn("Couldn't add files from {} filtered by {} to classpath: {}", dir, regex, e); } } else if (null != path) { final Path dir = instancePath.resolve(path); try { urls.add(dir.toUri().toURL()); } catch (MalformedURLException e) { log.warn("Couldn't add file {} to classpath: {}", dir, e); } } else { throw new RuntimeException("lib: missing mandatory attributes: 'dir' or 'path'"); } } loader.addToClassLoader(urls); loader.reloadLuceneSPI(); } public int getMultipartUploadLimitKB() { return multipartUploadLimitKB; } public int getFormUploadLimitKB() { return formUploadLimitKB; } public boolean isHandleSelect() { return handleSelect; } public boolean isAddHttpRequestToContext() { return addHttpRequestToContext; } public boolean isEnableRemoteStreams() { return enableRemoteStreams; } public boolean isEnableStreamBody() { return enableStreamBody; } @Override public int getInt(String path) { return getInt(path, 0); } @Override public int getInt(String path, int def) { Object val = overlay.getXPathProperty(path); if (val != null) return Integer.parseInt(val.toString()); return super.getInt(path, def); } @Override public boolean getBool(String path, boolean def) { Object val = overlay.getXPathProperty(path); if (val != null) return Boolean.parseBoolean(val.toString()); return super.getBool(path, def); } @Override public String get(String path) { Object val = overlay.getXPathProperty(path, true); return val != null ? val.toString() : super.get(path); } @Override public String get(String path, String def) { Object val = overlay.getXPathProperty(path, true); return val != null ? 
val.toString() : super.get(path, def); } @Override @SuppressWarnings({"unchecked", "rawtypes"}) public Map<String, Object> toMap(Map<String, Object> result) { if (getZnodeVersion() > -1) result.put(ZNODEVER, getZnodeVersion()); result.put(IndexSchema.LUCENE_MATCH_VERSION_PARAM, luceneMatchVersion); result.put("updateHandler", getUpdateHandlerInfo()); Map m = new LinkedHashMap(); result.put("query", m); m.put("useFilterForSortedQuery", useFilterForSortedQuery); m.put("queryResultWindowSize", queryResultWindowSize); m.put("queryResultMaxDocsCached", queryResultMaxDocsCached); m.put("enableLazyFieldLoading", enableLazyFieldLoading); m.put("maxBooleanClauses", booleanQueryMaxClauseCount); for (SolrPluginInfo plugin : plugins) { List<PluginInfo> infos = getPluginInfos(plugin.clazz.getName()); if (infos == null || infos.isEmpty()) continue; String tag = plugin.getCleanTag(); tag = tag.replace("/", ""); if (plugin.options.contains(PluginOpts.REQUIRE_NAME)) { LinkedHashMap items = new LinkedHashMap(); for (PluginInfo info : infos) { //TODO remove after fixing https://issues.apache.org/jira/browse/SOLR-13706 if (info.type.equals("searchComponent") && info.name.equals("highlight")) continue; items.put(info.name, info); } for (Map.Entry e : overlay.getNamedPlugins(plugin.tag).entrySet()) items.put(e.getKey(), e.getValue()); result.put(tag, items); } else { if (plugin.options.contains(MULTI_OK)) { ArrayList<MapSerializable> l = new ArrayList<>(); for (PluginInfo info : infos) l.add(info); result.put(tag, l); } else { result.put(tag, infos.get(0)); } } } addCacheConfig(m, filterCacheConfig, queryResultCacheConfig, documentCacheConfig, fieldValueCacheConfig); m = new LinkedHashMap(); result.put("requestDispatcher", m); m.put("handleSelect", handleSelect); if (httpCachingConfig != null) m.put("httpCaching", httpCachingConfig); m.put("requestParsers", makeMap("multipartUploadLimitKB", multipartUploadLimitKB, "formUploadLimitKB", formUploadLimitKB, "addHttpRequestToContext", addHttpRequestToContext)); if (indexConfig != null) result.put("indexConfig", indexConfig); m = new LinkedHashMap(); result.put("peerSync", m); m.put("useRangeVersions", useRangeVersionsForPeerSync); //TODO there is more to add return result; } @SuppressWarnings({"unchecked", "rawtypes"}) private void addCacheConfig(Map queryMap, CacheConfig... cache) { if (cache == null) return; for (CacheConfig config : cache) if (config != null) queryMap.put(config.getNodeName(), config); } @Override public Properties getSubstituteProperties() { Map<String, Object> p = getOverlay().getUserProps(); if (p == null || p.isEmpty()) return super.getSubstituteProperties(); Properties result = new Properties(super.getSubstituteProperties()); result.putAll(p); return result; } private ConfigOverlay overlay; public ConfigOverlay getOverlay() { if (overlay == null) { overlay = getConfigOverlay(getResourceLoader()); } return overlay; } public RequestParams getRequestParams() { if (requestParams == null) { return refreshRequestParams(); } return requestParams; } public RequestParams refreshRequestParams() { requestParams = RequestParams.getFreshRequestParams(getResourceLoader(), requestParams); if (log.isDebugEnabled()) { log.debug("current version of requestparams : {}", requestParams.getZnodeVersion()); } return requestParams; } }
1
35,287
I don't think 100 is a safe default here, since we later check that the value is between 50 and 95 (see the sketch after this row).
apache-lucene-solr
java
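A hedged Java sketch of the fix the comment points at: pick a default for memoryCircuitBreakerThresholdPct that already sits inside the 50-95 window that validateMemoryBreakerThreshold() enforces. The field and method names mirror the patch; the default of 95, the ConfigReader stand-in for SolrConfig's getters, and the IllegalArgumentException (in place of Solr's own exception type) are assumptions.

class CircuitBreakerConfigSketch {
  /** Minimal stand-in for the SolrConfig getters used below (hypothetical). */
  interface ConfigReader {
    boolean getBool(String path, boolean def);
    int getInt(String path, int def);
  }

  private boolean useCircuitBreakers;
  private int memoryCircuitBreakerThresholdPct;

  void load(ConfigReader config) {
    useCircuitBreakers = config.getBool("circuitBreaker/useCircuitBreakers", false);
    // Default to 95 rather than 100, so an untouched config still passes
    // the range check below.
    memoryCircuitBreakerThresholdPct =
        config.getInt("circuitBreaker/memoryCircuitBreakerThresholdPct", 95);
    validateMemoryBreakerThreshold();
  }

  private void validateMemoryBreakerThreshold() {
    if (useCircuitBreakers
        && (memoryCircuitBreakerThresholdPct < 50 || memoryCircuitBreakerThresholdPct > 95)) {
      throw new IllegalArgumentException(
          "memoryCircuitBreakerThresholdPct must be between 50 and 95");
    }
  }
}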
@@ -49,5 +49,9 @@ namespace NLog.Layouts /// Layout renderer method can handle concurrent threads /// </summary> ThreadSafe = 1, + /// <summary> + /// Layout renderer method is agnostic to current thread context + /// </summary> + ThreadAgnostic = 2 | ThreadSafe, } }
1
// // Copyright (c) 2004-2019 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // namespace NLog.Layouts { using System; /// <summary> /// Options available for <see cref="Layout.CreateFromMethod"/> /// </summary> [Flags] public enum LayoutRenderOptions { /// <summary> /// Default options /// </summary> None = 0, /// <summary> /// Layout renderer method can handle concurrent threads /// </summary> ThreadSafe = 1, } }
1
20,726
It would be nice if we could describe this without the word "agnostic" (see the sketch after this row).
NLog-NLog
.cs
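A minimal C# sketch of one possible rewording, keeping the enum exactly as the patch defines it and changing only the <summary> text; the wording itself is just a suggestion.

using System;

namespace NLog.Layouts
{
    [Flags]
    public enum LayoutRenderOptions
    {
        /// <summary>
        /// Default options
        /// </summary>
        None = 0,
        /// <summary>
        /// Layout renderer method can handle concurrent threads
        /// </summary>
        ThreadSafe = 1,
        /// <summary>
        /// Layout renderer method produces the same output no matter which
        /// thread invokes it, so the result never depends on thread-local
        /// context and can safely be rendered on a different thread
        /// </summary>
        ThreadAgnostic = 2 | ThreadSafe,
    }
}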
@@ -406,6 +406,16 @@ class KNNClassifier(object): print " active inputs:", _labeledInput(inputPattern, cellsPerCol=self.cellsPerCol) + if isSparse > 0: + isSorted = all(inputPattern[i] <= inputPattern[i+1] + for i in xrange(len(inputPattern)-1)) + if not isSorted: + raise RuntimeError("Sparse inputPattern must be sorted.") + + if not all(bit < isSparse for bit in inputPattern): + raise RuntimeError("Sparse inputPattern contains an index outside of" + "the dense representation's bounds.") + if rowID is None: rowID = self._iterationIdx
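A standalone Python re-statement of the validation this patch introduces, assuming isSparse > 0 carries the width of the dense representation; note that the patch's concatenated error message is missing a space between "outside of" and "the dense", which the sketch fixes.

def validate_sparse_input(input_pattern, dense_width):
    """Raise if a sparse index list is unsorted or out of bounds."""
    is_sorted = all(input_pattern[i] <= input_pattern[i + 1]
                    for i in range(len(input_pattern) - 1))
    if not is_sorted:
        raise RuntimeError("Sparse inputPattern must be sorted.")
    if not all(bit < dense_width for bit in input_pattern):
        raise RuntimeError("Sparse inputPattern contains an index outside of "
                           "the dense representation's bounds.")

validate_sparse_input([2, 5, 9], dense_width=10)    # passes
# validate_sparse_input([5, 2, 9], dense_width=10)  # would raise: not sorted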
1
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """This module implements a k nearest neighbor classifier.""" import numpy from nupic.bindings.math import (NearestNeighbor, min_score_per_category) g_debugPrefix = "KNN" KNNCLASSIFIER_VERSION = 1 def _labeledInput(activeInputs, cellsPerCol=32): """Print the list of [column, cellIdx] indices for each of the active cells in activeInputs. """ if cellsPerCol == 0: cellsPerCol = 1 cols = activeInputs.size / cellsPerCol activeInputs = activeInputs.reshape(cols, cellsPerCol) (cols, cellIdxs) = activeInputs.nonzero() if len(cols) == 0: return "NONE" items = ["(%d): " % (len(cols))] prevCol = -1 for (col,cellIdx) in zip(cols, cellIdxs): if col != prevCol: if prevCol != -1: items.append("] ") items.append("Col %d: [" % col) prevCol = col items.append("%d," % cellIdx) items.append("]") return " ".join(items) class KNNClassifier(object): """ This class implements NuPIC's k Nearest Neighbor Classifier. KNN is very useful as a basic classifier for many situations. This implementation contains many enhancements that are useful for HTM experiments. These enhancements include an optimized C++ class for sparse vectors, support for continuous online learning, support for various distance methods (including Lp-norm and raw overlap), support for performing SVD on the input vectors (very useful for large vectors), support for a fixed-size KNN, and a mechanism to store custom ID's for each vector. """ def __init__(self, k=1, exact=False, distanceNorm=2.0, distanceMethod="norm", distThreshold=0, doBinarization=False, binarizationThreshold=0.5, useSparseMemory=True, sparseThreshold=0.1, relativeThreshold=False, numWinners=0, numSVDSamples=None, numSVDDims=None, fractionOfMax=None, verbosity=0, maxStoredPatterns=-1, replaceDuplicates=False, cellsPerCol=0, minSparsity=0.0): """Constructor for the kNN classifier. @param k (int) The number of nearest neighbors used in the classification of patterns. Must be odd @param exact (boolean) If true, patterns must match exactly when assigning class labels @param distanceNorm (int) When distance method is "norm", this specifies the p value of the Lp-norm @param distanceMethod (string) The method used to compute distance between input patterns and prototype patterns. The possible options are: "norm": When distanceNorm is 2, this is the euclidean distance, When distanceNorm is 1, this is the manhattan distance In general: sum(abs(x-proto) ^ distanceNorm) ^ (1/distanceNorm) The distances are normalized such that farthest prototype from a given input is 1.0. 
"rawOverlap": Only appropriate when inputs are binary. This computes: (width of the input) - (# bits of overlap between input and prototype). "pctOverlapOfInput": Only appropriate for binary inputs. This computes 1.0 - (# bits overlap between input and prototype) / (# ON bits in input) "pctOverlapOfProto": Only appropriate for binary inputs. This computes 1.0 - (# bits overlap between input and prototype) / (# ON bits in prototype) "pctOverlapOfLarger": Only appropriate for binary inputs. This computes 1.0 - (# bits overlap between input and prototype) / max(# ON bits in input, # ON bits in prototype) @param distThreshold (float) A threshold on the distance between learned patterns and a new pattern proposed to be learned. The distance must be greater than this threshold in order for the new pattern to be added to the classifier's memory @param doBinarization (boolean) If True, then scalar inputs will be binarized. @param binarizationThreshold (float) If doBinarization is True, this specifies the threshold for the binarization of inputs @param useSparseMemory (boolean) If True, classifier will use a sparse memory matrix @param sparseThreshold (float) If useSparseMemory is True, input variables whose absolute values are less than this threshold will be stored as zero @param relativeThreshold (boolean) Flag specifying whether to multiply sparseThreshold by max value in input @param numWinners (int) Number of elements of the input that are stored. If 0, all elements are stored @param numSVDSamples (int) Number of samples the must occur before a SVD (Singular Value Decomposition) transformation will be performed. If 0, the transformation will never be performed @param numSVDDims (string) Controls dimensions kept after SVD transformation. If "adaptive", the number is chosen automatically @param fractionOfMax (float) If numSVDDims is "adaptive", this controls the smallest singular value that is retained as a fraction of the largest singular value @param verbosity (int) Console verbosity level where 0 is no output and larger integers provide increasing levels of verbosity @param maxStoredPatterns (int) Limits the maximum number of the training patterns stored. When KNN learns in a fixed capacity mode, the unused patterns are deleted once the number of stored patterns is greater than maxStoredPatterns. A value of -1 is no limit @param replaceDuplicates (bool) A boolean flag that determines whether, during learning, the classifier replaces duplicates that match exactly, even if distThreshold is 0. Should be True for online learning @param cellsPerCol (int) If >= 1, input is assumed to be organized into columns, in the same manner as the temporal pooler AND whenever a new prototype is stored, only the start cell (first cell) is stored in any bursting column @param minSparsity (float) If useSparseMemory is set, only vectors with sparsity >= minSparsity will be stored during learning. A value of 0.0 implies all vectors will be stored. 
A value of 0.1 implies only vectors with at least 10% sparsity will be stored """ self.version = KNNCLASSIFIER_VERSION self.k = k self.exact = exact self.distanceNorm = distanceNorm assert (distanceMethod in ("norm", "rawOverlap", "pctOverlapOfLarger", "pctOverlapOfProto", "pctOverlapOfInput")) self.distanceMethod = distanceMethod self.distThreshold = distThreshold self.doBinarization = doBinarization self.binarizationThreshold = binarizationThreshold self.useSparseMemory = useSparseMemory self.sparseThreshold = sparseThreshold self.relativeThreshold = relativeThreshold self.numWinners = numWinners self.numSVDSamples = numSVDSamples self.numSVDDims = numSVDDims self.fractionOfMax = fractionOfMax if self.numSVDDims=="adaptive": self._adaptiveSVDDims = True else: self._adaptiveSVDDims = False self.verbosity = verbosity self.replaceDuplicates = replaceDuplicates self.cellsPerCol = cellsPerCol self.maxStoredPatterns = maxStoredPatterns self.minSparsity = minSparsity self.clear() def clear(self): """Clears the state of the KNNClassifier.""" self._Memory = None self._numPatterns = 0 self._M = None self._categoryList = [] self._partitionIdList = [] self._partitionIdMap = {} self._finishedLearning = False self._iterationIdx = -1 # Fixed capacity KNN if self.maxStoredPatterns > 0: assert self.useSparseMemory, ("Fixed capacity KNN is implemented only " "in the sparse memory mode") self.fixedCapacity = True self._categoryRecencyList = [] else: self.fixedCapacity = False # Cached value of the store prototype sizes self._protoSizes = None # Used by PCA self._s = None self._vt = None self._nc = None self._mean = None # Used by Network Builder self._specificIndexTraining = False self._nextTrainingIndices = None def _doubleMemoryNumRows(self): m = 2 * self._Memory.shape[0] n = self._Memory.shape[1] self._Memory = numpy.resize(self._Memory,(m,n)) self._M = self._Memory[:self._numPatterns] def _sparsifyVector(self, inputPattern, doWinners=False): # Do sparsification, using a relative or absolute threshold if not self.relativeThreshold: inputPattern = inputPattern*(abs(inputPattern) > self.sparseThreshold) elif self.sparseThreshold > 0: inputPattern = inputPattern * \ (abs(inputPattern) > (self.sparseThreshold * abs(inputPattern).max())) # Do winner-take-all if doWinners: if (self.numWinners>0) and (self.numWinners < (inputPattern > 0).sum()): sparseInput = numpy.zeros(inputPattern.shape) # Don't consider strongly negative numbers as winners. sorted = inputPattern.argsort()[0:self.numWinners] sparseInput[sorted] += inputPattern[sorted] inputPattern = sparseInput # Do binarization if self.doBinarization: # Don't binarize negative numbers to positive 1. 
inputPattern = (inputPattern > self.binarizationThreshold).astype(float) return inputPattern def prototypeSetCategory(self, idToRelabel, newCategory): if idToRelabel not in self._categoryRecencyList: return recordIndex = self._categoryRecencyList.index(idToRelabel) self._categoryList[recordIndex] = newCategory def removeIds(self, idsToRemove): # Form a list of all categories to remove rowsToRemove = [k for k, rowID in enumerate(self._categoryRecencyList) \ if rowID in idsToRemove] # Remove rows from the classifier self._removeRows(rowsToRemove) def removeCategory(self, categoryToRemove): removedRows = 0 if self._Memory is None: return removedRows # The internal category indices are stored in float # format, so we should compare with a float catToRemove = float(categoryToRemove) # Form a list of all categories to remove rowsToRemove = [k for k, catID in enumerate(self._categoryList) \ if catID == catToRemove] # Remove rows from the classifier self._removeRows(rowsToRemove) assert catToRemove not in self._categoryList def _removeRows(self, rowsToRemove): """ A list of row indices to remove. There are two caveats. First, this is a potentially slow operation. Second, pattern indices will shift if patterns before them are removed. """ # Form a numpy array of row indices to be removed removalArray = numpy.array(rowsToRemove) # Remove categories self._categoryList = numpy.delete(numpy.array(self._categoryList), removalArray).tolist() if self.fixedCapacity: self._categoryRecencyList = numpy.delete( numpy.array(self._categoryRecencyList), removalArray).tolist() # Remove the partition ID, if any for these rows and rebuild the id map. for row in reversed(rowsToRemove): # Go backwards # Remove these patterns from partitionList self._partitionIdList.pop(row) self._rebuildPartitionIdMap(self._partitionIdList) # Remove actual patterns if self.useSparseMemory: # Delete backwards for rowIndex in rowsToRemove[::-1]: self._Memory.deleteRow(rowIndex) else: self._M = numpy.delete(self._M, removalArray, 0) numRemoved = len(rowsToRemove) # Sanity checks numRowsExpected = self._numPatterns - numRemoved if self.useSparseMemory: if self._Memory is not None: assert self._Memory.nRows() == numRowsExpected else: assert self._M.shape[0] == numRowsExpected assert len(self._categoryList) == numRowsExpected self._numPatterns -= numRemoved return numRemoved def doIteration(self): """Utility method to increment the iteration index. Intended for models that don't learn each timestep. """ self._iterationIdx += 1 def learn(self, inputPattern, inputCategory, partitionId=None, isSparse=0, rowID=None): """Train the classifier to associate specified input pattern with a particular category. @param inputPattern (list) The pattern to be assigned a category. If isSparse is 0, this should be a dense array (both ON and OFF bits present). Otherwise, if isSparse > 0, this should be a list of the indices of the non-zero bits in sorted order @param inputCategory (int) The category to be associated to the training pattern @param partitionId (int) partitionID allows you to associate an id with each input vector. It can be used to associate input patterns stored in the classifier with an external id. This can be useful for debugging or visualizing. Another use case is to ignore vectors with a specific id during inference (see description of infer() for details). There can be at most one partitionId per stored pattern (i.e. if two patterns are within distThreshold, only the first partitionId will be stored). This is an optional parameter. 
@param isSparse (int) If 0, the input pattern is a dense representation. If isSparse > 0, the input pattern is a list of non-zero indices and isSparse is the length of the dense representation @param rowID (int) UNKNOWN @return The number of patterns currently stored in the classifier """ if self.verbosity >= 1: print "%s learn:" % g_debugPrefix print " category:", int(inputCategory) print " active inputs:", _labeledInput(inputPattern, cellsPerCol=self.cellsPerCol) if rowID is None: rowID = self._iterationIdx # Dense vectors if not self.useSparseMemory: # Not supported assert self.cellsPerCol == 0, "not implemented for dense vectors" # If the input was given in sparse form, convert it to dense if isSparse > 0: denseInput = numpy.zeros(isSparse) denseInput[inputPattern] = 1.0 inputPattern = denseInput if self._specificIndexTraining and not self._nextTrainingIndices: # Specific index mode without any index provided - skip training return self._numPatterns if self._Memory is None: # Initialize memory with 100 rows and numPatterns = 0 inputWidth = len(inputPattern) self._Memory = numpy.zeros((100,inputWidth)) self._numPatterns = 0 self._M = self._Memory[:self._numPatterns] addRow = True if self._vt is not None: # Compute projection inputPattern = numpy.dot(self._vt, inputPattern - self._mean) if self.distThreshold > 0: # Check if input is too close to an existing input to be accepted dist = self._calcDistance(inputPattern) minDist = dist.min() addRow = (minDist >= self.distThreshold) if addRow: self._protoSizes = None # need to re-compute if self._numPatterns == self._Memory.shape[0]: # Double the size of the memory self._doubleMemoryNumRows() if not self._specificIndexTraining: # Normal learning - append the new input vector self._Memory[self._numPatterns] = inputPattern self._numPatterns += 1 self._categoryList.append(int(inputCategory)) else: # Specific index training mode - insert vector in specified slot vectorIndex = self._nextTrainingIndices.pop(0) while vectorIndex >= self._Memory.shape[0]: self._doubleMemoryNumRows() self._Memory[vectorIndex] = inputPattern self._numPatterns = max(self._numPatterns, vectorIndex + 1) if vectorIndex >= len(self._categoryList): self._categoryList += [-1] * (vectorIndex - len(self._categoryList) + 1) self._categoryList[vectorIndex] = int(inputCategory) # Set _M to the "active" part of _Memory self._M = self._Memory[0:self._numPatterns] self._addPartitionId(self._numPatterns-1, partitionId) # Sparse vectors else: # If the input was given in sparse form, convert it to dense if necessary if isSparse > 0 and (self._vt is not None or self.distThreshold > 0 \ or self.numSVDDims is not None or self.numSVDSamples is not None \ or self.numWinners > 0): denseInput = numpy.zeros(isSparse) denseInput[inputPattern] = 1.0 inputPattern = denseInput isSparse = 0 # Get the input width if isSparse > 0: inputWidth = isSparse else: inputWidth = len(inputPattern) # Allocate storage if this is the first training vector if self._Memory is None: self._Memory = NearestNeighbor(0, inputWidth) # Support SVD if it is on if self._vt is not None: inputPattern = numpy.dot(self._vt, inputPattern - self._mean) # Threshold the input, zeroing out entries that are too close to 0. # This is only done if we are given a dense input. if isSparse == 0: thresholdedInput = self._sparsifyVector(inputPattern, True) addRow = True # If given the layout of the cells, then turn on the logic that stores # only the start cell for bursting columns. 
if self.cellsPerCol >= 1: burstingCols = thresholdedInput.reshape(-1, self.cellsPerCol).min(axis=1).nonzero()[0] for col in burstingCols: thresholdedInput[(col * self.cellsPerCol) + 1 : (col * self.cellsPerCol) + self.cellsPerCol] = 0 # Don't learn entries that are too close to existing entries. if self._Memory.nRows() > 0: dist = None # if this vector is a perfect match for one we already learned, then # replace the category - it may have changed with online learning on. if self.replaceDuplicates: dist = self._calcDistance(thresholdedInput, distanceNorm=1) if dist.min() == 0: rowIdx = dist.argmin() self._categoryList[rowIdx] = int(inputCategory) if self.fixedCapacity: self._categoryRecencyList[rowIdx] = rowID addRow = False # Don't add this vector if it matches closely with another we already # added if self.distThreshold > 0: if dist is None or self.distanceNorm != 1: dist = self._calcDistance(thresholdedInput) minDist = dist.min() addRow = (minDist >= self.distThreshold) if not addRow: if self.fixedCapacity: rowIdx = dist.argmin() self._categoryRecencyList[rowIdx] = rowID # If sparsity is too low, we do not want to add this vector if addRow and self.minSparsity > 0.0: if isSparse==0: sparsity = ( float(len(thresholdedInput.nonzero()[0])) / len(thresholdedInput) ) else: sparsity = float(len(inputPattern)) / isSparse if sparsity < self.minSparsity: addRow = False # Add the new sparse vector to our storage if addRow: self._protoSizes = None # need to re-compute if isSparse == 0: self._Memory.addRow(thresholdedInput) else: self._Memory.addRowNZ(inputPattern, [1]*len(inputPattern)) self._numPatterns += 1 self._categoryList.append(int(inputCategory)) self._addPartitionId(self._numPatterns-1, partitionId) if self.fixedCapacity: self._categoryRecencyList.append(rowID) if self._numPatterns > self.maxStoredPatterns and \ self.maxStoredPatterns > 0: leastRecentlyUsedPattern = numpy.argmin(self._categoryRecencyList) self._Memory.deleteRow(leastRecentlyUsedPattern) self._categoryList.pop(leastRecentlyUsedPattern) self._categoryRecencyList.pop(leastRecentlyUsedPattern) self._numPatterns -= 1 if self.numSVDDims is not None and self.numSVDSamples is not None \ and self._numPatterns == self.numSVDSamples: self.computeSVD() return self._numPatterns def getOverlaps(self, inputPattern): """Return the degree of overlap between an input pattern and each category stored in the classifier. The overlap is computed by compuing: logical_and(inputPattern != 0, trainingPattern != 0).sum() @param inputPattern pattern to check overlap of @return (overlaps, categories) Two numpy arrays of the same length: overlaps: an integer overlap amount for each category categories: category index for each element of overlaps """ assert self.useSparseMemory, "Not implemented yet for dense storage" overlaps = self._Memory.rightVecSumAtNZ(inputPattern) return (overlaps, self._categoryList) def getDistances(self, inputPattern): """Return the distances between the input pattern and all other stored patterns. @param inputPattern pattern to check distance with @return (distances, categories) numpy arrays of the same length: overlaps: an integer overlap amount for each category categories: category index for each element of distances """ dist = self._getDistances(inputPattern) return (dist, self._categoryList) def infer(self, inputPattern, computeScores=True, overCategories=True, partitionId=None): """Finds the category that best matches the input pattern. Returns the winning category index as well as a distribution over all categories. 
@param inputPattern (list) A pattern to be classified @param computeScores NO EFFECT @param overCategories NO EFFECT @param partitionId (int) If provided, all training vectors with partitionId equal to that of the input pattern are ignored. For example, this may be used to perform k-fold cross validation without repopulating the classifier. First partition all the data into k equal partitions numbered 0, 1, 2, ... and then call learn() for each vector passing in its partitionId. Then, during inference, by passing in the partition ID in the call to infer(), all other vectors with the same partitionId are ignored simulating the effect of repopulating the classifier while ommitting the training vectors in the same partition. This method returns a 4-tuple: (winner, inferenceResult, dist, categoryDist) winner: The category with the greatest number of nearest neighbors within the kth nearest neighbors. If the inferenceResult contains no neighbors, the value of winner is None. This can happen, for example, in cases of exact matching, if there are no stored vectors, or if minSparsity is not met. inferenceResult: A list of length numCategories, each entry contains the number of neighbors within the top k neighbors that are in that category. dist: A list of length numPrototypes. Each entry is the distance from the unknown to that prototype. All distances are between 0.0 and 1.0 categoryDist: A list of length numCategories. Each entry is the distance from the unknown to the nearest prototype of that category. All distances are between 0 and 1.0. """ # Calculate sparsity. If sparsity is too low, we do not want to run # inference with this vector sparsity = 0.0 if self.minSparsity > 0.0: sparsity = ( float(len(inputPattern.nonzero()[0])) / len(inputPattern) ) if len(self._categoryList) == 0 or sparsity < self.minSparsity: # No categories learned yet; i.e. first inference w/ online learning or # insufficient sparsity winner = None inferenceResult = numpy.zeros(1) dist = numpy.ones(1) categoryDist = numpy.ones(1) else: maxCategoryIdx = max(self._categoryList) inferenceResult = numpy.zeros(maxCategoryIdx+1) dist = self._getDistances(inputPattern, partitionId=partitionId) validVectorCount = len(self._categoryList) - self._categoryList.count(-1) # Loop through the indices of the nearest neighbors. if self.exact: # Is there an exact match in the distances? exactMatches = numpy.where(dist<0.00001)[0] if len(exactMatches) > 0: for i in exactMatches[:min(self.k, validVectorCount)]: inferenceResult[self._categoryList[i]] += 1.0 else: sorted = dist.argsort() for j in sorted[:min(self.k, validVectorCount)]: inferenceResult[self._categoryList[j]] += 1.0 # Prepare inference results. if inferenceResult.any(): winner = inferenceResult.argmax() inferenceResult /= inferenceResult.sum() else: winner = None categoryDist = min_score_per_category(maxCategoryIdx, self._categoryList, dist) categoryDist.clip(0, 1.0, categoryDist) if self.verbosity >= 1: print "%s infer:" % (g_debugPrefix) print " active inputs:", _labeledInput(inputPattern, cellsPerCol=self.cellsPerCol) print " winner category:", winner print " pct neighbors of each category:", inferenceResult print " dist of each prototype:", dist print " dist of each category:", categoryDist result = (winner, inferenceResult, dist, categoryDist) return result def getClosest(self, inputPattern, topKCategories=3): """Returns the index of the pattern that is closest to inputPattern, the distances of all patterns to inputPattern, and the indices of the k closest categories. 
""" inferenceResult = numpy.zeros(max(self._categoryList)+1) dist = self._getDistances(inputPattern) sorted = dist.argsort() validVectorCount = len(self._categoryList) - self._categoryList.count(-1) for j in sorted[:min(self.k, validVectorCount)]: inferenceResult[self._categoryList[j]] += 1.0 winner = inferenceResult.argmax() topNCats = [] for i in range(topKCategories): topNCats.append((self._categoryList[sorted[i]], dist[sorted[i]] )) return winner, dist, topNCats def closestTrainingPattern(self, inputPattern, cat): """Returns the closest training pattern to inputPattern that belongs to category "cat". @param inputPattern The pattern whose closest neighbor is sought @param cat The required category of closest neighbor @return A dense version of the closest training pattern, or None if no such patterns exist """ dist = self._getDistances(inputPattern) sorted = dist.argsort() for patIdx in sorted: patternCat = self._categoryList[patIdx] # If closest pattern belongs to desired category, return it if patternCat == cat: if self.useSparseMemory: closestPattern = self._Memory.getRow(int(patIdx)) else: closestPattern = self._M[patIdx] return closestPattern # No patterns were found! return None def closestOtherTrainingPattern(self, inputPattern, cat): """Return the closest training pattern that is *not* of the given category "cat". @param inputPattern The pattern whose closest neighbor is sought @param cat Training patterns of this category will be ignored no matter their distance to inputPattern @return A dense version of the closest training pattern, or None if no such patterns exist """ dist = self._getDistances(inputPattern) sorted = dist.argsort() for patIdx in sorted: patternCat = self._categoryList[patIdx] # If closest pattern does not belong to specified category, return it if patternCat != cat: if self.useSparseMemory: closestPattern = self._Memory.getRow(int(patIdx)) else: closestPattern = self._M[patIdx] return closestPattern # No patterns were found! return None def getPattern(self, idx, sparseBinaryForm=False, cat=None): """Gets a training pattern either by index or category number. @param idx Index of the training pattern @param sparseBinaryForm If true, returns a list of the indices of the non-zero bits in the training pattern @param cat If not None, get the first pattern belonging to category cat. If this is specified, idx must be None. @return The training pattern with specified index """ if cat is not None: assert idx is None idx = self._categoryList.index(cat) if not self.useSparseMemory: pattern = self._Memory[idx] if sparseBinaryForm: pattern = pattern.nonzero()[0] else: (nz, values) = self._Memory.rowNonZeros(idx) if not sparseBinaryForm: pattern = numpy.zeros(self._Memory.nCols()) numpy.put(pattern, nz, 1) else: pattern = nz return pattern def getPartitionId(self, i): """ Returns the partition Id associated with pattern i. Returns None if no Id is associated with it. """ if (i < 0) or (i >= self._numPatterns): raise RuntimeError("index out of bounds") partitionId = self._partitionIdList[i] if partitionId == numpy.inf: return None else: return partitionId def getPartitionIdPerPattern(self): """ Returns a list of numPatterns elements where the i'th position contains the integer partition Id associated with pattern i. If pattern i had no partition Id, it's value will be numpy.inf """ return self._partitionIdList def getNumPartitionIds(self): """ Return the number of unique partition Ids stored. 
""" return len(self._partitionIdMap) def getPartitionIdList(self): """ Return a list containing unique (non-None) partition Ids """ return self._partitionIdMap.keys() def getPatternIndicesWithPartitionId(self, partitionId): """ Returns a list of pattern indices corresponding to this partitionId. Return an empty list if there are none """ return self._partitionIdMap.get(partitionId, []) def _addPartitionId(self, index, partitionId=None): """ Adds partition id for pattern index """ if partitionId is None: self._partitionIdList.append(numpy.inf) else: self._partitionIdList.append(partitionId) indices = self._partitionIdMap.get(partitionId, []) indices.append(index) self._partitionIdMap[partitionId] = indices def _rebuildPartitionIdMap(self, partitionIdList): """ Rebuilds the partition Id map using the given partitionIdList """ self._partitionIdMap = {} for row, partitionId in enumerate(partitionIdList): indices = self._partitionIdMap.get(partitionId, []) indices.append(row) self._partitionIdMap[partitionId] = indices def _calcDistance(self, inputPattern, distanceNorm=None): """Calculate the distances from inputPattern to all stored patterns. All distances are between 0.0 and 1.0 @param inputPattern The pattern from which distances to all other patterns are calculated @param distanceNorm Degree of the distance norm """ if distanceNorm is None: distanceNorm = self.distanceNorm # Sparse memory if self.useSparseMemory: if self._protoSizes is None: self._protoSizes = self._Memory.rowSums() overlapsWithProtos = self._Memory.rightVecSumAtNZ(inputPattern) inputPatternSum = inputPattern.sum() if self.distanceMethod == "rawOverlap": dist = inputPattern.sum() - overlapsWithProtos elif self.distanceMethod == "pctOverlapOfInput": dist = inputPatternSum - overlapsWithProtos if inputPatternSum > 0: dist /= inputPatternSum elif self.distanceMethod == "pctOverlapOfProto": overlapsWithProtos /= self._protoSizes dist = 1.0 - overlapsWithProtos elif self.distanceMethod == "pctOverlapOfLarger": maxVal = numpy.maximum(self._protoSizes, inputPatternSum) if maxVal.all() > 0: overlapsWithProtos /= maxVal dist = 1.0 - overlapsWithProtos elif self.distanceMethod == "norm": dist = self._Memory.vecLpDist(self.distanceNorm, inputPattern) distMax = dist.max() if distMax > 0: dist /= distMax else: raise RuntimeError("Unimplemented distance method %s" % self.distanceMethod) # Dense memory else: if self.distanceMethod == "norm": dist = numpy.power(numpy.abs(self._M - inputPattern), self.distanceNorm) dist = dist.sum(1) dist = numpy.power(dist, 1.0/self.distanceNorm) dist /= dist.max() else: raise RuntimeError ("Not implemented yet for dense storage....") return dist def _getDistances(self, inputPattern, partitionId=None): """Return the distances from inputPattern to all stored patterns. @param inputPattern The pattern from which distances to all other patterns are returned @param partitionId If provided, ignore all training vectors with this partitionId. 
""" if not self._finishedLearning: self.finishLearning() self._finishedLearning = True if self._vt is not None and len(self._vt) > 0: inputPattern = numpy.dot(self._vt, inputPattern - self._mean) sparseInput = self._sparsifyVector(inputPattern) # Compute distances dist = self._calcDistance(sparseInput) # Invalidate results where category is -1 if self._specificIndexTraining: dist[numpy.array(self._categoryList) == -1] = numpy.inf # Ignore vectors with this partition id by setting their distances to inf if partitionId is not None: dist[self._partitionIdMap.get(partitionId, [])] = numpy.inf return dist def finishLearning(self): if self.numSVDDims is not None and self._vt is None: self.computeSVD() def computeSVD(self, numSVDSamples=None, finalize=True): if numSVDSamples is None: numSVDSamples = self._numPatterns if not self.useSparseMemory: self._a = self._Memory[:self._numPatterns] else: self._a = self._Memory.toDense()[:self._numPatterns] self._mean = numpy.mean(self._a, axis=0) self._a -= self._mean u,self._s,self._vt = numpy.linalg.svd(self._a[:numSVDSamples]) if finalize: self.finalizeSVD() return self._s def getAdaptiveSVDDims(self, singularValues, fractionOfMax=0.001): v = singularValues/singularValues[0] idx = numpy.where(v<fractionOfMax)[0] if len(idx): print "Number of PCA dimensions chosen: ", idx[0], "out of ", len(v) return idx[0] else: print "Number of PCA dimensions chosen: ", len(v)-1, "out of ", len(v) return len(v)-1 def finalizeSVD(self, numSVDDims=None): if numSVDDims is not None: self.numSVDDims = numSVDDims if self.numSVDDims=="adaptive": if self.fractionOfMax is not None: self.numSVDDims = self.getAdaptiveSVDDims(self._s, self.fractionOfMax) else: self.numSVDDims = self.getAdaptiveSVDDims(self._s) if self._vt.shape[0] < self.numSVDDims: print "******************************************************************" print ("Warning: The requested number of PCA dimensions is more than " "the number of pattern dimensions.") print "Setting numSVDDims = ", self._vt.shape[0] print "******************************************************************" self.numSVDDims = self._vt.shape[0] self._vt = self._vt[:self.numSVDDims] # Added when svd is not able to decompose vectors - uses raw spare vectors if len(self._vt) == 0: return self._Memory = numpy.zeros((self._numPatterns,self.numSVDDims)) self._M = self._Memory self.useSparseMemory = False for i in range(self._numPatterns): self._Memory[i] = numpy.dot(self._vt, self._a[i]) self._a = None def remapCategories(self, mapping): """Change the category indices. Used by the Network Builder to keep the category indices in sync with the ImageSensor categoryInfo when the user renames or removes categories. @param mapping List of new category indices. For example, mapping=[2,0,1] would change all vectors of category 0 to be category 2, category 1 to 0, and category 2 to 1 """ categoryArray = numpy.array(self._categoryList) newCategoryArray = numpy.zeros(categoryArray.shape[0]) newCategoryArray.fill(-1) for i in xrange(len(mapping)): newCategoryArray[categoryArray==i] = mapping[i] self._categoryList = list(newCategoryArray) def setCategoryOfVectors(self, vectorIndices, categoryIndices): """Change the category associated with this vector(s). Used by the Network Builder to move vectors between categories, to enable categories, and to invalidate vectors by setting the category to -1. @param vectorIndices Single index or list of indices @param categoryIndices Single index or list of indices. 
Can also be a single index when vectorIndices is a list, in which case the same category will be used for all vectors """ if not hasattr(vectorIndices, "__iter__"): vectorIndices = [vectorIndices] categoryIndices = [categoryIndices] elif not hasattr(categoryIndices, "__iter__"): categoryIndices = [categoryIndices] * len(vectorIndices) for i in xrange(len(vectorIndices)): vectorIndex = vectorIndices[i] categoryIndex = categoryIndices[i] # Out-of-bounds is not an error, because the KNN may not have seen the # vector yet if vectorIndex < len(self._categoryList): self._categoryList[vectorIndex] = categoryIndex def __getstate__(self): """Return serializable state. This function will return a version of the __dict__. """ state = self.__dict__.copy() return state def __setstate__(self, state): """Set the state of this object from a serialized state.""" if "version" not in state: pass elif state["version"] == 1: pass elif state["version"] == 2: raise RuntimeError("Invalid deserialization of invalid KNNClassifier" "Version") # Backward compatibility if "_partitionIdArray" in state: state.pop("_partitionIdArray") if "minSparsity" not in state: state["minSparsity"] = 0.0 self.__dict__.update(state) # Backward compatibility if "_partitionIdMap" not in state: self._rebuildPartitionIdMap(self._partitionIdList) # Set to new version self.version = KNNCLASSIFIER_VERSION
1
20,844
Pick more specific exception types for these cases. Not sure what would be best for this one (`ValueError`?), but the next one could be `IndexError`.
numenta-nupic
py
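A minimal sketch of what the reviewer's suggestion might look like, assuming `ValueError` for the unsorted check and `IndexError` for the out-of-bounds check; the helper name `_validateSparseInput` is hypothetical and not part of the patch:

def _validateSparseInput(inputPattern, isSparse):
  # Hypothetical helper: the patch raises RuntimeError in both cases,
  # the reviewer suggests more specific exception types instead.
  isSorted = all(inputPattern[i] <= inputPattern[i+1]
                 for i in xrange(len(inputPattern)-1))
  if not isSorted:
    raise ValueError("Sparse inputPattern must be sorted.")

  if not all(bit < isSparse for bit in inputPattern):
    raise IndexError("Sparse inputPattern contains an index outside of "
                     "the dense representation's bounds.")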
@@ -0,0 +1,6 @@ +const jestConfig = require( './jest.config' ); + +module.exports = { + ...jestConfig, + testMatch: [ '<rootDir>/.storybook/?(*.)test.js' ], +};
1
1
40,481
This seemed like the easiest solution. AFAIK we need to include this path in `testMatch` to run the test, *but* we don't want this test run when running them all normally. I'm sure there are other ways to do this!
google-site-kit-wp
js
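One alternative to the author's split-config approach might be to exclude the Storybook directory from the default run instead; this is a sketch only (`testPathIgnorePatterns` is a standard Jest option, but the entries here are assumptions about this repo's layout):

// jest.config.js (sketch): keep .storybook tests out of the normal run,
// so only the dedicated Storybook config above picks them up.
module.exports = {
	// ...existing options...
	testPathIgnorePatterns: [
		'/node_modules/',
		'<rootDir>/.storybook/',
	],
};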
@@ -114,7 +114,7 @@ namespace OpenTelemetry.Exporter.Jaeger.Implementation.Tests var ex = Assert.Throws<TTransportException>(() => transport.Flush()); - Assert.Equal("Cannot flush closed transport. message, yo", ex.Message); + Assert.Equal("Cannot flush closed transport", ex.Message); } [Fact]
1
// <copyright file="ThriftUdpClientTransportTests.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.IO; using System.Threading; using System.Threading.Tasks; using Moq; using Thrift.Transport; using Xunit; namespace OpenTelemetry.Exporter.Jaeger.Implementation.Tests { public class ThriftUdpClientTransportTests : IDisposable { private readonly Mock<IJaegerClient> mockClient = new Mock<IJaegerClient>(); private MemoryStream testingMemoryStream = new MemoryStream(); public void Dispose() { this.testingMemoryStream?.Dispose(); } [Fact] public void Constructor_ShouldConnectClient() { var host = "host, yo"; var port = 4528; new JaegerThriftClientTransport(host, port, this.testingMemoryStream, this.mockClient.Object); this.mockClient.Verify(t => t.Connect(host, port), Times.Once); } [Fact] public void Close_ShouldCloseClient() { var host = "host, yo"; var port = 4528; var transport = new JaegerThriftClientTransport(host, port, this.testingMemoryStream, this.mockClient.Object); transport.Close(); this.mockClient.Verify(t => t.Close(), Times.Once); } [Fact] public async Task Write_ShouldWriteToMemoryStream() { var host = "host, yo"; var port = 4528; var writeBuffer = new byte[] { 0x20, 0x10, 0x40, 0x30, 0x18, 0x14, 0x10, 0x28 }; var readBuffer = new byte[8]; var transport = new JaegerThriftClientTransport(host, port, this.testingMemoryStream, this.mockClient.Object); transport.Write(writeBuffer); this.testingMemoryStream.Seek(0, SeekOrigin.Begin); var size = await this.testingMemoryStream.ReadAsync(readBuffer, 0, 8, CancellationToken.None); Assert.Equal(8, size); Assert.Equal(writeBuffer, readBuffer); } [Fact] public void Flush_ShouldReturnWhenNothingIsInTheStream() { var host = "host, yo"; var port = 4528; var transport = new JaegerThriftClientTransport(host, port, this.testingMemoryStream, this.mockClient.Object); var tInfo = transport.Flush(); this.mockClient.Verify(t => t.Send(It.IsAny<byte[]>()), Times.Never); } [Fact] public void Flush_ShouldSendStreamBytes() { var host = "host, yo"; var port = 4528; var streamBytes = new byte[] { 0x20, 0x10, 0x40, 0x30, 0x18, 0x14, 0x10, 0x28 }; this.testingMemoryStream = new MemoryStream(streamBytes); var transport = new JaegerThriftClientTransport(host, port, this.testingMemoryStream, this.mockClient.Object); var tInfo = transport.Flush(); this.mockClient.Verify(t => t.Send(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>()), Times.Once); } [Fact] public void Flush_ShouldThrowWhenClientDoes() { var host = "host, yo"; var port = 4528; var streamBytes = new byte[] { 0x20, 0x10, 0x40, 0x30, 0x18, 0x14, 0x10, 0x28 }; this.testingMemoryStream = new MemoryStream(streamBytes); this.mockClient.Setup(t => t.Send(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>())).Throws(new Exception("message, yo")); var transport = new JaegerThriftClientTransport(host, port, this.testingMemoryStream, this.mockClient.Object); var ex = 
Assert.Throws<TTransportException>(() => transport.Flush()); Assert.Equal("Cannot flush closed transport. message, yo", ex.Message); } [Fact] public void Dispose_ShouldCloseClientAndDisposeMemoryStream() { var host = "host, yo"; var port = 4528; var transport = new JaegerThriftClientTransport(host, port, this.testingMemoryStream, this.mockClient.Object); transport.Dispose(); this.mockClient.Verify(t => t.Dispose(), Times.Once); Assert.False(this.testingMemoryStream.CanRead); Assert.False(this.testingMemoryStream.CanSeek); Assert.False(this.testingMemoryStream.CanWrite); } [Fact] public void Dispose_ShouldNotTryToDisposeResourcesMoreThanOnce() { var host = "host, yo"; var port = 4528; var transport = new JaegerThriftClientTransport(host, port, this.testingMemoryStream, this.mockClient.Object); transport.Dispose(); transport.Dispose(); this.mockClient.Verify(t => t.Dispose(), Times.Once); Assert.False(this.testingMemoryStream.CanRead); Assert.False(this.testingMemoryStream.CanSeek); Assert.False(this.testingMemoryStream.CanWrite); } } }
1
21,675
Most of the following test changes can be avoided if need be.
open-telemetry-opentelemetry-dotnet
.cs
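If the dropped detail still matters, a hedged alternative to loosening the assertion is to check the wrapped error separately; this assumes the transport stores the client failure as an InnerException, which the source here doesn't confirm:

// Sketch: assert the stable outer message and, separately, the wrapped
// client error - rather than matching one concatenated string.
var ex = Assert.Throws<TTransportException>(() => transport.Flush());
Assert.Equal("Cannot flush closed transport", ex.Message);
Assert.Equal("message, yo", ex.InnerException?.Message);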
@@ -181,14 +181,8 @@ int Partitioner::Run(const PartitionConfig &config) TIMER_STOP(packed_mlp); util::Log() << "MultiLevelPartition constructed in " << TIMER_SEC(packed_mlp) << " seconds"; - TIMER_START(cell_storage); - CellStorage storage(mlp, *edge_based_graph); - TIMER_STOP(cell_storage); - util::Log() << "CellStorage constructed in " << TIMER_SEC(cell_storage) << " seconds"; - TIMER_START(writing_mld_data); io::write(config.mld_partition_path, mlp); - io::write(config.mld_storage_path, storage); TIMER_STOP(writing_mld_data); util::Log() << "MLD data writing took " << TIMER_SEC(writing_mld_data) << " seconds";
1
#include "partition/partitioner.hpp" #include "partition/bisection_graph.hpp" #include "partition/bisection_to_partition.hpp" #include "partition/compressed_node_based_graph_reader.hpp" #include "partition/edge_based_graph_reader.hpp" #include "partition/io.hpp" #include "partition/multi_level_partition.hpp" #include "partition/node_based_graph_to_edge_based_graph_mapping_reader.hpp" #include "partition/recursive_bisection.hpp" #include "util/coordinate.hpp" #include "util/geojson_debug_logger.hpp" #include "util/geojson_debug_policies.hpp" #include "util/integer_range.hpp" #include "util/json_container.hpp" #include "util/log.hpp" #include <algorithm> #include <iterator> #include <unordered_set> #include <vector> #include <boost/assert.hpp> #include "util/geojson_debug_logger.hpp" #include "util/geojson_debug_policies.hpp" #include "util/json_container.hpp" #include "util/timing_util.hpp" namespace osrm { namespace partition { void LogGeojson(const std::string &filename, const std::vector<std::uint32_t> &bisection_ids) { // reload graph, since we destroyed the old one auto compressed_node_based_graph = LoadCompressedNodeBasedGraph(filename); util::Log() << "Loaded compressed node based graph: " << compressed_node_based_graph.edges.size() << " edges, " << compressed_node_based_graph.coordinates.size() << " nodes"; groupEdgesBySource(begin(compressed_node_based_graph.edges), end(compressed_node_based_graph.edges)); auto graph = makeBisectionGraph(compressed_node_based_graph.coordinates, adaptToBisectionEdge(std::move(compressed_node_based_graph.edges))); const auto get_level = [](const std::uint32_t lhs, const std::uint32_t rhs) { auto xored = lhs ^ rhs; std::uint32_t level = log(xored) / log(2.0); return level; }; std::vector<std::vector<util::Coordinate>> border_vertices(33); for (NodeID nid = 0; nid < graph.NumberOfNodes(); ++nid) { const auto source_id = bisection_ids[nid]; for (const auto &edge : graph.Edges(nid)) { const auto target_id = bisection_ids[edge.target]; if (source_id != target_id) { auto level = get_level(source_id, target_id); border_vertices[level].push_back(graph.Node(nid).coordinate); border_vertices[level].push_back(graph.Node(edge.target).coordinate); } } } util::ScopedGeojsonLoggerGuard<util::CoordinateVectorToMultiPoint> guard( "border_vertices.geojson"); std::size_t level = 0; for (auto &bv : border_vertices) { if (!bv.empty()) { std::sort(bv.begin(), bv.end(), [](const auto lhs, const auto rhs) { return std::tie(lhs.lon, lhs.lat) < std::tie(rhs.lon, rhs.lat); }); bv.erase(std::unique(bv.begin(), bv.end()), bv.end()); util::json::Object jslevel; jslevel.values["level"] = util::json::Number(level++); guard.Write(bv, jslevel); } } } int Partitioner::Run(const PartitionConfig &config) { auto compressed_node_based_graph = LoadCompressedNodeBasedGraph(config.compressed_node_based_graph_path.string()); util::Log() << "Loaded compressed node based graph: " << compressed_node_based_graph.edges.size() << " edges, " << compressed_node_based_graph.coordinates.size() << " nodes"; groupEdgesBySource(begin(compressed_node_based_graph.edges), end(compressed_node_based_graph.edges)); auto graph = makeBisectionGraph(compressed_node_based_graph.coordinates, adaptToBisectionEdge(std::move(compressed_node_based_graph.edges))); util::Log() << " running partition: " << config.minimum_cell_size << " " << config.balance << " " << config.boundary_factor << " " << config.num_optimizing_cuts << " " << config.small_component_size << " # max_cell_size balance boundary cuts 
small_component_size"; RecursiveBisection recursive_bisection(graph, config.minimum_cell_size, config.balance, config.boundary_factor, config.num_optimizing_cuts, config.small_component_size); // Up until now we worked on the compressed node based graph. // But what we actually need is a partition for the edge based graph to work on. // The following loads a mapping from node based graph to edge based graph. // Then loads the edge based graph tanslates the partition and modifies it. // For details see #3205 auto mapping = LoadNodeBasedGraphToEdgeBasedGraphMapping(config.cnbg_ebg_mapping_path.string()); util::Log() << "Loaded node based graph to edge based graph mapping"; auto edge_based_graph = LoadEdgeBasedGraph(config.edge_based_graph_path.string()); util::Log() << "Loaded edge based graph for mapping partition ids: " << edge_based_graph->GetNumberOfEdges() << " edges, " << edge_based_graph->GetNumberOfNodes() << " nodes"; // TODO: node based graph to edge based graph partition id mapping should be done split off. // Partition ids, keyed by node based graph nodes const auto &node_based_partition_ids = recursive_bisection.BisectionIDs(); // Partition ids, keyed by edge based graph nodes std::vector<NodeID> edge_based_partition_ids(edge_based_graph->GetNumberOfNodes()); // Extract edge based border nodes, based on node based partition and mapping. for (const auto node : util::irange(0u, edge_based_graph->GetNumberOfNodes())) { const auto node_based_nodes = mapping.Lookup(node); const auto u = node_based_nodes.u; const auto v = node_based_nodes.v; if (node_based_partition_ids[u] == node_based_partition_ids[v]) { // Can use partition_ids[u/v] as partition for edge based graph `node_id` edge_based_partition_ids[node] = node_based_partition_ids[u]; } else { // Border nodes u,v - need to be resolved. // FIXME: just pick one side for now. See #3205. edge_based_partition_ids[node] = node_based_partition_ids[u]; } } std::vector<Partition> partitions; std::vector<std::uint32_t> level_to_num_cells; std::tie(partitions, level_to_num_cells) = bisectionToPartition(edge_based_partition_ids, {config.minimum_cell_size, config.minimum_cell_size * 32, config.minimum_cell_size * 32 * 16, config.minimum_cell_size * 32 * 16 * 32}); util::Log() << "Edge-based-graph annotation:"; for (std::size_t level = 0; level < level_to_num_cells.size(); ++level) { util::Log() << " level " << level + 1 << " #cells " << level_to_num_cells[level] << " bit size " << std::ceil(std::log2(level_to_num_cells[level] + 1)); } TIMER_START(packed_mlp); MultiLevelPartition mlp{partitions, level_to_num_cells}; TIMER_STOP(packed_mlp); util::Log() << "MultiLevelPartition constructed in " << TIMER_SEC(packed_mlp) << " seconds"; TIMER_START(cell_storage); CellStorage storage(mlp, *edge_based_graph); TIMER_STOP(cell_storage); util::Log() << "CellStorage constructed in " << TIMER_SEC(cell_storage) << " seconds"; TIMER_START(writing_mld_data); io::write(config.mld_partition_path, mlp); io::write(config.mld_storage_path, storage); TIMER_STOP(writing_mld_data); util::Log() << "MLD data writing took " << TIMER_SEC(writing_mld_data) << " seconds"; return 0; } } // namespace partition } // namespace osrm
1
20,595
Eventually we might want to move this back, as generating this data structure takes some time (for large datasets) and we don't want it in the hot path of `osrm-customize` runs. Thinking long-term, making `osrm-customize` eventually digest `.osrm.cells` will enable updating only selected cells and reusing an earlier customization for the rest.
Project-OSRM-osrm-backend
cpp
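For context, the block this patch deletes would presumably be rebuilt on the customization side; a purely illustrative sketch of that step (the surrounding customizer function is an assumption, while the CellStorage construction and io::write calls come straight from the diff):

// Sketch, not actual osrm-customize code: construct and persist the
// cell storage outside the partitioner, per the comment above.
TIMER_START(cell_storage);
CellStorage storage(mlp, *edge_based_graph);
TIMER_STOP(cell_storage);
util::Log() << "CellStorage constructed in " << TIMER_SEC(cell_storage) << " seconds";
io::write(config.mld_storage_path, storage);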
@@ -110,8 +110,15 @@ func (cmd *CommandRun) Wait() error { return cmd.vpnServer.Wait() } -func (cmd *CommandRun) Kill() { +func (cmd *CommandRun) Kill() (err error) { cmd.vpnServer.Stop() - cmd.dialogWaiter.Stop() - cmd.natService.Stop() + err = cmd.dialogWaiter.Stop() + if err != nil { + return err + } + err = cmd.natService.Stop() + if err != nil { + return err + } + return nil }
1
package command_run import ( log "github.com/cihub/seelog" "github.com/mysterium/node/communication" "github.com/mysterium/node/identity" "github.com/mysterium/node/ipify" "github.com/mysterium/node/location" "github.com/mysterium/node/nat" "github.com/mysterium/node/openvpn" "github.com/mysterium/node/openvpn/service_discovery" "github.com/mysterium/node/server" dto_discovery "github.com/mysterium/node/service_discovery/dto" "github.com/mysterium/node/session" "github.com/pkg/errors" "time" ) type CommandRun struct { identityLoader func() (identity.Identity, error) createSigner identity.SignerFactory ipifyClient ipify.Client mysteriumClient server.Client natService nat.NATService dialogWaiterFactory func(identity identity.Identity) (communication.DialogWaiter, dto_discovery.Contact) dialogWaiter communication.DialogWaiter sessionManagerFactory func(serverIp string) session.ManagerInterface vpnServerFactory func() *openvpn.Server vpnServer *openvpn.Server } func (cmd *CommandRun) Run() (err error) { providerId, err := cmd.identityLoader() if err != nil { return err } var providerContact dto_discovery.Contact cmd.dialogWaiter, providerContact = cmd.dialogWaiterFactory(providerId) // if for some reason we will need truly external IP, use GetPublicIP() vpnServerIp, err := cmd.ipifyClient.GetOutboundIP() if err != nil { return err } cmd.natService.Add(nat.RuleForwarding{ SourceAddress: "10.8.0.0/24", TargetIp: vpnServerIp, }) if err = cmd.natService.Start(); err != nil { return err } country, err := detectCountry() if err != nil { return err } log.Info("Country detected: ", country) location := dto_discovery.Location{Country: country} proposal := service_discovery.NewServiceProposalWithLocation(providerId, providerContact, location) sessionCreateConsumer := &session.SessionCreateConsumer{ CurrentProposalId: proposal.Id, SessionManager: cmd.sessionManagerFactory(vpnServerIp), } if err = cmd.dialogWaiter.ServeDialogs(sessionCreateConsumer); err != nil { return err } cmd.vpnServer = cmd.vpnServerFactory() if err := cmd.vpnServer.Start(); err != nil { return err } signer := cmd.createSigner(providerId) if err := cmd.mysteriumClient.RegisterProposal(proposal, signer); err != nil { return err } go func() { for { time.Sleep(1 * time.Minute) cmd.mysteriumClient.NodeSendStats(providerId.Address, signer) } }() return nil } func detectCountry() (string, error) { ipifyClient := ipify.NewClient() ip, err := ipifyClient.GetPublicIP() if err != nil { return "", errors.New("IP detection failed: " + err.Error()) } country, err := location.DetectCountry(ip) if err != nil { return "", errors.New("Country detection failed: " + err.Error()) } return country, nil } func (cmd *CommandRun) Wait() error { return cmd.vpnServer.Wait() } func (cmd *CommandRun) Kill() { cmd.vpnServer.Stop() cmd.dialogWaiter.Stop() cmd.natService.Stop() }
1
10,168
You're not really using the named result variable - you can just leave `error`.
mysteriumnetwork-node
go
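A sketch of the reviewer's point: since the named result is never used, the signature can stay a plain error while keeping the patch's logic unchanged:

// Sketch: same behavior as the patch, without the unused named result.
func (cmd *CommandRun) Kill() error {
	cmd.vpnServer.Stop()
	if err := cmd.dialogWaiter.Stop(); err != nil {
		return err
	}
	if err := cmd.natService.Stop(); err != nil {
		return err
	}
	return nil
}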
@@ -37,16 +37,14 @@ public class SortOrderUtil { } public static SortOrder buildSortOrder(Table table) { - return buildSortOrder(table.spec(), table.sortOrder()); + return buildSortOrder(table.schema(), table.spec(), table.sortOrder()); } - public static SortOrder buildSortOrder(PartitionSpec spec, SortOrder sortOrder) { + public static SortOrder buildSortOrder(Schema schema, PartitionSpec spec, SortOrder sortOrder) { if (sortOrder.isUnsorted() && spec.isUnpartitioned()) { return SortOrder.unsorted(); } - Schema schema = spec.schema(); - Multimap<Integer, SortField> sortFieldIndex = Multimaps.index(sortOrder.fields(), SortField::sourceId); // build a sort prefix of partition fields that are not already in the sort order
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.util; import java.util.Collection; import org.apache.iceberg.PartitionField; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.SortField; import org.apache.iceberg.SortOrder; import org.apache.iceberg.Table; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.relocated.com.google.common.collect.Multimap; import org.apache.iceberg.relocated.com.google.common.collect.Multimaps; import org.apache.iceberg.transforms.SortOrderVisitor; public class SortOrderUtil { private SortOrderUtil() { } public static SortOrder buildSortOrder(Table table) { return buildSortOrder(table.spec(), table.sortOrder()); } public static SortOrder buildSortOrder(PartitionSpec spec, SortOrder sortOrder) { if (sortOrder.isUnsorted() && spec.isUnpartitioned()) { return SortOrder.unsorted(); } Schema schema = spec.schema(); Multimap<Integer, SortField> sortFieldIndex = Multimaps.index(sortOrder.fields(), SortField::sourceId); // build a sort prefix of partition fields that are not already in the sort order SortOrder.Builder builder = SortOrder.builderFor(schema); for (PartitionField field : spec.fields()) { Collection<SortField> sortFields = sortFieldIndex.get(field.sourceId()); boolean isSorted = sortFields.stream().anyMatch(sortField -> field.transform().equals(sortField.transform()) || sortField.transform().satisfiesOrderOf(field.transform())); if (!isSorted) { String sourceName = schema.findColumnName(field.sourceId()); builder.asc(Expressions.transform(sourceName, field.transform())); } } // add the configured sort to the partition spec prefix sort SortOrderVisitor.visit(sortOrder, new CopySortOrderFields(builder)); return builder.build(); } }
1
33,632
It wasn't possible to call this method with `PartitionSpec.unpartitioned()` as the schema in the spec was empty.
apache-iceberg
java
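A sketch of the failure mode the comment describes: the old overload resolved column names through spec.schema(), which is empty for an unpartitioned spec, so a call like the one below could not work until the schema became an explicit parameter (the variables here are illustrative):

// Sketch: with the new overload, columns resolve against the table schema
// even though PartitionSpec.unpartitioned() carries no schema of its own.
SortOrder order = SortOrderUtil.buildSortOrder(
    table.schema(), PartitionSpec.unpartitioned(), table.sortOrder());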
@@ -26,11 +26,13 @@ use Thelia\Exception\ModuleException; use Thelia\Log\Tlog; use Thelia\Model\Cart; use Thelia\Model\Country; +use Thelia\Model\Hook; use Thelia\Model\HookQuery; use Thelia\Model\Lang; use Thelia\Model\Map\ModuleImageTableMap; use Thelia\Model\Map\ModuleTableMap; use Thelia\Model\Module; +use Thelia\Model\ModuleConfigQuery; use Thelia\Model\ModuleI18n; use Thelia\Model\ModuleI18nQuery; use Thelia\Model\ModuleImage;
1
<?php /*************************************************************************************/ /* This file is part of the Thelia package. */ /* */ /* Copyright (c) OpenStudio */ /* email : [email protected] */ /* web : http://www.thelia.net */ /* */ /* For the full copyright and license information, please view the LICENSE.txt */ /* file that was distributed with this source code. */ /*************************************************************************************/ namespace Thelia\Module; use Propel\Runtime\Connection\ConnectionInterface; use Propel\Runtime\Propel; use Symfony\Component\DependencyInjection\ContainerAware; use Symfony\Component\EventDispatcher\EventDispatcherInterface; use Symfony\Component\HttpFoundation\Request; use Thelia\Core\Event\Hook\HookCreateAllEvent; use Thelia\Core\Event\Hook\HookUpdateEvent; use Thelia\Core\Event\TheliaEvents; use Thelia\Core\HttpFoundation\Session\Session; use Thelia\Core\Template\TemplateDefinition; use Thelia\Exception\ModuleException; use Thelia\Log\Tlog; use Thelia\Model\Cart; use Thelia\Model\Country; use Thelia\Model\HookQuery; use Thelia\Model\Lang; use Thelia\Model\Map\ModuleImageTableMap; use Thelia\Model\Map\ModuleTableMap; use Thelia\Model\Module; use Thelia\Model\ModuleI18n; use Thelia\Model\ModuleI18nQuery; use Thelia\Model\ModuleImage; use Thelia\Model\ModuleQuery; use Thelia\Model\Order; use Thelia\TaxEngine\TaxEngine; use Thelia\Tools\Image; class BaseModule extends ContainerAware implements BaseModuleInterface { const CLASSIC_MODULE_TYPE = 1; const DELIVERY_MODULE_TYPE = 2; const PAYMENT_MODULE_TYPE = 3; const IS_ACTIVATED = 1; const IS_NOT_ACTIVATED = 0; protected $reflected; protected $dispatcher = null; protected $request = null; public function activate($moduleModel = null) { if (null === $moduleModel) { $moduleModel = $this->getModuleModel(); } if ($moduleModel->getActivate() == self::IS_NOT_ACTIVATED) { $con = Propel::getWriteConnection(ModuleTableMap::DATABASE_NAME); $con->beginTransaction(); try { if ($this->preActivation($con)) { $moduleModel->setActivate(self::IS_ACTIVATED); $moduleModel->save($con); $this->postActivation($con); $con->commit(); } } catch (\Exception $e) { $con->rollBack(); throw $e; } $this->registerHooks(); } } public function deActivate($moduleModel = null) { if (null === $moduleModel) { $moduleModel = $this->getModuleModel(); } if ($moduleModel->getActivate() == self::IS_ACTIVATED) { $con = Propel::getWriteConnection(ModuleTableMap::DATABASE_NAME); $con->beginTransaction(); try { if ($this->preDeactivation($con)) { $moduleModel->setActivate(self::IS_NOT_ACTIVATED); $moduleModel->save($con); $this->postDeactivation($con); $con->commit(); } } catch (\Exception $e) { $con->rollBack(); throw $e; } } } public function hasContainer() { return null !== $this->container; } public function getContainer() { if ($this->hasContainer() === false) { throw new \RuntimeException("Sorry, container is not available in this context"); } return $this->container; } public function hasRequest() { return null !== $this->request; } public function setRequest(Request $request) { $this->request = $request; } /** * @return \Thelia\Core\HttpFoundation\Request the request. * * @throws \RuntimeException */ public function getRequest() { if ($this->hasRequest() === false) { // Try to get request from container. 
$this->setRequest($this->getContainer()->get('request')); } if ($this->hasRequest() === false) { throw new \RuntimeException("Sorry, the request is not available in this context"); } return $this->request; } public function hasDispatcher() { return null !== $this->dispatcher; } public function setDispatcher(EventDispatcherInterface $dispatcher) { $this->dispatcher = $dispatcher; } public function getDispatcher() { if ($this->hasDispatcher() === false) { throw new \RuntimeException("Sorry, the dispatcher is not available in this context"); } return $this->dispatcher; } /** * Sets a module titles for various languages * * @param Module $module the module. * @param array $titles an associative array of locale => title_string */ public function setTitle(Module $module, $titles) { if (is_array($titles)) { foreach ($titles as $locale => $title) { $moduleI18n = ModuleI18nQuery::create()->filterById($module->getId())->filterByLocale($locale)->findOne(); if (null === $moduleI18n) { $moduleI18n = new ModuleI18n(); $moduleI18n ->setId($module->getId()) ->setLocale($locale) ->setTitle($title) ; $moduleI18n->save(); } else { $moduleI18n->setTitle($title); $moduleI18n->save(); } } } } /** * Ensure the proper deployment of the module's images. * * TODO : this method does not take care of internationalization. This is a bug. * * @param Module $module the module * @param string $folderPath the image folder path * @param ConnectionInterface $con * * @throws \Thelia\Exception\ModuleException * @throws \Exception * @throws \UnexpectedValueException */ public function deployImageFolder(Module $module, $folderPath, ConnectionInterface $con = null) { try { $directoryBrowser = new \DirectoryIterator($folderPath); } catch (\UnexpectedValueException $e) { throw $e; } if (null === $con) { $con = Propel::getConnection( ModuleImageTableMap::DATABASE_NAME ); } /* browse the directory */ $imagePosition = 1; /** @var \DirectoryIterator $directoryContent */ foreach ($directoryBrowser as $directoryContent) { /* is it a file ? */ if ($directoryContent->isFile()) { $fileName = $directoryContent->getFilename(); $filePath = $directoryContent->getPathName(); /* is it a picture ? */ if ( Image::isImage($filePath) ) { $con->beginTransaction(); $image = new ModuleImage(); $image->setModuleId($module->getId()); $image->setPosition($imagePosition); $image->save($con); $imageDirectory = sprintf("%s/../../../../local/media/images/module", __DIR__); $imageFileName = sprintf("%s-%d-%s", $module->getCode(), $image->getId(), $fileName); $increment = 0; while (file_exists($imageDirectory . '/' . $imageFileName)) { $imageFileName = sprintf("%s-%d-%d-%s", $module->getCode(), $image->getId(), $increment, $fileName); $increment++; } $imagePath = sprintf('%s/%s', $imageDirectory, $imageFileName); if (! is_dir($imageDirectory)) { if (! @mkdir($imageDirectory, 0777, true)) { $con->rollBack(); throw new ModuleException(sprintf("Cannot create directory : %s", $imageDirectory), ModuleException::CODE_NOT_FOUND); } } if (! 
@copy($filePath, $imagePath)) {
                        $con->rollBack();
                        throw new ModuleException(sprintf("Cannot copy file : %s to : %s", $filePath, $imagePath), ModuleException::CODE_NOT_FOUND);
                    }

                    $image->setFile($imageFileName);
                    $image->save($con);

                    $con->commit();
                    $imagePosition++;
                }
            }
        }
    }

    /**
     * @return Module
     * @throws \Thelia\Exception\ModuleException
     */
    public function getModuleModel()
    {
        $moduleModel = ModuleQuery::create()->findOneByCode($this->getCode());

        if (null === $moduleModel) {
            throw new ModuleException(sprintf("Module Code `%s` not found", $this->getCode()), ModuleException::CODE_NOT_FOUND);
        }

        return $moduleModel;
    }

    public function getCode()
    {
        if (null === $this->reflected) {
            $this->reflected = new \ReflectionObject($this);
        }

        return basename(dirname($this->reflected->getFileName()));
    }

    /**
     * Check if this module is the payment module for a given order
     *
     * @param  Order $order an order
     * @return bool  true if this module is the payment module for the given order.
     */
    public function isPaymentModuleFor(Order $order)
    {
        $model = $this->getModuleModel();

        return $order->getPaymentModuleId() == $model->getId();
    }

    /**
     * Check if this module is the delivery module for a given order
     *
     * @param  Order $order an order
     * @return bool  true if this module is the delivery module for the given order.
     */
    public function isDeliveryModuleFor(Order $order)
    {
        $model = $this->getModuleModel();

        return $order->getDeliveryModuleId() == $model->getId();
    }

    /**
     * A convenient method to get the current order total, with or without tax, discount or postage.
     * This method operates on the order currently in the user's session, and should not be used to
     * get the total amount of an order already stored in the database. For such orders, use the
     * Order::getTotalAmount() method.
     *
     * @param bool $with_tax      if true, the total price will include tax amount
     * @param bool $with_discount if true, the total price will include discount, if any
     * @param bool $with_postage  if true, the total price will include the delivery costs, if any.
     *
     * @return float|int the current order amount.
     */
    public function getCurrentOrderTotalAmount($with_tax = true, $with_discount = true, $with_postage = true)
    {
        /** @var Session $session */
        $session = $this->getRequest()->getSession();

        /** @var Cart $cart */
        $cart = $session->getCart();

        /** @var Order $order */
        $order = $session->getOrder();

        /** @var TaxEngine $taxEngine */
        $taxEngine = $this->getContainer()->get("thelia.taxengine");

        /** @var Country $country */
        $country = $taxEngine->getDeliveryCountry();

        $amount = $with_tax ? $cart->getTaxedAmount($country, $with_discount) : $cart->getTotalAmount($with_discount);

        if ($with_postage) {
            $amount += $order->getPostage();
        }

        return $amount;
    }

    /**
     * This method adds new compilers to the Thelia container.
     *
     * You must return an array. This array can contain :
     *    - arrays
     *    - one or many instance(s) of \Symfony\Component\DependencyInjection\Compiler\CompilerPassInterface
     *
     * In the first case, your array must contain 2 indexes. The first is the compiler instance and the second the compilerPass type.
     * Example :
     * return array(
     *  array(
     *      new \MyModule\DependencyInjection\Compiler\MySuperCompilerPass(),
     *      \Symfony\Component\DependencyInjection\Compiler\PassConfig::TYPE_BEFORE_OPTIMIZATION
     *  )
     * );
     *
     * In the second case, just an instance of CompilerPassInterface.
* Example : * return array ( * new \MyModule\DependencyInjection\Compiler\MySuperCompilerPass() * ); * * But you can combine both behaviors * Example : * * return array( * new \MyModule\DependencyInjection\Compiler\MySuperCompilerPass(), * array( * new \MyModule\DependencyInjection\Compiler\MyOtherSuperCompilerPass(), * Symfony\Component\DependencyInjection\Compiler\PassConfig::TYPE_BEFORE_OPTIMIZATION * ) * ); * */ public static function getCompilers() { return array(); } /** * This method is called when the plugin is installed for the first time, using * zip upload method. * * @param ConnectionInterface $con */ public function install(ConnectionInterface $con = null) { // Override this method to do something useful. } /** * This method is called before the module activation, and may prevent it by returning false. * * @param ConnectionInterface $con * * @return bool true to continue module activation, false to prevent it. */ public function preActivation(ConnectionInterface $con = null) { // Override this method to do something useful. return true; } /** * This method is called just after the module was successfully activated. * * @param ConnectionInterface $con */ public function postActivation(ConnectionInterface $con = null) { // Override this method to do something useful. } /** * This method is called before the module de-activation, and may prevent it by returning false. * * @param ConnectionInterface $con * @return bool true to continue module de-activation, false to prevent it. */ public function preDeactivation(ConnectionInterface $con = null) { // Override this method to do something useful. return true; } public function postDeactivation(ConnectionInterface $con = null) { // Override this method to do something useful. } /** * This method is called just before the deletion of the module, giving the module an opportunity * to delete its data. * * @param ConnectionInterface $con * @param bool $deleteModuleData if true, the module should remove all its data from the system. */ public function destroy(ConnectionInterface $con = null, $deleteModuleData = false) { // Override this method to do something useful. } /** * @return array * * This method must be used when your module defines hooks. 
* Override this and return an array of your hooks names to register them * * This returned value must be like the example, only type and code are mandatory * * Example: * * return array( * * // Only register the title in the default language * array( * "type" => TemplateDefinition::BACK_OFFICE, * "code" => "my_super_hook_name", * "title" => "My hook", * "description" => "My hook is really, really great", * ), * * // Manage i18n * array( * "type" => TemplateDefinition::FRONT_OFFICE, * "code" => "my_hook_name", * "title" => array( * "fr_FR" => "Mon Hook", * "en_US" => "My hook", * ), * "description" => array( * "fr_FR" => "Mon hook est vraiment super", * "en_US" => "My hook is really, really great", * ), * "chapo" => array( * "fr_FR" => "Mon hook est vraiment super", * "en_US" => "My hook is really, really great", * ), * "block" => true, * "active" => true * ) * ); */ public function getHooks() { return array(); } public function registerHooks() { $moduleHooks = $this->getHooks(); if (is_array($moduleHooks) && !empty($moduleHooks)) { $allowedTypes = (array) TemplateDefinition::getStandardTemplatesSubdirsIterator(); $defaultLang = Lang::getDefaultLanguage(); $defaultLocale = $defaultLang->getLocale(); $dispatcher = $this->container->get("event_dispatcher"); foreach ($moduleHooks as $hook) { $isValid = is_array($hook) && isset($hook["type"]) && array_key_exists($hook["type"], $allowedTypes) && isset($hook["code"]) && is_string($hook["code"]) && !empty($hook["code"]) ; if (!$isValid) { Tlog::getInstance()->notice("The module ".$this->getCode()." tried to register an invalid hook"); continue; } /** * Create or update hook db entry. */ list($hookModel, $updateData) = $this->createOrUpdateHook($hook, $dispatcher, $defaultLocale); /** * Update translations */ $event = new HookUpdateEvent($hookModel->getId()); foreach ($updateData as $locale => $data) { $event ->setCode($hookModel->getCode()) ->setNative($hookModel->getNative()) ->setByModule($hookModel->getByModule()) ->setActive($hookModel->getActivate()) ->setBlock($hookModel->getBlock()) ->setNative($hookModel->getNative()) ->setType($hookModel->getType()) ->setLocale($locale) ->setChapo($data["chapo"]) ->setTitle($data["title"]) ->setDescription($data["description"]) ; $dispatcher->dispatch(TheliaEvents::HOOK_UPDATE, $event); } } } } protected function createOrUpdateHook(array $hook, EventDispatcherInterface $dispatcher, $defaultLocale) { $hookModel = HookQuery::create()->filterByCode($hook["code"])->findOne(); if ($hookModel === null) { $event = new HookCreateAllEvent(); } else { $event = new HookUpdateEvent($hookModel->getId()); } /** * Get used I18n variables */ $locale = $defaultLocale; list($titles, $descriptions, $chapos) = $this->getHookI18nInfo($hook, $defaultLocale); /** * If the default locale exists * extract it to save it in create action * * otherwise take the first */ if (isset($titles[$defaultLocale])) { $title = $titles[$defaultLocale]; unset($titles[$defaultLocale]); } else { reset($titles); $locale = key($titles); $title = array_shift($titles); } $description = $this->arrayKeyPop($locale, $descriptions); $chapo = $this->arrayKeyPop($locale, $chapos); /** * Set data */ $event ->setBlock(isset($hook["block"]) && (bool) $hook["block"]) ->setLocale($locale) ->setTitle($title) ->setDescription($description) ->setChapo($chapo) ->setType($hook["type"]) ->setCode($hook["code"]) ->setNative(false) ->setByModule(true) ->setActive(isset($hook["active"]) && (bool) $hook["active"]) ; /** * Dispatch the event */ $dispatcher->dispatch( ( 
$hookModel === null ? TheliaEvents::HOOK_CREATE_ALL : TheliaEvents::HOOK_UPDATE),
            $event
        );

        return [
            $event->getHook(),
            $this->formatHookDataForI18n($titles, $descriptions, $chapos)
        ];
    }

    protected function formatHookDataForI18n(array $titles, array $descriptions, array $chapos)
    {
        $locales = array_merge(
            array_keys($titles),
            array_keys($descriptions),
            array_keys($chapos)
        );
        $locales = array_unique($locales);

        $data = array();
        foreach ($locales as $locale) {
            $row = array();
            // Fall back to null for any field that is not defined for this locale.
            $row["title"] = isset($titles[$locale]) ? $titles[$locale] : null;
            $row["description"] = isset($descriptions[$locale]) ? $descriptions[$locale] : null;
            $row["chapo"] = isset($chapos[$locale]) ? $chapos[$locale] : null;

            // Index rows by locale; registerHooks() reads title/description/chapo per locale.
            $data[$locale] = $row;
        }

        return $data;
    }

    protected function getHookI18nInfo(array $hook, $defaultLocale)
    {
        $titles = array();
        $descriptions = array();
        $chapos = array();

        /**
         * Get the defined titles
         */
        if (isset($hook["title"])) {
            $titles = $this->extractI18nValues($hook["title"], $defaultLocale);
        }

        /**
         * Then the defined descriptions
         */
        if (isset($hook["description"])) {
            $descriptions = $this->extractI18nValues($hook["description"], $defaultLocale);
        }

        /**
         * Then the short descriptions
         */
        if (isset($hook["chapo"])) {
            $chapos = $this->extractI18nValues($hook["chapo"], $defaultLocale);
        }

        return [$titles, $descriptions, $chapos];
    }

    protected function extractI18nValues($data, $defaultLocale)
    {
        $returnData = array();

        if (is_array($data)) {
            foreach ($data as $key => $value) {
                if (!is_string($key)) {
                    continue;
                }

                $returnData[$key] = $value;
            }
        } elseif (is_scalar($data)) {
            $returnData[$defaultLocale] = $data;
        }

        return $returnData;
    }

    protected function arrayKeyPop($key, array &$array)
    {
        $value = null;

        if (array_key_exists($key, $array)) {
            $value = $array[$key];

            unset($array[$key]);
        }

        return $value;
    }
}
1
10,476
This namespace is never used.
thelia-thelia
php
@@ -26,6 +26,13 @@
 // see URLOpener.
 // See https://godoc.org/gocloud.dev#hdr-URLs for background information.
 //
+// Message Delivery Semantics
+//
+// AWS SNS and SQS combine to support at-least-once semantics; applications
+// must call Message.Ack after processing a message, or it will be redelivered.
+// See https://godoc.org/gocloud.dev/pubsub#hdr-At_most_once_and_At_least_once_Delivery
+// for more background.
+//
 // Escaping
 //
 // Go CDK supports all UTF-8 strings; to make this work with providers lacking
1
// Copyright 2018 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package awssnssqs provides an implementation of pubsub that uses AWS // SNS (Simple Notification Service) and SQS (Simple Queueing Service). // // URLs // // For pubsub.OpenTopic and pubsub.OpenSubscription, awssnssqs registers // for the scheme's "awssns" and "awssqs" respectively. // The default URL opener will use an AWS session with the default credentials // and configuration; see https://docs.aws.amazon.com/sdk-for-go/api/aws/session/ // for more details. // To customize the URL opener, or for more details on the URL format, // see URLOpener. // See https://godoc.org/gocloud.dev#hdr-URLs for background information. // // Escaping // // Go CDK supports all UTF-8 strings; to make this work with providers lacking // full UTF-8 support, strings must be escaped (during writes) and unescaped // (during reads). The following escapes are required for awssnssqs: // - Metadata keys: Characters other than "a-zA-z0-9_-.", and additionally "." // when it's at the start of the key or the previous character was ".", // are escaped using "__0x<hex>__". These characters were determined by // experimentation. // - Metadata values: Escaped using URL encoding. // - Message body: AWS SNS/SQS only supports UTF-8 strings. See the // BodyBase64Encoding enum in TopicOptions for strategies on how to send // non-UTF-8 message bodies. By default, non-UTF-8 message bodies are base64 // encoded. // // As // // awssnssqs exposes the following types for As: // - Topic: *sns.SNS // - Subscription: *sqs.SQS // - Message: *sqs.Message // - Error: awserror.Error package awssnssqs // import "gocloud.dev/pubsub/awssnssqs" import ( "context" "encoding/base64" "encoding/json" "fmt" "net/url" "path" "strconv" "sync" "time" "unicode/utf8" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sns" "github.com/aws/aws-sdk-go/service/sqs" "github.com/google/wire" gcaws "gocloud.dev/aws" "gocloud.dev/gcerrors" "gocloud.dev/internal/batcher" "gocloud.dev/internal/escape" "gocloud.dev/internal/gcerr" "gocloud.dev/pubsub" "gocloud.dev/pubsub/driver" ) const ( // base64EncodedKey is the Message Attribute key used to flag that the // message body is base64 encoded. base64EncodedKey = "base64encoded" // How long ReceiveBatch should wait if no messages are available; controls // the poll interval of requests to SQS. 
noMessagesPollDuration = 250 * time.Millisecond ) var sendBatcherOpts = &batcher.Options{ MaxBatchSize: 1, // SendBatch only supports one message at a time MaxHandlers: 100, // max concurrency for sends } var recvBatcherOpts = &batcher.Options{ // SQS supports receiving at most 10 messages at a time: // https://godoc.org/github.com/aws/aws-sdk-go/service/sqs#SQS.ReceiveMessage MaxBatchSize: 10, MaxHandlers: 100, // max concurrency for receives } var ackBatcherOpts = &batcher.Options{ // SQS supports deleting/updating at most 10 messages at a time: // https://godoc.org/github.com/aws/aws-sdk-go/service/sqs#SQS.DeleteMessageBatch // https://godoc.org/github.com/aws/aws-sdk-go/service/sqs#SQS.ChangeMessageVisibilityBatch MaxBatchSize: 10, MaxHandlers: 100, // max concurrency for acks } func init() { lazy := new(lazySessionOpener) pubsub.DefaultURLMux().RegisterTopic(SNSScheme, lazy) pubsub.DefaultURLMux().RegisterSubscription(SQSScheme, lazy) } // Set holds Wire providers for this package. var Set = wire.NewSet( SubscriptionOptions{}, TopicOptions{}, URLOpener{}, ) // lazySessionOpener obtains the AWS session from the environment on the first // call to OpenXXXURL. type lazySessionOpener struct { init sync.Once opener *URLOpener err error } func (o *lazySessionOpener) defaultOpener() (*URLOpener, error) { o.init.Do(func() { sess, err := session.NewSessionWithOptions(session.Options{SharedConfigState: session.SharedConfigEnable}) if err != nil { o.err = err return } o.opener = &URLOpener{ ConfigProvider: sess, } }) return o.opener, o.err } func (o *lazySessionOpener) OpenTopicURL(ctx context.Context, u *url.URL) (*pubsub.Topic, error) { opener, err := o.defaultOpener() if err != nil { return nil, fmt.Errorf("open topic %v: failed to open default session: %v", u, err) } return opener.OpenTopicURL(ctx, u) } func (o *lazySessionOpener) OpenSubscriptionURL(ctx context.Context, u *url.URL) (*pubsub.Subscription, error) { opener, err := o.defaultOpener() if err != nil { return nil, fmt.Errorf("open subscription %v: failed to open default session: %v", u, err) } return opener.OpenSubscriptionURL(ctx, u) } // SNSScheme is the URL scheme for pubsub.OpenTopic awssnssqs registers its URLOpeners under on pubsub.DefaultMux. const SNSScheme = "awssns" // SQSScheme is the URL scheme for pubsub.OpenSubscription awssnssqs registers its URLOpeners under on pubsub.DefaultMux. const SQSScheme = "awssqs" // URLOpener opens AWS SNS/SQS URLs like "awssns://sns-topic-arn" for // topics or "awssqs://sqs-queue-url" for subscriptions. // // For topics, the URL's host+path is used as the topic Amazon Resource Name // (ARN). // // For subscriptions, the URL's host+path is prefixed with "https://" to create // the queue URL. // // See gocloud.dev/aws/ConfigFromURLParams for supported query parameters // that affect the default AWS session. type URLOpener struct { // ConfigProvider configures the connection to AWS. ConfigProvider client.ConfigProvider // TopicOptions specifies the options to pass to OpenTopic. TopicOptions TopicOptions // SubscriptionOptions specifies the options to pass to OpenSubscription. SubscriptionOptions SubscriptionOptions } // OpenTopicURL opens a pubsub.Topic based on u. 
func (o *URLOpener) OpenTopicURL(ctx context.Context, u *url.URL) (*pubsub.Topic, error) { configProvider := &gcaws.ConfigOverrider{ Base: o.ConfigProvider, } overrideCfg, err := gcaws.ConfigFromURLParams(u.Query()) if err != nil { return nil, fmt.Errorf("open topic %v: %v", u, err) } configProvider.Configs = append(configProvider.Configs, overrideCfg) topicARN := path.Join(u.Host, u.Path) return OpenTopic(ctx, configProvider, topicARN, &o.TopicOptions), nil } // OpenSubscriptionURL opens a pubsub.Subscription based on u. func (o *URLOpener) OpenSubscriptionURL(ctx context.Context, u *url.URL) (*pubsub.Subscription, error) { configProvider := &gcaws.ConfigOverrider{ Base: o.ConfigProvider, } overrideCfg, err := gcaws.ConfigFromURLParams(u.Query()) if err != nil { return nil, fmt.Errorf("open subscription %v: %v", u, err) } configProvider.Configs = append(configProvider.Configs, overrideCfg) qURL := "https://" + path.Join(u.Host, u.Path) return OpenSubscription(ctx, configProvider, qURL, &o.SubscriptionOptions), nil } type topic struct { client *sns.SNS arn string opts *TopicOptions } // BodyBase64Encoding is an enum of strategies for when to base64 message // bodies. type BodyBase64Encoding int const ( // NonUTF8Only means that message bodies that are valid UTF-8 encodings are // sent as-is. Invalid UTF-8 message bodies are base64 encoded, and a // MessageAttribute with key "base64encoded" is added to the message. // When receiving messages, the "base64encoded" attribute is used to determine // whether to base64 decode, and is then filtered out. NonUTF8Only BodyBase64Encoding = 0 // Always means that all message bodies are base64 encoded. // A MessageAttribute with key "base64encoded" is added to the message. // When receiving messages, the "base64encoded" attribute is used to determine // whether to base64 decode, and is then filtered out. Always BodyBase64Encoding = 1 // Never means that message bodies are never base64 encoded. Non-UTF-8 // bytes in message bodies may be modified by SNS/SQS. Never BodyBase64Encoding = 2 ) func (e BodyBase64Encoding) wantEncode(b []byte) bool { switch e { case Always: return true case Never: return false case NonUTF8Only: return !utf8.Valid(b) } panic("unreachable") } // TopicOptions contains configuration options for topics. type TopicOptions struct { // BodyBase64Encoding determines when message bodies are base64 encoded. // The default is NonUTF8Only. BodyBase64Encoding BodyBase64Encoding } // OpenTopic opens the a topic that sends to the SNS topic with the given Amazon // Resource Name (ARN). func OpenTopic(ctx context.Context, sess client.ConfigProvider, topicARN string, opts *TopicOptions) *pubsub.Topic { return pubsub.NewTopic(openTopic(ctx, sess, topicARN, opts), sendBatcherOpts) } // openTopic returns the driver for OpenTopic. This function exists so the test // harness can get the driver interface implementation if it needs to. func openTopic(ctx context.Context, sess client.ConfigProvider, topicARN string, opts *TopicOptions) driver.Topic { if opts == nil { opts = &TopicOptions{} } return &topic{ client: sns.New(sess), arn: topicARN, opts: opts, } } var stringDataType = aws.String("String") // SendBatch implements driver.Topic.SendBatch. 
func (t *topic) SendBatch(ctx context.Context, dms []*driver.Message) error { if len(dms) != 1 { panic("awssnssqs.SendBatch should only get one message at a time") } dm := dms[0] attrs := map[string]*sns.MessageAttributeValue{} for k, v := range dm.Metadata { // See the package comments for more details on escaping of metadata // keys & values. k = escape.HexEscape(k, func(runes []rune, i int) bool { c := runes[i] switch { case escape.IsASCIIAlphanumeric(c): return false case c == '_' || c == '-': return false case c == '.' && i != 0 && runes[i-1] != '.': return false } return true }) attrs[k] = &sns.MessageAttributeValue{ DataType: stringDataType, StringValue: aws.String(escape.URLEscape(v)), } } var body string if t.opts.BodyBase64Encoding.wantEncode(dm.Body) { body = base64.StdEncoding.EncodeToString(dm.Body) attrs[base64EncodedKey] = &sns.MessageAttributeValue{ DataType: stringDataType, StringValue: aws.String("true"), } } else { body = string(dm.Body) } _, err := t.client.PublishWithContext(ctx, &sns.PublishInput{ Message: aws.String(body), MessageAttributes: attrs, TopicArn: &t.arn, }) return err } // IsRetryable implements driver.Topic.IsRetryable. func (t *topic) IsRetryable(error) bool { // The client handles retries. return false } // As implements driver.Topic.As. func (t *topic) As(i interface{}) bool { c, ok := i.(**sns.SNS) if !ok { return false } *c = t.client return true } // ErrorAs implements driver.Topic.ErrorAs. func (t *topic) ErrorAs(err error, i interface{}) bool { return errorAs(err, i) } // ErrorCode implements driver.Topic.ErrorCode. func (t *topic) ErrorCode(err error) gcerrors.ErrorCode { return errorCode(err) } func errorCode(err error) gcerrors.ErrorCode { ae, ok := err.(awserr.Error) if !ok { return gcerr.Unknown } ec, ok := errorCodeMap[ae.Code()] if !ok { return gcerr.Unknown } return ec } var errorCodeMap = map[string]gcerrors.ErrorCode{ sns.ErrCodeAuthorizationErrorException: gcerr.PermissionDenied, sns.ErrCodeKMSAccessDeniedException: gcerr.PermissionDenied, sns.ErrCodeKMSDisabledException: gcerr.FailedPrecondition, sns.ErrCodeKMSInvalidStateException: gcerr.FailedPrecondition, sns.ErrCodeKMSOptInRequired: gcerr.FailedPrecondition, sqs.ErrCodeMessageNotInflight: gcerr.FailedPrecondition, sqs.ErrCodePurgeQueueInProgress: gcerr.FailedPrecondition, sqs.ErrCodeQueueDeletedRecently: gcerr.FailedPrecondition, sqs.ErrCodeQueueDoesNotExist: gcerr.FailedPrecondition, sqs.ErrCodeQueueNameExists: gcerr.FailedPrecondition, sns.ErrCodeInternalErrorException: gcerr.Internal, sns.ErrCodeInvalidParameterException: gcerr.InvalidArgument, sns.ErrCodeInvalidParameterValueException: gcerr.InvalidArgument, sqs.ErrCodeBatchEntryIdsNotDistinct: gcerr.InvalidArgument, sqs.ErrCodeBatchRequestTooLong: gcerr.InvalidArgument, sqs.ErrCodeEmptyBatchRequest: gcerr.InvalidArgument, sqs.ErrCodeInvalidAttributeName: gcerr.InvalidArgument, sqs.ErrCodeInvalidBatchEntryId: gcerr.InvalidArgument, sqs.ErrCodeInvalidIdFormat: gcerr.InvalidArgument, sqs.ErrCodeInvalidMessageContents: gcerr.InvalidArgument, sqs.ErrCodeReceiptHandleIsInvalid: gcerr.InvalidArgument, sqs.ErrCodeTooManyEntriesInBatchRequest: gcerr.InvalidArgument, sqs.ErrCodeUnsupportedOperation: gcerr.InvalidArgument, sns.ErrCodeInvalidSecurityException: gcerr.PermissionDenied, sns.ErrCodeKMSNotFoundException: gcerr.NotFound, sns.ErrCodeNotFoundException: gcerr.NotFound, sns.ErrCodeFilterPolicyLimitExceededException: gcerr.ResourceExhausted, sns.ErrCodeSubscriptionLimitExceededException: gcerr.ResourceExhausted, 
sns.ErrCodeTopicLimitExceededException: gcerr.ResourceExhausted, sqs.ErrCodeOverLimit: gcerr.ResourceExhausted, sns.ErrCodeKMSThrottlingException: gcerr.ResourceExhausted, sns.ErrCodeThrottledException: gcerr.ResourceExhausted, "RequestCanceled": gcerr.Canceled, sns.ErrCodeEndpointDisabledException: gcerr.Unknown, sns.ErrCodePlatformApplicationDisabledException: gcerr.Unknown, } type subscription struct { client *sqs.SQS qURL string } // SubscriptionOptions will contain configuration for subscriptions. type SubscriptionOptions struct{} // OpenSubscription opens a on AWS SQS for the given SQS client and queue URL. // The queue is assumed to be subscribed to some SNS topic, though there is no // check for this. func OpenSubscription(ctx context.Context, sess client.ConfigProvider, qURL string, opts *SubscriptionOptions) *pubsub.Subscription { return pubsub.NewSubscription(openSubscription(ctx, sess, qURL), recvBatcherOpts, ackBatcherOpts) } // openSubscription returns a driver.Subscription. func openSubscription(ctx context.Context, sess client.ConfigProvider, qURL string) driver.Subscription { return &subscription{client: sqs.New(sess), qURL: qURL} } // ReceiveBatch implements driver.Subscription.ReceiveBatch. func (s *subscription) ReceiveBatch(ctx context.Context, maxMessages int) ([]*driver.Message, error) { output, err := s.client.ReceiveMessageWithContext(ctx, &sqs.ReceiveMessageInput{ QueueUrl: aws.String(s.qURL), MaxNumberOfMessages: aws.Int64(int64(maxMessages)), }) if err != nil { return nil, err } var ms []*driver.Message for _, m := range output.Messages { type MsgBody struct { Message string MessageAttributes map[string]struct{ Value string } } var body MsgBody if err := json.Unmarshal([]byte(*m.Body), &body); err != nil { return nil, err } // See BodyBase64Encoding for details on when we base64 decode message bodies. decodeIt := false attrs := map[string]string{} for k, v := range body.MessageAttributes { if k == base64EncodedKey { decodeIt = true continue } // See the package comments for more details on escaping of metadata // keys & values. attrs[escape.HexUnescape(k)] = escape.URLUnescape(v.Value) } var b []byte if decodeIt { var err error b, err = base64.StdEncoding.DecodeString(body.Message) if err != nil { // Fall back to using the raw message. b = []byte(body.Message) } } else { b = []byte(body.Message) } m2 := &driver.Message{ Body: b, Metadata: attrs, AckID: m.ReceiptHandle, AsFunc: func(i interface{}) bool { p, ok := i.(**sqs.Message) if !ok { return false } *p = m return true }, } ms = append(ms, m2) } if len(ms) == 0 { // When we return no messages and no error, the portable type will call // ReceiveBatch again immediately. Sleep for a bit to avoid hammering SQS // with RPCs. time.Sleep(noMessagesPollDuration) } return ms, nil } // SendAcks implements driver.Subscription.SendAcks. func (s *subscription) SendAcks(ctx context.Context, ids []driver.AckID) error { req := &sqs.DeleteMessageBatchInput{QueueUrl: aws.String(s.qURL)} for _, id := range ids { req.Entries = append(req.Entries, &sqs.DeleteMessageBatchRequestEntry{ Id: aws.String(strconv.Itoa(len(req.Entries))), ReceiptHandle: id.(*string), }) } resp, err := s.client.DeleteMessageBatchWithContext(ctx, req) if err != nil { return err } // Note: DeleteMessageBatch doesn't return failures when you try // to Delete an id that isn't found. 
if numFailed := len(resp.Failed); numFailed > 0 { first := resp.Failed[0] return awserr.New(aws.StringValue(first.Code), fmt.Sprintf("sqs.DeleteMessageBatch failed for %d message(s): %s", numFailed, aws.StringValue(first.Message)), nil) } return nil } // SendNacks implements driver.Subscription.SendNacks. func (s *subscription) SendNacks(ctx context.Context, ids []driver.AckID) error { req := &sqs.ChangeMessageVisibilityBatchInput{QueueUrl: aws.String(s.qURL)} for _, id := range ids { req.Entries = append(req.Entries, &sqs.ChangeMessageVisibilityBatchRequestEntry{ Id: aws.String(strconv.Itoa(len(req.Entries))), ReceiptHandle: id.(*string), VisibilityTimeout: aws.Int64(0), }) } resp, err := s.client.ChangeMessageVisibilityBatchWithContext(ctx, req) if err != nil { return err } // Note: ChangeMessageVisibilityBatch returns failures when you try to // modify an id that isn't found; drop those. var firstFail *sqs.BatchResultErrorEntry numFailed := 0 for _, fail := range resp.Failed { if aws.StringValue(fail.Code) == sqs.ErrCodeReceiptHandleIsInvalid { continue } if numFailed == 0 { firstFail = fail } numFailed++ } if numFailed > 0 { return awserr.New(aws.StringValue(firstFail.Code), fmt.Sprintf("sqs.ChangeMessageVisibilityBatch failed for %d message(s): %s", numFailed, aws.StringValue(firstFail.Message)), nil) } return nil } // IsRetryable implements driver.Subscription.IsRetryable. func (*subscription) IsRetryable(error) bool { // The client handles retries. return false } // As implements driver.Subscription.As. func (s *subscription) As(i interface{}) bool { c, ok := i.(**sqs.SQS) if !ok { return false } *c = s.client return true } // ErrorAs implements driver.Subscription.ErrorAs. func (s *subscription) ErrorAs(err error, i interface{}) bool { return errorAs(err, i) } // ErrorCode implements driver.Subscription.ErrorCode. func (*subscription) ErrorCode(err error) gcerrors.ErrorCode { return errorCode(err) } func errorAs(err error, i interface{}) bool { e, ok := err.(awserr.Error) if !ok { return false } p, ok := i.(*awserr.Error) if !ok { return false } *p = e return true } // AckFunc implements driver.Subscription.AckFunc. func (*subscription) AckFunc() func() { return nil }
1
16,507
Well, if you call `Nack`, it _will_ be redelivered. So maybe reword.
google-go-cloud
go
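The at-least-once contract documented by the patch above means a consumer must explicitly acknowledge every message it finishes processing. A minimal Go sketch of that Receive/Ack loop follows; it is not part of the record, and the queue URL is a placeholder:

```go
package main

import (
	"context"
	"log"

	"gocloud.dev/pubsub"
	_ "gocloud.dev/pubsub/awssnssqs" // registers the awssqs:// URL scheme
)

func main() {
	ctx := context.Background()

	// Open an SQS-backed subscription; host+path form the queue URL.
	subscription, err := pubsub.OpenSubscription(ctx,
		"awssqs://sqs.us-east-2.amazonaws.com/123456789012/myqueue")
	if err != nil {
		log.Fatal(err)
	}
	defer subscription.Shutdown(ctx)

	for {
		msg, err := subscription.Receive(ctx)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("processing %q", msg.Body)
		// Ack marks the message as processed. Skipping the Ack (or, as the
		// review note below points out, calling Nack) makes SQS redeliver
		// the message after the visibility timeout.
		msg.Ack()
	}
}
```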
@@ -7,7 +7,9 @@
 
 <div class="text-box-wrapper">
   <div class="text-box">
-    <p class="videowrapper"><%= wistia_video_embed('39245f55fd') %></p>
+    <p class="videowrapper">
+      <iframe allowtransparency="true" class="wistia_embed" frameborder="0" height="367" name="wistia_embed" scrolling="no" src="https://fast.wistia.com/embed/iframe/39245f55fd?videoWidth=653&videoHeight=367&controlsVisibleOnLoad=true" width="653"></iframe>
+    </p>
     <h3>An interview with a Ruby developer who uses Linux and full Open Source stack</h3>
 
     <div class='assets'>
1
<% content_for :page_title, "An OSS Lifestyle" %> <% content_for :subject, "An OSS Lifestyle" %> <div class="summary"> <p>In this <b>free 34 minute video</b> Mike Burns, Linux user and thoughtbot developer, and Chad Pytel, Founder and CEO of thoughtbot, discuss the current state of using Linux as your operating system and why, as a developer, you might want to use it.</p> </div> <div class="text-box-wrapper"> <div class="text-box"> <p class="videowrapper"><%= wistia_video_embed('39245f55fd') %></p> <h3>An interview with a Ruby developer who uses Linux and full Open Source stack</h3> <div class='assets'> Download: <a class='asset original' href='http://thoughtbot.wistia.com/medias/1094741/download?asset=original'> Original (720p) <span class='size'>3 GB</span> </a> <a class='asset iphone' href='http://thoughtbot.wistia.com/medias/1094741/download?asset=iphone'> iPhone <span class='size'>200 MB</span> </a> <a class='asset hd_mp4' href='http://thoughtbot.wistia.com/medias/1094741/download?asset=hd_mp4'> HD MP4 <span class='size'>500 MB</span> </a> <a class='asset hd_flash' href='http://thoughtbot.wistia.com/medias/1094741/download?asset=hd_flash'> HD Flash <span class='size'>500 MB</span> </a> <a class='asset flash' href='http://thoughtbot.wistia.com/medias/1094741/download?asset=flash'> Flash <span class='size'>200 MB</span> </a> </div> </div> </div>
1
9,586
Why did we make this a static string?
thoughtbot-upcase
rb
@@ -148,7 +148,7 @@ def bigquery_dataset(item):
     parent = item.parent()
     name = '//bigquery.googleapis.com/projects/{}/datasets/{}'.format(
         parent['projectNumber'], item['datasetReference']['datasetId'])
-    asset_type = 'google.bigquery.Dataset'
+    asset_type = 'google.cloud.bigquery.Dataset'
     parent_name = '//cloudresourcemanager.googleapis.com/projects/{}'.format(
         parent['projectNumber'])
     return _create_asset(name, asset_type, parent_name, item.data(), None)
1
# Copyright 2018 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Update CAI mock dump data with new resources. When the inventory mock_gcp_results.py file is updated, then this script should be run to update the cai dump files with the additional resources. From the top forseti-security dir, run: PYTHONPATH=. python tests/services/inventory/update_cai_dumps.py """ import json import os from tests.services.inventory import gcp_api_mocks from google.cloud.forseti.common.util import logger from google.cloud.forseti.services.base.config import InventoryConfig from google.cloud.forseti.services.inventory.base.progress import Progresser from google.cloud.forseti.services.inventory.base.storage import Memory as MemoryStorage from google.cloud.forseti.services.inventory.crawler import run_crawler LOGGER = logger.get_logger(__name__) MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) RESOURCE_DUMP_FILE = os.path.join(MODULE_DIR, 'test_data', 'mock_cai_resources.dump') IAM_POLICY_DUMP_FILE = os.path.join(MODULE_DIR, 'test_data', 'mock_cai_iam_policies.dump') ADDITIONAL_RESOURCES_FILE = os.path.join(MODULE_DIR, 'test_data', 'additional_cai_resources.dump') ADDITIONAL_IAM_POLCIIES_FILE = os.path.join(MODULE_DIR, 'test_data', 'additional_cai_iam_policies.dump') class TestServiceConfig(object): """ServiceConfig stub.""" def __init__(self, engine, inventory_config): self.engine = engine self.inventory_config = inventory_config def get_engine(self): """Stub.""" return self.engine class NullProgresser(Progresser): """No-op progresser to suppress output.""" def __init__(self): super(NullProgresser, self).__init__() def on_new_object(self, resource): pass def on_warning(self, warning): LOGGER.error("Progressor Warning: %s", warning) pass def on_error(self, error): LOGGER.exception("Progressor Error: %s", error) pass def get_summary(self): pass def _create_asset(name, asset_type, parent_name, data_dict, iam_policy_dict): resource = { 'name': name, 'asset_type': asset_type, 'resource': {'data': data_dict}} if parent_name: resource['resource']['parent'] = parent_name resource_data = json.dumps(resource, separators=(',',':'), sort_keys=True) if iam_policy_dict: iam_policy = { 'name': name, 'asset_type': asset_type, 'iam_policy': iam_policy_dict} iam_policy_data = json.dumps(iam_policy, separators=(',',':'), sort_keys=True) else: iam_policy_data = None return (resource_data, iam_policy_data) def organization(item): name = '//cloudresourcemanager.googleapis.com/{}'.format(item['name']) asset_type = 'google.cloud.resourcemanager.Organization' return _create_asset(name, asset_type, None, item.data(), item.get_iam_policy()) def folder(item): name = '//cloudresourcemanager.googleapis.com/{}'.format(item['name']) asset_type = 'google.cloud.resourcemanager.Folder' parent_name = '//cloudresourcemanager.googleapis.com/{}'.format( item['parent']) return _create_asset(name, asset_type, parent_name, item.data(), item.get_iam_policy()) def project(item): name = 
'//cloudresourcemanager.googleapis.com/projects/{}'.format( item['projectNumber']) asset_type = 'google.cloud.resourcemanager.Project' parent_name = '//cloudresourcemanager.googleapis.com/{}s/{}'.format( item['parent']['type'], item['parent']['id']) return _create_asset(name, asset_type, parent_name, item.data(), item.get_iam_policy()) def appengine_app(item): parent = item.parent() name = '//appengine.googleapis.com/{}'.format(item['name']) asset_type = 'google.appengine.Application' parent_name = '//cloudresourcemanager.googleapis.com/projects/{}'.format( parent['projectNumber']) return _create_asset(name, asset_type, parent_name, item.data(), None) def appengine_service(item): name = '//appengine.googleapis.com/{}'.format(item['name']) asset_type = 'google.appengine.Service' return _create_asset(name, asset_type, None, item.data(), None) def appengine_version(item): name = '//appengine.googleapis.com/{}'.format(item['name']) asset_type = 'google.appengine.Version' return _create_asset(name, asset_type, None, item.data(), None) def bigquery_dataset(item): parent = item.parent() name = '//bigquery.googleapis.com/projects/{}/datasets/{}'.format( parent['projectNumber'], item['datasetReference']['datasetId']) asset_type = 'google.bigquery.Dataset' parent_name = '//cloudresourcemanager.googleapis.com/projects/{}'.format( parent['projectNumber']) return _create_asset(name, asset_type, parent_name, item.data(), None) def billing_account(item): name = '//cloudbilling.googleapis.com/{}'.format(item['name']) asset_type = 'google.cloud.billing.BillingAccount' parent_name = '' return _create_asset(name, asset_type, parent_name, item.data(), item.get_iam_policy()) def bucket(item): parent = item.parent() name = '//storage.googleapis.com/{}'.format(item['name']) asset_type = 'google.cloud.storage.Bucket' parent_name = '//cloudresourcemanager.googleapis.com/projects/{}'.format( parent['projectNumber']) data = item.data() # CAI does not include acl data. 
data.pop('acl') data.pop('defaultObjectAcl') return _create_asset(name, asset_type, parent_name, data, item.get_iam_policy()) def role(item): parent = item.parent() if not parent: return (None, None) if parent.type() == 'organization': parent_name = '//cloudresourcemanager.googleapis.com/{}'.format( parent['name']) else: parent_name = '//cloudresourcemanager.googleapis.com/projects/{}'.format( parent['projectNumber']) name = '//iam.googleapis.com/{}'.format(item['name']) asset_type = 'google.iam.Role' return _create_asset(name, asset_type, parent_name, item.data(), None) def serviceaccount(item): parent = item.parent() name = '//iam.googleapis.com/projects/{}/serviceAccounts/{}'.format( item['projectId'], item['uniqueId']) asset_type = 'google.iam.ServiceAccount' parent_name = '//cloudresourcemanager.googleapis.com/projects/{}'.format( parent['projectNumber']) return _create_asset(name, asset_type, parent_name, item.data(), item.get_iam_policy()) def kubernetes_cluster(item): parent = item.parent() name = ('//container.googleapis.com/v1/projects/{}/locations/{}/' 'clusters/{}'.format(parent['projectId'], item['zone'], item['name'])) asset_type = 'google.container.Cluster' parent_name = '//cloudresourcemanager.googleapis.com/projects/{}'.format( parent['projectNumber']) return _create_asset(name, asset_type, parent_name, item.data(), None) def _create_compute_asset(item, asset_type): parent = item.parent() self_link = '/'.join(item['selfLink'].split('/')[5:]) name = '//compute.googleapis.com/{}'.format(self_link) parent_name = '//cloudresourcemanager.googleapis.com/projects/{}'.format( parent['projectNumber']) return _create_asset(name, asset_type, parent_name, item.data(), None) def backendservice(item): return _create_compute_asset(item, 'google.compute.BackendService') def disk(item): return _create_compute_asset(item, 'google.compute.Disk') def firewall(item): return _create_compute_asset(item, 'google.compute.Firewall') def forwardingrule(item): return _create_compute_asset(item, 'google.compute.ForwardingRule') def image(item): return _create_compute_asset(item, 'google.compute.Image') def instance(item): return _create_compute_asset(item, 'google.compute.Instance') def instancegroup(item): return _create_compute_asset(item, 'google.compute.InstanceGroup') def instancegroupmanager(item): return _create_compute_asset(item, 'google.compute.InstanceGroupManager') def instancetemplate(item): return _create_compute_asset(item, 'google.compute.InstanceTemplate') def network(item): return _create_compute_asset(item, 'google.compute.Network') def snapshot(item): return _create_compute_asset(item, 'google.compute.Snapshot') def subnetwork(item): return _create_compute_asset(item, 'google.compute.Subnetwork') CAI_TYPE_MAP = { 'organization': organization, 'folder': folder, 'project': project, 'appengine_app': appengine_app, 'appengine_service': appengine_service, 'appengine_version': appengine_version, 'billing_account': billing_account, 'bucket': bucket, 'backendservice': backendservice, 'dataset': bigquery_dataset, 'disk': disk, 'firewall': firewall, 'forwardingrule': forwardingrule, 'image': image, 'instance': instance, 'instancegroup': instancegroup, 'instancegroupmanager': instancegroupmanager, 'instancetemplate': instancetemplate, 'kubernetes_cluster': kubernetes_cluster, 'network': network, 'role': role, 'serviceaccount': serviceaccount, 'snapshot': snapshot, 'subnetwork': subnetwork, } def write_data(data, destination): """Write data to destination.""" with open(destination, 'w') as f: 
for line in data: f.write(line) f.write('\n') def convert_item_to_assets(item): """Convert the data in an item to Asset protos in json format.""" if item.type() in CAI_TYPE_MAP: func = CAI_TYPE_MAP[item.type()] return func(item) return None, None def main(): """Create CAI dump files from fake data.""" logger.enable_console_log() config = InventoryConfig( gcp_api_mocks.ORGANIZATION_ID, '', {}, '', {'enabled': False}) service_config = TestServiceConfig('sqlite', config) config.set_service_config(service_config) resources = [] iam_policies = [] with MemoryStorage() as storage: progresser = NullProgresser() with gcp_api_mocks.mock_gcp(): run_crawler(storage, progresser, config, parallel=False) for item in storage.mem.values(): (resource, iam_policy) = convert_item_to_assets(item) if resource: resources.append(resource) if iam_policy: iam_policies.append(iam_policy) with open(ADDITIONAL_RESOURCES_FILE, 'r') as f: for line in f: if line.startswith('#'): continue resources.append(line.strip()) with open(ADDITIONAL_IAM_POLCIIES_FILE, 'r') as f: for line in f: if line.startswith('#'): continue iam_policies.append(line.strip()) write_data(resources, RESOURCE_DUMP_FILE) write_data(iam_policies, IAM_POLICY_DUMP_FILE) if __name__ == '__main__': main()
1
32,917
Please re-run PYTHONPATH=. python tests/services/inventory/update_cai_dumps.py following the instructions at the top of this file to ensure the test files have the correct CAI asset type.
forseti-security-forseti-security
py
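After the rename above, a regenerated mock_cai_resources.dump line for a BigQuery dataset would carry the new asset type. A hedged Go sketch of such an asset record, mirroring the fields written by _create_asset in the script above (project number and dataset id are made up, and the resource data field is omitted for brevity):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// asset mirrors the name/asset_type/resource fields emitted by _create_asset.
type asset struct {
	Name      string `json:"name"`
	AssetType string `json:"asset_type"`
	Resource  struct {
		Parent string `json:"parent"`
	} `json:"resource"`
}

func main() {
	a := asset{
		Name:      "//bigquery.googleapis.com/projects/1234567890/datasets/dataset1",
		AssetType: "google.cloud.bigquery.Dataset", // the corrected CAI type
	}
	a.Resource.Parent = "//cloudresourcemanager.googleapis.com/projects/1234567890"

	b, err := json.Marshal(a)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // one line of the dump file
}
```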
@@ -66,9 +66,9 @@ func TestCreateContract(t *testing.T) {
 	addr := identityset.Address(28)
 	_, err = accountutil.LoadOrCreateAccount(sm, addr.String())
 	require.NoError(err)
-	stateDB := NewStateDBAdapter(sm, 0, !cfg.Genesis.IsAleutian(0),
-		cfg.Genesis.IsGreenland(0), cfg.Genesis.IsKamchatka(0),
-		cfg.Genesis.IsLordHowe(0), hash.ZeroHash256)
+	opt := []StateDBAdapterOption{NotFixTopicCopyBugOption()}
+	stateDB := NewStateDBAdapter(sm, 0, hash.ZeroHash256, opt...)
+
 	contract := addr.Bytes()
 	var evmContract common.Address
 	copy(evmContract[:], contract[:])
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package evm import ( "math/big" "testing" "github.com/ethereum/go-ethereum/common" "github.com/golang/mock/gomock" "github.com/pkg/errors" "github.com/stretchr/testify/require" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-core/action/protocol" accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/db/batch" "github.com/iotexproject/iotex-core/state" "github.com/iotexproject/iotex-core/test/identityset" "github.com/iotexproject/iotex-core/test/mock/mock_chainmanager" "github.com/iotexproject/iotex-core/testutil" ) func TestCreateContract(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) testTriePath, err := testutil.PathOfTempFile("trie") require.NoError(err) cfg := config.Default cfg.Chain.TrieDBPath = testTriePath sm := mock_chainmanager.NewMockStateManager(ctrl) cb := batch.NewCachedBatch() sm.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn( func(account interface{}, opts ...protocol.StateOption) (uint64, error) { cfg, err := protocol.CreateStateConfig(opts...) if err != nil { return 0, err } val, err := cb.Get("state", cfg.Key) if err != nil { return 0, state.ErrStateNotExist } return 0, state.Deserialize(account, val) }).AnyTimes() sm.EXPECT().PutState(gomock.Any(), gomock.Any()).DoAndReturn( func(account interface{}, opts ...protocol.StateOption) (uint64, error) { cfg, err := protocol.CreateStateConfig(opts...) 
if err != nil { return 0, err } ss, err := state.Serialize(account) if err != nil { return 0, err } cb.Put("state", cfg.Key, ss, "failed to put state") return 0, nil }).AnyTimes() addr := identityset.Address(28) _, err = accountutil.LoadOrCreateAccount(sm, addr.String()) require.NoError(err) stateDB := NewStateDBAdapter(sm, 0, !cfg.Genesis.IsAleutian(0), cfg.Genesis.IsGreenland(0), cfg.Genesis.IsKamchatka(0), cfg.Genesis.IsLordHowe(0), hash.ZeroHash256) contract := addr.Bytes() var evmContract common.Address copy(evmContract[:], contract[:]) stateDB.SetCode(evmContract, bytecode) // contract exist codeHash := stateDB.GetCodeHash(evmContract) var emptyEVMHash common.Hash require.NotEqual(emptyEVMHash, codeHash) v := stateDB.GetCode(evmContract) require.Equal(bytecode, v) // non-existing contract addr1 := hash.Hash160b([]byte("random")) var evmAddr1 common.Address copy(evmAddr1[:], addr1[:]) h := stateDB.GetCodeHash(evmAddr1) require.Equal(emptyEVMHash, h) require.Nil(stateDB.GetCode(evmAddr1)) require.NoError(stateDB.CommitContracts()) stateDB.clear() // reload same contract contract1, err := accountutil.LoadOrCreateAccount(sm, addr.String()) require.NoError(err) require.Equal(codeHash[:], contract1.CodeHash) } func TestLoadStoreCommit(t *testing.T) { require := require.New(t) testLoadStoreCommit := func(cfg config.Config, t *testing.T, enableAsync bool) { ctrl := gomock.NewController(t) sm, err := initMockStateManager(ctrl) require.NoError(err) cntr1, err := newContract(hash.BytesToHash160(c1[:]), &state.Account{}, sm, enableAsync) require.NoError(err) tests := []cntrTest{ { cntr1, []code{ {c1, []byte("2nd contract creation")}, }, []set{ {k1b, v1b[:], nil}, {k2b, v2b[:], nil}, }, }, { cntr1, []code{ {c2, bytecode}, }, []set{ {k1b, v4b[:], nil}, {k2b, v3b[:], nil}, {k3b, v2b[:], nil}, {k4b, v1b[:], nil}, }, }, { cntr1, nil, []set{ {k1b, v2b[:], nil}, {k2b, v1b[:], nil}, {k3b, v4b[:], nil}, {k4b, nil, nil}, }, }, } for i, test := range tests { c := test.contract // set code for _, e := range test.codes { c.SetCode(hash.Hash256b(e.v), e.v) } // set states for _, e := range test.states { require.NoError(c.SetState(e.k, e.v)) if i > 0 { // committed state == value of previous test's SetState() committed := tests[i-1].states for _, e := range committed { v, err := c.GetCommittedState(e.k) require.NoError(err) require.Equal(e.v, v) } } v, err := c.GetState(e.k) require.NoError(err) require.Equal(e.v, v) } require.NoError(c.Commit()) } checks := []cntrTest{ { cntr1, []code{ {c1, bytecode}, }, []set{ {k1b, v2b[:], nil}, {k2b, v1b[:], nil}, {k3b, v4b[:], nil}, {k4b, nil, nil}, }, }, } for _, test := range checks { c := test.contract // check code for _, e := range test.codes { v, err := c.GetCode() require.NoError(err) require.Equal(e.v, v) chash := hash.Hash256b(e.v) require.Equal(chash[:], c.SelfState().CodeHash) require.NotEqual(hash.ZeroHash256, hash.BytesToHash256(chash[:])) } // check states for _, e := range test.states { v, err := c.GetState(e.k) require.Equal(e.v, v) if err != nil { require.Equal(e.cause, errors.Cause(err)) } } } } cfg := config.Default t.Run("contract load/store with stateDB, sync mode", func(t *testing.T) { testTriePath, err := testutil.PathOfTempFile("trie") require.NoError(err) defer func() { testutil.CleanupPath(t, testTriePath) }() cfg.Chain.TrieDBPath = testTriePath testLoadStoreCommit(cfg, t, false) }) t.Run("contract load/store with stateDB, async mode", func(t *testing.T) { testTriePath, err := testutil.PathOfTempFile("trie") require.NoError(err) defer func() { 
testutil.CleanupPath(t, testTriePath) }() cfg := config.Default cfg.Chain.TrieDBPath = testTriePath testLoadStoreCommit(cfg, t, true) }) t.Run("contract load/store with trie, sync mode", func(t *testing.T) { testTriePath2, err := testutil.PathOfTempFile("trie") require.NoError(err) defer func() { testutil.CleanupPath(t, testTriePath2) }() cfg.Chain.EnableTrielessStateDB = false cfg.Chain.TrieDBPath = testTriePath2 testLoadStoreCommit(cfg, t, false) }) t.Run("contract load/store with trie, async mode", func(t *testing.T) { testTriePath2, err := testutil.PathOfTempFile("trie") require.NoError(err) defer func() { testutil.CleanupPath(t, testTriePath2) }() cfg.Chain.EnableTrielessStateDB = false cfg.Chain.TrieDBPath = testTriePath2 testLoadStoreCommit(cfg, t, true) }) } func TestSnapshot(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) testfunc := func(enableAsync bool) { sm, err := initMockStateManager(ctrl) require.NoError(err) s := &state.Account{ Balance: big.NewInt(5), } c1, err := newContract( hash.BytesToHash160(identityset.Address(28).Bytes()), s, sm, enableAsync, ) require.NoError(err) require.NoError(c1.SetState(k2b, v2[:])) c2 := c1.Snapshot() require.NoError(c1.SelfState().AddBalance(big.NewInt(7))) require.NoError(c1.SetState(k1b, v1[:])) require.Equal(big.NewInt(12), c1.SelfState().Balance) require.Equal(big.NewInt(5), c2.SelfState().Balance) } t.Run("sync mode", func(t *testing.T) { testfunc(false) }) t.Run("async mode", func(t *testing.T) { testfunc(true) }) }
1
23,958
`stateDB := NewStateDBAdapter(sm, 0, hash.ZeroHash256, NotFixTopicCopyBugOption())`
iotexproject-iotex-core
go
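The diff above switches NewStateDBAdapter from positional boolean flags to variadic functional options, which is what makes the reviewer's inline one-liner possible. A generic sketch of that pattern follows; only StateDBAdapterOption and NotFixTopicCopyBugOption come from the diff, while the struct, its field, and newAdapter are illustrative stand-ins:

```go
package main

import "fmt"

// adapter stands in for the StateDBAdapter; its field is an assumption.
type adapter struct {
	notFixTopicCopyBug bool
}

// Option mutates an adapter during construction, like StateDBAdapterOption.
type Option func(*adapter)

// NotFixTopicCopyBugOption mirrors the option used in the diff.
func NotFixTopicCopyBugOption() Option {
	return func(a *adapter) { a.notFixTopicCopyBug = true }
}

// newAdapter applies each option in order, as a variadic constructor does.
func newAdapter(opts ...Option) *adapter {
	a := &adapter{}
	for _, opt := range opts {
		opt(a)
	}
	return a
}

func main() {
	// Options can be passed inline, as the review comment suggests,
	// instead of building a slice first.
	a := newAdapter(NotFixTopicCopyBugOption())
	fmt.Println(a.notFixTopicCopyBug)
}
```

The design benefit is that new behavior flags can be added without changing every call site, which is exactly the churn the old boolean-parameter signature caused.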
@@ -446,8 +446,17 @@ class HintManager(QObject):
         # Short hints are the number of hints we can possibly show which are
         # (needed - 1) digits in length.
         if needed > min_chars:
-            short_count = math.floor((len(chars) ** needed - len(elems)) /
-                                     len(chars))
+            total_space = len(chars) ** needed
+            # Calculate short_count naively, by finding the available space
+            # and dividing by the number of spots we would lose by adding a
+            # short element
+            short_count = math.floor((total_space - len(elems)) /
+                                     (len(chars)))
+            # Check if we double counted above to warrant another short_count
+            # https://github.com/qutebrowser/qutebrowser/issues/3242
+            if total_space - (short_count * len(chars) +
+                              (len(elems) - short_count)) >= len(chars) - 1:
+                short_count += 1
         else:
             short_count = 0
 
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """A HintManager to draw hints over links.""" import collections import functools import math import re import html import enum from string import ascii_lowercase import attr from PyQt5.QtCore import pyqtSlot, QObject, Qt, QUrl from PyQt5.QtWidgets import QLabel from qutebrowser.config import config from qutebrowser.keyinput import modeman, modeparsers from qutebrowser.browser import webelem from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners from qutebrowser.utils import usertypes, log, qtutils, message, objreg, utils Target = enum.Enum('Target', ['normal', 'current', 'tab', 'tab_fg', 'tab_bg', 'window', 'yank', 'yank_primary', 'run', 'fill', 'hover', 'download', 'userscript', 'spawn']) class HintingError(Exception): """Exception raised on errors during hinting.""" def on_mode_entered(mode, win_id): """Stop hinting when insert mode was entered.""" if mode == usertypes.KeyMode.insert: modeman.leave(win_id, usertypes.KeyMode.hint, 'insert mode', maybe=True) class HintLabel(QLabel): """A label for a link. Attributes: elem: The element this label belongs to. _context: The current hinting context. """ STYLESHEET = """ QLabel { background-color: {{ conf.colors.hints.bg }}; color: {{ conf.colors.hints.fg }}; font: {{ conf.fonts.hints }}; border: {{ conf.hints.border }}; padding-left: -3px; padding-right: -3px; } """ def __init__(self, elem, context): super().__init__(parent=context.tab) self._context = context self.elem = elem self.setAttribute(Qt.WA_StyledBackground, True) config.set_register_stylesheet(self) self._context.tab.contents_size_changed.connect(self._move_to_elem) self._move_to_elem() self.show() def __repr__(self): try: text = self.text() except RuntimeError: text = '<deleted>' return utils.get_repr(self, elem=self.elem, text=text) def update_text(self, matched, unmatched): """Set the text for the hint. Args: matched: The part of the text which was typed. unmatched: The part of the text which was not typed yet. """ if (config.val.hints.uppercase and self._context.hint_mode in ['letter', 'word']): matched = html.escape(matched.upper()) unmatched = html.escape(unmatched.upper()) else: matched = html.escape(matched) unmatched = html.escape(unmatched) match_color = html.escape(config.val.colors.hints.match.fg) self.setText('<font color="{}">{}</font>{}'.format( match_color, matched, unmatched)) self.adjustSize() @pyqtSlot() def _move_to_elem(self): """Reposition the label to its element.""" if not self.elem.has_frame(): # This sometimes happens for some reason... 
log.hints.debug("Frame for {!r} vanished!".format(self)) self.hide() return no_js = config.val.hints.find_implementation != 'javascript' rect = self.elem.rect_on_view(no_js=no_js) self.move(rect.x(), rect.y()) def cleanup(self): """Clean up this element and hide it.""" self.hide() self.deleteLater() @attr.s class HintContext: """Context namespace used for hinting. Attributes: all_labels: A list of all HintLabel objects ever created. labels: A mapping from key strings to HintLabel objects. May contain less elements than `all_labels` due to filtering. baseurl: The URL of the current page. target: What to do with the opened links. normal/current/tab/tab_fg/tab_bg/window: Get passed to BrowserTab. yank/yank_primary: Yank to clipboard/primary selection. run: Run a command. fill: Fill commandline with link. download: Download the link. userscript: Call a custom userscript. spawn: Spawn a simple command. to_follow: The link to follow when enter is pressed. args: Custom arguments for userscript/spawn rapid: Whether to do rapid hinting. add_history: Whether to add yanked or spawned link to the history. filterstr: Used to save the filter string for restoring in rapid mode. tab: The WebTab object we started hinting in. group: The group of web elements to hint. """ all_labels = attr.ib(attr.Factory(list)) labels = attr.ib(attr.Factory(dict)) target = attr.ib(None) baseurl = attr.ib(None) to_follow = attr.ib(None) rapid = attr.ib(False) add_history = attr.ib(False) filterstr = attr.ib(None) args = attr.ib(attr.Factory(list)) tab = attr.ib(None) group = attr.ib(None) hint_mode = attr.ib(None) def get_args(self, urlstr): """Get the arguments, with {hint-url} replaced by the given URL.""" args = [] for arg in self.args: arg = arg.replace('{hint-url}', urlstr) args.append(arg) return args class HintActions: """Actions which can be done after selecting a hint.""" def __init__(self, win_id): self._win_id = win_id def click(self, elem, context): """Click an element. Args: elem: The QWebElement to click. context: The HintContext to use. """ target_mapping = { Target.normal: usertypes.ClickTarget.normal, Target.current: usertypes.ClickTarget.normal, Target.tab_fg: usertypes.ClickTarget.tab, Target.tab_bg: usertypes.ClickTarget.tab_bg, Target.window: usertypes.ClickTarget.window, Target.hover: usertypes.ClickTarget.normal, } if config.val.tabs.background: target_mapping[Target.tab] = usertypes.ClickTarget.tab_bg else: target_mapping[Target.tab] = usertypes.ClickTarget.tab if context.target in [Target.normal, Target.current]: # Set the pre-jump mark ', so we can jump back here after following tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self._win_id) tabbed_browser.set_mark("'") try: if context.target == Target.hover: elem.hover() elif context.target == Target.current: elem.remove_blank_target() elem.click(target_mapping[context.target]) else: elem.click(target_mapping[context.target]) except webelem.Error as e: raise HintingError(str(e)) def yank(self, url, context): """Yank an element to the clipboard or primary selection. Args: url: The URL to open as a QUrl. context: The HintContext to use. 
""" sel = (context.target == Target.yank_primary and utils.supports_selection()) flags = QUrl.FullyEncoded | QUrl.RemovePassword if url.scheme() == 'mailto': flags |= QUrl.RemoveScheme urlstr = url.toString(flags) utils.set_clipboard(urlstr, selection=sel) msg = "Yanked URL to {}: {}".format( "primary selection" if sel else "clipboard", urlstr) message.info(msg) def run_cmd(self, url, context): """Run the command based on a hint URL. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ urlstr = url.toString(QUrl.FullyEncoded) args = context.get_args(urlstr) commandrunner = runners.CommandRunner(self._win_id) commandrunner.run_safely(' '.join(args)) def preset_cmd_text(self, url, context): """Preset a commandline text based on a hint URL. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ urlstr = url.toDisplayString(QUrl.FullyEncoded) args = context.get_args(urlstr) text = ' '.join(args) if text[0] not in modeparsers.STARTCHARS: raise HintingError("Invalid command text '{}'.".format(text)) cmd = objreg.get('status-command', scope='window', window=self._win_id) cmd.set_cmd_text(text) def download(self, elem, context): """Download a hint URL. Args: elem: The QWebElement to download. _context: The HintContext to use. """ url = elem.resolve_url(context.baseurl) if url is None: raise HintingError("No suitable link found for this element.") prompt = False if context.rapid else None qnam = context.tab.networkaccessmanager() user_agent = context.tab.user_agent() # FIXME:qtwebengine do this with QtWebEngine downloads? download_manager = objreg.get('qtnetwork-download-manager', scope='window', window=self._win_id) download_manager.get(url, qnam=qnam, user_agent=user_agent, prompt_download_directory=prompt) def call_userscript(self, elem, context): """Call a userscript from a hint. Args: elem: The QWebElement to use in the userscript. context: The HintContext to use. """ cmd = context.args[0] args = context.args[1:] env = { 'QUTE_MODE': 'hints', 'QUTE_SELECTED_TEXT': str(elem), 'QUTE_SELECTED_HTML': elem.outer_xml(), } url = elem.resolve_url(context.baseurl) if url is not None: env['QUTE_URL'] = url.toString(QUrl.FullyEncoded) try: userscripts.run_async(context.tab, cmd, *args, win_id=self._win_id, env=env) except userscripts.Error as e: raise HintingError(str(e)) def spawn(self, url, context): """Spawn a simple command from a hint. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ urlstr = url.toString(QUrl.FullyEncoded | QUrl.RemovePassword) args = context.get_args(urlstr) commandrunner = runners.CommandRunner(self._win_id) commandrunner.run_safely('spawn ' + ' '.join(args)) class HintManager(QObject): """Manage drawing hints over links or other elements. Class attributes: HINT_TEXTS: Text displayed for different hinting modes. Attributes: _context: The HintContext for the current invocation. _win_id: The window ID this HintManager is associated with. _tab_id: The tab ID this HintManager is associated with. 
    Signals:
        See HintActions
    """

    HINT_TEXTS = {
        Target.normal: "Follow hint",
        Target.current: "Follow hint in current tab",
        Target.tab: "Follow hint in new tab",
        Target.tab_fg: "Follow hint in foreground tab",
        Target.tab_bg: "Follow hint in background tab",
        Target.window: "Follow hint in new window",
        Target.yank: "Yank hint to clipboard",
        Target.yank_primary: "Yank hint to primary selection",
        Target.run: "Run a command on a hint",
        Target.fill: "Set hint in commandline",
        Target.hover: "Hover over a hint",
        Target.download: "Download hint",
        Target.userscript: "Call userscript via hint",
        Target.spawn: "Spawn command via hint",
    }

    def __init__(self, win_id, tab_id, parent=None):
        """Constructor."""
        super().__init__(parent)
        self._win_id = win_id
        self._tab_id = tab_id
        self._context = None
        self._word_hinter = WordHinter()

        self._actions = HintActions(win_id)

        mode_manager = objreg.get('mode-manager', scope='window',
                                  window=win_id)
        mode_manager.left.connect(self.on_mode_left)

    def _get_text(self):
        """Get a hint text based on the current context."""
        text = self.HINT_TEXTS[self._context.target]
        if self._context.rapid:
            text += ' (rapid mode)'
        text += '...'
        return text

    def _cleanup(self):
        """Clean up after hinting."""
        # pylint: disable=not-an-iterable
        for label in self._context.all_labels:
            label.cleanup()
        # pylint: enable=not-an-iterable

        text = self._get_text()
        message_bridge = objreg.get('message-bridge', scope='window',
                                    window=self._win_id)
        message_bridge.maybe_reset_text(text)
        self._context = None

    def _hint_strings(self, elems):
        """Calculate the hint strings for elems.

        Inspired by Vimium.

        Args:
            elems: The elements to get hint strings for.

        Return:
            A list of hint strings, in the same order as the elements.
        """
        if not elems:
            return []
        hint_mode = self._context.hint_mode
        if hint_mode == 'word':
            try:
                return self._word_hinter.hint(elems)
            except HintingError as e:
                message.error(str(e))
                # falls back on letter hints
        if hint_mode == 'number':
            chars = '0123456789'
        else:
            chars = config.val.hints.chars
        min_chars = config.val.hints.min_chars
        if config.val.hints.scatter and hint_mode != 'number':
            return self._hint_scattered(min_chars, chars, elems)
        else:
            return self._hint_linear(min_chars, chars, elems)

    def _hint_scattered(self, min_chars, chars, elems):
        """Produce scattered hint labels with variable length (like Vimium).

        Args:
            min_chars: The minimum length of labels.
            chars: The alphabet to use for labels.
            elems: The elements to generate labels for.
        """
        # Determine how many digits the link hints will require in the worst
        # case. Usually we do not need all of these digits for every single
        # link hint, so we can show shorter hints for a few of the links.
        needed = max(min_chars, math.ceil(math.log(len(elems), len(chars))))

        # Short hints are the number of hints we can possibly show which are
        # (needed - 1) digits in length.
        if needed > min_chars:
            short_count = math.floor((len(chars) ** needed - len(elems)) /
                                     len(chars))
        else:
            short_count = 0

        long_count = len(elems) - short_count

        strings = []

        if needed > 1:
            for i in range(short_count):
                strings.append(self._number_to_hint_str(i, chars, needed - 1))

        start = short_count * len(chars)
        for i in range(start, start + long_count):
            strings.append(self._number_to_hint_str(i, chars, needed))

        return self._shuffle_hints(strings, len(chars))

    def _hint_linear(self, min_chars, chars, elems):
        """Produce linear hint labels with constant length (like dwb).

        Args:
            min_chars: The minimum length of labels.
            chars: The alphabet to use for labels.
            elems: The elements to generate labels for.
""" strings = [] needed = max(min_chars, math.ceil(math.log(len(elems), len(chars)))) for i in range(len(elems)): strings.append(self._number_to_hint_str(i, chars, needed)) return strings def _shuffle_hints(self, hints, length): """Shuffle the given set of hints so that they're scattered. Hints starting with the same character will be spread evenly throughout the array. Inspired by Vimium. Args: hints: A list of hint strings. length: Length of the available charset. Return: A list of shuffled hint strings. """ buckets = [[] for i in range(length)] for i, hint in enumerate(hints): buckets[i % len(buckets)].append(hint) result = [] for bucket in buckets: result += bucket return result def _number_to_hint_str(self, number, chars, digits=0): """Convert a number like "8" into a hint string like "JK". This is used to sequentially generate all of the hint text. The hint string will be "padded with zeroes" to ensure its length is >= digits. Inspired by Vimium. Args: number: The hint number. chars: The charset to use. digits: The minimum output length. Return: A hint string. """ base = len(chars) hintstr = [] remainder = 0 while True: remainder = number % base hintstr.insert(0, chars[remainder]) number -= remainder number //= base if number <= 0: break # Pad the hint string we're returning so that it matches digits. for _ in range(0, digits - len(hintstr)): hintstr.insert(0, chars[0]) return ''.join(hintstr) def _check_args(self, target, *args): """Check the arguments passed to start() and raise if they're wrong. Args: target: A Target enum member. args: Arguments for userscript/download """ if not isinstance(target, Target): raise TypeError("Target {} is no Target member!".format(target)) if target in [Target.userscript, Target.spawn, Target.run, Target.fill]: if not args: raise cmdexc.CommandError( "'args' is required with target userscript/spawn/run/" "fill.") else: if args: raise cmdexc.CommandError( "'args' is only allowed with target userscript/spawn.") def _filter_matches(self, filterstr, elemstr): """Return True if `filterstr` matches `elemstr`.""" # Empty string and None always match if not filterstr: return True filterstr = filterstr.casefold() elemstr = elemstr.casefold() # Do multi-word matching return all(word in elemstr for word in filterstr.split()) def _filter_matches_exactly(self, filterstr, elemstr): """Return True if `filterstr` exactly matches `elemstr`.""" # Empty string and None never match if not filterstr: return False filterstr = filterstr.casefold() elemstr = elemstr.casefold() return filterstr == elemstr def _start_cb(self, elems): """Initialize the elements and labels based on the context set.""" if self._context is None: log.hints.debug("In _start_cb without context!") return if elems is None: message.error("There was an error while getting hint elements") return if not elems: message.error("No elements found.") return strings = self._hint_strings(elems) log.hints.debug("hints: {}".format(', '.join(strings))) for elem, string in zip(elems, strings): label = HintLabel(elem, self._context) label.update_text('', string) self._context.all_labels.append(label) self._context.labels[string] = label keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) keyparser = keyparsers[usertypes.KeyMode.hint] keyparser.update_bindings(strings) message_bridge = objreg.get('message-bridge', scope='window', window=self._win_id) message_bridge.set_text(self._get_text()) modeman.enter(self._win_id, usertypes.KeyMode.hint, 'HintManager.start') # to make auto_follow == 
'always' work self._handle_auto_follow() @cmdutils.register(instance='hintmanager', scope='tab', name='hint', star_args_optional=True, maxsplit=2) @cmdutils.argument('win_id', win_id=True) def start(self, rapid=False, group=webelem.Group.all, target=Target.normal, *args, win_id, mode=None, add_history=False): """Start hinting. Args: rapid: Whether to do rapid hinting. With rapid hinting, the hint mode isn't left after a hint is followed, so you can easily open multiple links. This is only possible with targets `tab` (with `tabs.background_tabs=true`), `tab-bg`, `window`, `run`, `hover`, `userscript` and `spawn`. add_history: Whether to add the spawned or yanked link to the browsing history. group: The element types to hint. - `all`: All clickable elements. - `links`: Only links. - `images`: Only images. - `inputs`: Only input fields. target: What to do with the selected element. - `normal`: Open the link. - `current`: Open the link in the current tab. - `tab`: Open the link in a new tab (honoring the `tabs.background_tabs` setting). - `tab-fg`: Open the link in a new foreground tab. - `tab-bg`: Open the link in a new background tab. - `window`: Open the link in a new window. - `hover` : Hover over the link. - `yank`: Yank the link to the clipboard. - `yank-primary`: Yank the link to the primary selection. - `run`: Run the argument as command. - `fill`: Fill the commandline with the command given as argument. - `download`: Download the link. - `userscript`: Call a userscript with `$QUTE_URL` set to the link. - `spawn`: Spawn a command. mode: The hinting mode to use. - `number`: Use numeric hints. - `letter`: Use the chars in the hints.chars setting. - `word`: Use hint words based on the html elements and the extra words. *args: Arguments for spawn/userscript/run/fill. - With `spawn`: The executable and arguments to spawn. `{hint-url}` will get replaced by the selected URL. - With `userscript`: The userscript to execute. Either store the userscript in `~/.local/share/qutebrowser/userscripts` (or `$XDG_DATA_DIR`), or use an absolute path. - With `fill`: The command to fill the statusbar with. `{hint-url}` will get replaced by the selected URL. - With `run`: Same as `fill`. 
""" tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self._win_id) tab = tabbed_browser.currentWidget() if tab is None: raise cmdexc.CommandError("No WebView available yet!") mode_manager = objreg.get('mode-manager', scope='window', window=self._win_id) if mode_manager.mode == usertypes.KeyMode.hint: modeman.leave(win_id, usertypes.KeyMode.hint, 're-hinting') if rapid: if target in [Target.tab_bg, Target.window, Target.run, Target.hover, Target.userscript, Target.spawn, Target.download, Target.normal, Target.current]: pass elif target == Target.tab and config.val.tabs.background: pass else: name = target.name.replace('_', '-') raise cmdexc.CommandError("Rapid hinting makes no sense with " "target {}!".format(name)) if mode is None: mode = config.val.hints.mode self._check_args(target, *args) self._context = HintContext() self._context.tab = tab self._context.target = target self._context.rapid = rapid self._context.hint_mode = mode self._context.add_history = add_history try: self._context.baseurl = tabbed_browser.current_url() except qtutils.QtValueError: raise cmdexc.CommandError("No URL set for this page yet!") self._context.args = args self._context.group = group selector = webelem.SELECTORS[self._context.group] self._context.tab.elements.find_css(selector, self._start_cb, only_visible=True) def current_mode(self): """Return the currently active hinting mode (or None otherwise).""" if self._context is None: return None return self._context.hint_mode def _handle_auto_follow(self, keystr="", filterstr="", visible=None): """Handle the auto_follow option.""" if visible is None: visible = {string: label for string, label in self._context.labels.items() if label.isVisible()} if len(visible) != 1: return auto_follow = config.val.hints.auto_follow if auto_follow == "always": follow = True elif auto_follow == "unique-match": follow = keystr or filterstr elif auto_follow == "full-match": elemstr = str(list(visible.values())[0].elem) filter_match = self._filter_matches_exactly(filterstr, elemstr) follow = (keystr in visible) or filter_match else: follow = False # save the keystr of the only one visible hint to be picked up # later by self.follow_hint self._context.to_follow = list(visible.keys())[0] if follow: # apply auto_follow_timeout timeout = config.val.hints.auto_follow_timeout keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) normal_parser = keyparsers[usertypes.KeyMode.normal] normal_parser.set_inhibited_timeout(timeout) # unpacking gets us the first (and only) key in the dict. self._fire(*visible) def handle_partial_key(self, keystr): """Handle a new partial keypress.""" if self._context is None: log.hints.debug("Got key without context!") return log.hints.debug("Handling new keystring: '{}'".format(keystr)) for string, label in self._context.labels.items(): try: if string.startswith(keystr): matched = string[:len(keystr)] rest = string[len(keystr):] label.update_text(matched, rest) # Show label again if it was hidden before label.show() else: # element doesn't match anymore -> hide it, unless in rapid # mode and hide_unmatched_rapid_hints is false (see #1799) if (not self._context.rapid or config.val.hints.hide_unmatched_rapid_hints): label.hide() except webelem.Error: pass self._handle_auto_follow(keystr=keystr) def filter_hints(self, filterstr): """Filter displayed hints according to a text. Args: filterstr: The string to filter with, or None to use the filter from previous call (saved in `self._filterstr`). 
If `filterstr` is an empty string or if both `filterstr` and `self._filterstr` are None, all hints are shown. """ if filterstr is None: filterstr = self._context.filterstr else: self._context.filterstr = filterstr log.hints.debug("Filtering hints on {!r}".format(filterstr)) visible = [] # pylint: disable=not-an-iterable for label in self._context.all_labels: try: if self._filter_matches(filterstr, str(label.elem)): visible.append(label) # Show label again if it was hidden before label.show() else: # element doesn't match anymore -> hide it label.hide() except webelem.Error: pass # pylint: enable=not-an-iterable if not visible: # Whoops, filtered all hints modeman.leave(self._win_id, usertypes.KeyMode.hint, 'all filtered') return if self._context.hint_mode == 'number': # renumber filtered hints strings = self._hint_strings(visible) self._context.labels = {} for label, string in zip(visible, strings): label.update_text('', string) self._context.labels[string] = label keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) keyparser = keyparsers[usertypes.KeyMode.hint] keyparser.update_bindings(strings, preserve_filter=True) # Note: filter_hints can be called with non-None filterstr only # when number mode is active if filterstr is not None: # pass self._context.labels as the dict of visible hints self._handle_auto_follow(filterstr=filterstr, visible=self._context.labels) def _fire(self, keystr): """Fire a completed hint. Args: keystr: The keychain string to follow. """ # Handlers which take a QWebElement elem_handlers = { Target.normal: self._actions.click, Target.current: self._actions.click, Target.tab: self._actions.click, Target.tab_fg: self._actions.click, Target.tab_bg: self._actions.click, Target.window: self._actions.click, Target.hover: self._actions.click, # _download needs a QWebElement to get the frame. Target.download: self._actions.download, Target.userscript: self._actions.call_userscript, } # Handlers which take a QUrl url_handlers = { Target.yank: self._actions.yank, Target.yank_primary: self._actions.yank, Target.run: self._actions.run_cmd, Target.fill: self._actions.preset_cmd_text, Target.spawn: self._actions.spawn, } elem = self._context.labels[keystr].elem if not elem.has_frame(): message.error("This element has no webframe.") return if self._context.target in elem_handlers: handler = functools.partial(elem_handlers[self._context.target], elem, self._context) elif self._context.target in url_handlers: url = elem.resolve_url(self._context.baseurl) if url is None: message.error("No suitable link found for this element.") return handler = functools.partial(url_handlers[self._context.target], url, self._context) if self._context.add_history: objreg.get('web-history').add_url(url, "") else: raise ValueError("No suitable handler found!") if not self._context.rapid: modeman.leave(self._win_id, usertypes.KeyMode.hint, 'followed', maybe=True) else: # Reset filtering self.filter_hints(None) # Undo keystring highlighting for string, label in self._context.labels.items(): label.update_text('', string) try: handler() except HintingError as e: message.error(str(e)) @cmdutils.register(instance='hintmanager', scope='tab', modes=[usertypes.KeyMode.hint]) def follow_hint(self, keystring=None): """Follow a hint. Args: keystring: The hint to follow, or None. 
""" if keystring is None: if self._context.to_follow is None: raise cmdexc.CommandError("No hint to follow") else: keystring = self._context.to_follow elif keystring not in self._context.labels: raise cmdexc.CommandError("No hint {}!".format(keystring)) self._fire(keystring) @pyqtSlot(usertypes.KeyMode) def on_mode_left(self, mode): """Stop hinting when hinting mode was left.""" if mode != usertypes.KeyMode.hint or self._context is None: # We have one HintManager per tab, so when this gets called, # self._context might be None, because the current tab is not # hinting. return self._cleanup() class WordHinter: """Generator for word hints. Attributes: words: A set of words to be used when no "smart hint" can be derived from the hinted element. """ def __init__(self): # will be initialized on first use. self.words = set() self.dictionary = None def ensure_initialized(self): """Generate the used words if yet uninitialized.""" dictionary = config.val.hints.dictionary if not self.words or self.dictionary != dictionary: self.words.clear() self.dictionary = dictionary try: with open(dictionary, encoding="UTF-8") as wordfile: alphabet = set(ascii_lowercase) hints = set() lines = (line.rstrip().lower() for line in wordfile) for word in lines: if set(word) - alphabet: # contains none-alphabetic chars continue if len(word) > 4: # we don't need words longer than 4 continue for i in range(len(word)): # remove all prefixes of this word hints.discard(word[:i + 1]) hints.add(word) self.words.update(hints) except IOError as e: error = "Word hints requires reading the file at {}: {}" raise HintingError(error.format(dictionary, str(e))) def extract_tag_words(self, elem): """Extract tag words form the given element.""" attr_extractors = { "alt": lambda elem: elem["alt"], "name": lambda elem: elem["name"], "title": lambda elem: elem["title"], "placeholder": lambda elem: elem["placeholder"], "src": lambda elem: elem["src"].split('/')[-1], "href": lambda elem: elem["href"].split('/')[-1], "text": str, } extractable_attrs = collections.defaultdict(list, { "img": ["alt", "title", "src"], "a": ["title", "href", "text"], "input": ["name", "placeholder"], "textarea": ["name", "placeholder"], "button": ["text"] }) return (attr_extractors[attr](elem) for attr in extractable_attrs[elem.tag_name()] if attr in elem or attr == "text") def tag_words_to_hints(self, words): """Take words and transform them to proper hints if possible.""" for candidate in words: if not candidate: continue match = re.search('[A-Za-z]{3,}', candidate) if not match: continue if 4 < match.end() - match.start() < 8: yield candidate[match.start():match.end()].lower() def any_prefix(self, hint, existing): return any(hint.startswith(e) or e.startswith(hint) for e in existing) def filter_prefixes(self, hints, existing): return (h for h in hints if not self.any_prefix(h, existing)) def new_hint_for(self, elem, existing, fallback): """Return a hint for elem, not conflicting with the existing.""" new = self.tag_words_to_hints(self.extract_tag_words(elem)) new_no_prefixes = self.filter_prefixes(new, existing) fallback_no_prefixes = self.filter_prefixes(fallback, existing) # either the first good, or None return (next(new_no_prefixes, None) or next(fallback_no_prefixes, None)) def hint(self, elems): """Produce hint labels based on the html tags. Produce hint words based on the link text and random words from the words arg as fallback. Args: words: Words to use as fallback when no link text can be used. elems: The elements to get hint strings for. 
Return: A list of hint strings, in the same order as the elements. """ self.ensure_initialized() hints = [] used_hints = set() words = iter(self.words) for elem in elems: hint = self.new_hint_for(elem, used_hints, words) if not hint: raise HintingError("Not enough words in the dictionary.") used_hints.add(hint) hints.append(hint) return hints
1
19,787
There are unneeded parens here, but I'll fix it up when merging.
qutebrowser-qutebrowser
py
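The review message above presumably points at the wrapped return in WordHinter.new_hint_for, the one multi-clause return in the shown file whose parentheses are removable. A minimal sketch of the paren-free form, under that assumption:

    # hypothetical follow-up to the review comment: once the expression
    # fits on one line, the outer parentheses around the `or` are redundant
    def new_hint_for(self, elem, existing, fallback):
        """Return a hint for elem, not conflicting with the existing."""
        new = self.tag_words_to_hints(self.extract_tag_words(elem))
        new_no_prefixes = self.filter_prefixes(new, existing)
        fallback_no_prefixes = self.filter_prefixes(fallback, existing)
        # either the first good, or None
        return next(new_no_prefixes, None) or next(fallback_no_prefixes, None)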
@@ -62,6 +62,12 @@ static void setup_globals(mrb_state *mrb) h2o_mruby_eval_expr(mrb, "$LOAD_PATH << \"#{$H2O_ROOT}/share/h2o/mruby\""); h2o_mruby_assert(mrb); + + /* require and include built-in libraries */ + h2o_mruby_eval_expr(mrb, "require \"h2o.rb\"\n" + "require \"acl.rb\"\n" + "include H2O::ACL\n"); + h2o_mruby_assert(mrb); } mrb_value h2o_mruby_to_str(mrb_state *mrb, mrb_value v)
1
/* * Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Ryosuke Matsumoto, * Masayoshi Takahashi * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <errno.h> #include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <mruby.h> #include <mruby/proc.h> #include <mruby/array.h> #include <mruby/class.h> #include <mruby/compile.h> #include <mruby/error.h> #include <mruby/hash.h> #include <mruby/string.h> #include <mruby/throw.h> #include <mruby/variable.h> #include <mruby_input_stream.h> #include "h2o.h" #include "h2o/mruby_.h" #define STATUS_FALLTHRU 399 #define FALLTHRU_SET_PREFIX "x-fallthru-set-" #define FREEZE_STRING(v) RSTR_SET_FROZEN_FLAG(mrb_str_ptr(v)) __thread h2o_mruby_generator_t *h2o_mruby_current_generator = NULL; void h2o_mruby__assert_failed(mrb_state *mrb, const char *file, int line) { mrb_value obj = mrb_funcall(mrb, mrb_obj_value(mrb->exc), "inspect", 0); struct RString *error = mrb_str_ptr(obj); fprintf(stderr, "unexpected ruby error at file: \"%s\", line %d: %s", file, line, error->as.heap.ptr); abort(); } static void setup_globals(mrb_state *mrb) { const char *root = getenv("H2O_ROOT"); if (root == NULL) root = H2O_TO_STR(H2O_ROOT); mrb_gv_set(mrb, mrb_intern_lit(mrb, "$H2O_ROOT"), mrb_str_new(mrb, root, strlen(root))); h2o_mruby_eval_expr(mrb, "$LOAD_PATH << \"#{$H2O_ROOT}/share/h2o/mruby\""); h2o_mruby_assert(mrb); } mrb_value h2o_mruby_to_str(mrb_state *mrb, mrb_value v) { if (!mrb_string_p(v)) H2O_MRUBY_EXEC_GUARD({ v = mrb_str_to_str(mrb, v); }); return v; } mrb_value h2o_mruby_eval_expr(mrb_state *mrb, const char *expr) { return mrb_funcall(mrb, mrb_top_self(mrb), "eval", 1, mrb_str_new_cstr(mrb, expr)); } void h2o_mruby_define_callback(mrb_state *mrb, const char *name, int id) { char buf[1024]; sprintf(buf, "module Kernel\n" " def %s(*args)\n" " ret = Fiber.yield([\n" " %d,\n" " _h2o_create_resumer(),\n" " args,\n" " ])\n" " if ret.kind_of? 
Exception\n"
                 "      raise ret\n"
                 "    end\n"
                 "    ret\n"
                 "  end\n"
                 "end",
            name, id);
    h2o_mruby_eval_expr(mrb, buf);
    if (mrb->exc != NULL) {
        fprintf(stderr, "failed to define mruby function: %s\n", name);
        h2o_mruby_assert(mrb);
    }
}

mrb_value h2o_mruby_create_data_instance(mrb_state *mrb, mrb_value class_obj, void *ptr, const mrb_data_type *type)
{
    struct RClass *klass = mrb_class_ptr(class_obj);
    struct RData *data = mrb_data_object_alloc(mrb, klass, ptr, type);
    return mrb_obj_value(data);
}

mrb_value h2o_mruby_compile_code(mrb_state *mrb, h2o_mruby_config_vars_t *config, char *errbuf)
{
    mrbc_context *cxt;
    struct mrb_parser_state *parser;
    struct RProc *proc = NULL;
    mrb_value result = mrb_nil_value();

    setup_globals(mrb);

    /* parse */
    if ((cxt = mrbc_context_new(mrb)) == NULL) {
        fprintf(stderr, "%s: no memory\n", H2O_MRUBY_MODULE_NAME);
        abort();
    }
    if (config->path != NULL)
        mrbc_filename(mrb, cxt, config->path);
    cxt->capture_errors = 1;
    cxt->lineno = config->lineno;
    if ((parser = mrb_parse_nstring(mrb, config->source.base, (int)config->source.len, cxt)) == NULL) {
        fprintf(stderr, "%s: no memory\n", H2O_MRUBY_MODULE_NAME);
        abort();
    }
    /* return error if errbuf is supplied, or abort */
    if (parser->nerr != 0) {
        if (errbuf == NULL) {
            fprintf(stderr, "%s: internal error (unexpected state)\n", H2O_MRUBY_MODULE_NAME);
            abort();
        }
        snprintf(errbuf, 256, "line %d:%s", parser->error_buffer[0].lineno, parser->error_buffer[0].message);
        strcat(errbuf, "\n\n");
        if (h2o_str_at_position(errbuf + strlen(errbuf), config->source.base, config->source.len,
                                parser->error_buffer[0].lineno - config->lineno + 1, parser->error_buffer[0].column) != 0) {
            /* remove trailing "\n\n" in case we failed to append the source code at the error location */
            errbuf[strlen(errbuf) - 2] = '\0';
        }
        goto Exit;
    }
    /* generate code */
    if ((proc = mrb_generate_code(mrb, parser)) == NULL) {
        fprintf(stderr, "%s: internal error (mrb_generate_code failed)\n", H2O_MRUBY_MODULE_NAME);
        abort();
    }
    result = mrb_run(mrb, proc, mrb_top_self(mrb));
    if (mrb->exc != NULL) {
        mrb_value obj = mrb_funcall(mrb, mrb_obj_value(mrb->exc), "inspect", 0);
        struct RString *error = mrb_str_ptr(obj);
        snprintf(errbuf, 256, "%s", error->as.heap.ptr);
        mrb->exc = 0;
        result = mrb_nil_value();
        goto Exit;
    } else if (mrb_nil_p(result)) {
        snprintf(errbuf, 256, "returned value is not callable");
        goto Exit;
    }

Exit:
    mrb_parser_free(parser);
    mrbc_context_free(mrb, cxt);
    return result;
}

static h2o_iovec_t convert_header_name_to_env(h2o_mem_pool_t *pool, const char *name, size_t len)
{
#define KEY_PREFIX "HTTP_"
#define KEY_PREFIX_LEN (sizeof(KEY_PREFIX) - 1)

    h2o_iovec_t ret;
    ret.len = len + KEY_PREFIX_LEN;
    ret.base = h2o_mem_alloc_pool(pool, ret.len);

    memcpy(ret.base, KEY_PREFIX, KEY_PREFIX_LEN);

    char *d = ret.base + KEY_PREFIX_LEN;
    for (; len != 0; ++name, --len)
        *d++ = *name == '-' ?
'_' : h2o_toupper(*name); return ret; #undef KEY_PREFIX #undef KEY_PREFIX_LEN } static mrb_value build_constants(mrb_state *mrb, const char *server_name, size_t server_name_len) { mrb_value ary = mrb_ary_new_capa(mrb, H2O_MRUBY_NUM_CONSTANTS); mrb_int i; int gc_arena = mrb_gc_arena_save(mrb); { h2o_mem_pool_t pool; h2o_mem_init_pool(&pool); for (i = 0; i != H2O_MAX_TOKENS; ++i) { const h2o_token_t *token = h2o__tokens + i; mrb_value lit = mrb_nil_value(); if (token == H2O_TOKEN_CONTENT_TYPE) { lit = mrb_str_new_lit(mrb, "CONTENT_TYPE"); } else if (token->buf.len != 0) { h2o_iovec_t n = convert_header_name_to_env(&pool, token->buf.base, token->buf.len); lit = mrb_str_new(mrb, n.base, n.len); } if (mrb_string_p(lit)) { FREEZE_STRING(lit); mrb_ary_set(mrb, ary, i, lit); } } h2o_mem_clear_pool(&pool); } #define SET_STRING(idx, value) \ do { \ mrb_value lit = (value); \ FREEZE_STRING(lit); \ mrb_ary_set(mrb, ary, idx, lit); \ } while (0) #define SET_LITERAL(idx, str) SET_STRING(idx, mrb_str_new_lit(mrb, str)) SET_LITERAL(H2O_MRUBY_LIT_REQUEST_METHOD, "REQUEST_METHOD"); SET_LITERAL(H2O_MRUBY_LIT_SCRIPT_NAME, "SCRIPT_NAME"); SET_LITERAL(H2O_MRUBY_LIT_PATH_INFO, "PATH_INFO"); SET_LITERAL(H2O_MRUBY_LIT_QUERY_STRING, "QUERY_STRING"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_NAME, "SERVER_NAME"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_ADDR, "SERVER_ADDR"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_PORT, "SERVER_PORT"); SET_LITERAL(H2O_MRUBY_LIT_CONTENT_LENGTH, "CONTENT_LENGTH"); SET_LITERAL(H2O_MRUBY_LIT_REMOTE_ADDR, "REMOTE_ADDR"); SET_LITERAL(H2O_MRUBY_LIT_REMOTE_PORT, "REMOTE_PORT"); SET_LITERAL(H2O_MRUBY_LIT_REMOTE_USER, "REMOTE_USER"); SET_LITERAL(H2O_MRUBY_LIT_RACK_URL_SCHEME, "rack.url_scheme"); SET_LITERAL(H2O_MRUBY_LIT_RACK_MULTITHREAD, "rack.multithread"); SET_LITERAL(H2O_MRUBY_LIT_RACK_MULTIPROCESS, "rack.multiprocess"); SET_LITERAL(H2O_MRUBY_LIT_RACK_RUN_ONCE, "rack.run_once"); SET_LITERAL(H2O_MRUBY_LIT_RACK_HIJACK_, "rack.hijack?"); SET_LITERAL(H2O_MRUBY_LIT_RACK_INPUT, "rack.input"); SET_LITERAL(H2O_MRUBY_LIT_RACK_ERRORS, "rack.errors"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_SOFTWARE, "SERVER_SOFTWARE"); SET_STRING(H2O_MRUBY_LIT_SERVER_SOFTWARE_VALUE, mrb_str_new(mrb, server_name, server_name_len)); SET_LITERAL(H2O_MRUBY_LIT_SEPARATOR_COMMA, ", "); SET_LITERAL(H2O_MRUBY_LIT_SEPARATOR_SEMICOLON, "; "); #undef SET_LITERAL #undef SET_STRING mrb_ary_set(mrb, ary, H2O_MRUBY_PROC_EACH_TO_ARRAY, h2o_mruby_eval_expr(mrb, "Proc.new do |o|\n" " a = []\n" " o.each do |x|\n" " a << x\n" " end\n" " a\n" "end")); h2o_mruby_assert(mrb); /* sends exception using H2O_MRUBY_CALLBACK_ID_EXCEPTION_RAISED */ mrb_ary_set(mrb, ary, H2O_MRUBY_PROC_APP_TO_FIBER, h2o_mruby_eval_expr(mrb, "Proc.new do |app|\n" " cached = nil\n" " Proc.new do |req|\n" " fiber = cached\n" " cached = nil\n" " if !fiber\n" " fiber = Fiber.new do\n" " self_fiber = Fiber.current\n" " req = Fiber.yield\n" " while 1\n" " begin\n" " while 1\n" " resp = app.call(req)\n" " cached = self_fiber\n" " req = Fiber.yield(resp)\n" " end\n" " rescue => e\n" " cached = self_fiber\n" " req = Fiber.yield([-1, e])\n" " end\n" " end\n" " end\n" " fiber.resume\n" " end\n" " fiber.resume(req)\n" " end\n" "end")); h2o_mruby_assert(mrb); h2o_mruby_eval_expr(mrb, "module Kernel\n" " def _h2o_create_resumer()\n" " me = Fiber.current\n" " Proc.new do |v|\n" " me.resume(v)\n" " end\n" " end\n" "end"); h2o_mruby_assert(mrb); mrb_gc_arena_restore(mrb, gc_arena); return ary; } static void on_context_init(h2o_handler_t *_handler, h2o_context_t *ctx) { h2o_mruby_handler_t *handler = (void 
*)_handler; h2o_mruby_context_t *handler_ctx = h2o_mem_alloc(sizeof(*handler_ctx)); handler_ctx->handler = handler; /* init mruby in every thread */ if ((handler_ctx->mrb = mrb_open()) == NULL) { fprintf(stderr, "%s: no memory\n", H2O_MRUBY_MODULE_NAME); abort(); } handler_ctx->constants = build_constants(handler_ctx->mrb, ctx->globalconf->server_name.base, ctx->globalconf->server_name.len); handler_ctx->symbols.sym_call = mrb_intern_lit(handler_ctx->mrb, "call"); handler_ctx->symbols.sym_close = mrb_intern_lit(handler_ctx->mrb, "close"); handler_ctx->symbols.sym_method = mrb_intern_lit(handler_ctx->mrb, "method"); handler_ctx->symbols.sym_headers = mrb_intern_lit(handler_ctx->mrb, "headers"); handler_ctx->symbols.sym_body = mrb_intern_lit(handler_ctx->mrb, "body"); handler_ctx->symbols.sym_async = mrb_intern_lit(handler_ctx->mrb, "async"); h2o_mruby_send_chunked_init_context(handler_ctx); h2o_mruby_http_request_init_context(handler_ctx); /* compile code (must be done for each thread) */ int arena = mrb_gc_arena_save(handler_ctx->mrb); mrb_value proc = h2o_mruby_compile_code(handler_ctx->mrb, &handler->config, NULL); handler_ctx->proc = mrb_funcall_argv(handler_ctx->mrb, mrb_ary_entry(handler_ctx->constants, H2O_MRUBY_PROC_APP_TO_FIBER), handler_ctx->symbols.sym_call, 1, &proc); h2o_mruby_assert(handler_ctx->mrb); mrb_gc_arena_restore(handler_ctx->mrb, arena); mrb_gc_protect(handler_ctx->mrb, handler_ctx->proc); h2o_context_set_handler_context(ctx, &handler->super, handler_ctx); } static void on_context_dispose(h2o_handler_t *_handler, h2o_context_t *ctx) { h2o_mruby_handler_t *handler = (void *)_handler; h2o_mruby_context_t *handler_ctx = h2o_context_get_handler_context(ctx, &handler->super); if (handler_ctx == NULL) return; mrb_close(handler_ctx->mrb); free(handler_ctx); } static void on_handler_dispose(h2o_handler_t *_handler) { h2o_mruby_handler_t *handler = (void *)_handler; free(handler->config.source.base); free(handler->config.path); free(handler); } static void report_exception(h2o_req_t *req, mrb_state *mrb) { mrb_value obj = mrb_funcall(mrb, mrb_obj_value(mrb->exc), "inspect", 0); struct RString *error = mrb_str_ptr(obj); h2o_req_log_error(req, H2O_MRUBY_MODULE_NAME, "mruby raised: %s\n", error->as.heap.ptr); mrb->exc = NULL; } static void stringify_address(h2o_conn_t *conn, socklen_t (*cb)(h2o_conn_t *conn, struct sockaddr *), mrb_state *mrb, mrb_value *host, mrb_value *port) { struct sockaddr_storage ss; socklen_t sslen; char buf[NI_MAXHOST]; *host = mrb_nil_value(); *port = mrb_nil_value(); if ((sslen = cb(conn, (void *)&ss)) == 0) return; size_t l = h2o_socket_getnumerichost((void *)&ss, sslen, buf); if (l != SIZE_MAX) *host = mrb_str_new(mrb, buf, l); int32_t p = h2o_socket_getport((void *)&ss); if (p != -1) { l = (int)sprintf(buf, "%" PRIu16, (uint16_t)p); *port = mrb_str_new(mrb, buf, l); } } static void on_rack_input_free(mrb_state *mrb, const char *base, mrb_int len, void *_input_stream) { /* reset ref to input_stream */ mrb_value *input_stream = _input_stream; *input_stream = mrb_nil_value(); } static int build_env_sort_header_cb(const void *_x, const void *_y) { const h2o_header_t *x = (const h2o_header_t *)_x, *y = (const h2o_header_t *)_y; if (x->name->len < y->name->len) return -1; if (x->name->len > y->name->len) return 1; if (x->name->base == y->name->base) return 0; return memcmp(x->name->base, y->name->base, x->name->len); } static mrb_value build_env(h2o_mruby_generator_t *generator) { mrb_state *mrb = generator->ctx->mrb; mrb_value env = 
mrb_hash_new_capa(mrb, 16); /* environment */ mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_REQUEST_METHOD), mrb_str_new(mrb, generator->req->method.base, generator->req->method.len)); size_t confpath_len_wo_slash = generator->req->pathconf->path.len; if (generator->req->pathconf->path.base[generator->req->pathconf->path.len - 1] == '/') --confpath_len_wo_slash; mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SCRIPT_NAME), mrb_str_new(mrb, generator->req->pathconf->path.base, confpath_len_wo_slash)); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_PATH_INFO), mrb_str_new(mrb, generator->req->path_normalized.base + confpath_len_wo_slash, generator->req->path_normalized.len - confpath_len_wo_slash)); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_QUERY_STRING), generator->req->query_at != SIZE_MAX ? mrb_str_new(mrb, generator->req->path.base + generator->req->query_at + 1, generator->req->path.len - (generator->req->query_at + 1)) : mrb_str_new_lit(mrb, "")); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SERVER_NAME), mrb_str_new(mrb, generator->req->hostconf->authority.host.base, generator->req->hostconf->authority.host.len)); { mrb_value h, p; stringify_address(generator->req->conn, generator->req->conn->callbacks->get_sockname, mrb, &h, &p); if (!mrb_nil_p(h)) mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SERVER_ADDR), h); if (!mrb_nil_p(p)) mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SERVER_PORT), p); } mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_TOKEN_HOST - h2o__tokens), mrb_str_new(mrb, generator->req->authority.base, generator->req->authority.len)); if (generator->req->entity.base != NULL) { char buf[32]; int l = sprintf(buf, "%zu", generator->req->entity.len); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_CONTENT_LENGTH), mrb_str_new(mrb, buf, l)); generator->rack_input = mrb_input_stream_value(mrb, NULL, 0); mrb_input_stream_set_data(mrb, generator->rack_input, generator->req->entity.base, (mrb_int)generator->req->entity.len, 0, on_rack_input_free, &generator->rack_input); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_INPUT), generator->rack_input); } { mrb_value h, p; stringify_address(generator->req->conn, generator->req->conn->callbacks->get_peername, mrb, &h, &p); if (!mrb_nil_p(h)) mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_REMOTE_ADDR), h); if (!mrb_nil_p(p)) mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_REMOTE_PORT), p); } { size_t i; for (i = 0; i != generator->req->env.size; i += 2) { h2o_iovec_t *name = generator->req->env.entries + i, *value = name + 1; mrb_hash_set(mrb, env, mrb_str_new(mrb, name->base, name->len), mrb_str_new(mrb, value->base, value->len)); } } { /* headers */ h2o_header_t *headers_sorted = alloca(sizeof(*headers_sorted) * generator->req->headers.size); memcpy(headers_sorted, generator->req->headers.entries, sizeof(*headers_sorted) * generator->req->headers.size); qsort(headers_sorted, generator->req->headers.size, sizeof(*headers_sorted), build_env_sort_header_cb); size_t i = 0; for (i = 0; i != generator->req->headers.size; ++i) { const h2o_header_t *header = headers_sorted + i; mrb_value n, v; if (h2o_iovec_is_token(header->name)) { const h2o_token_t *token = 
H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, header->name); if (token == H2O_TOKEN_TRANSFER_ENCODING) continue; n = mrb_ary_entry(generator->ctx->constants, (mrb_int)(token - h2o__tokens)); } else { h2o_iovec_t vec = convert_header_name_to_env(&generator->req->pool, header->name->base, header->name->len); n = mrb_str_new(mrb, vec.base, vec.len); } v = mrb_str_new(mrb, header->value.base, header->value.len); while (i < generator->req->headers.size - 1) { if (!h2o_memis(headers_sorted[i + 1].name->base, headers_sorted[i + 1].name->len, header->name->base, header->name->len)) break; header = headers_sorted + ++i; v = mrb_str_append(mrb, v, mrb_ary_entry(generator->ctx->constants, header->name == &H2O_TOKEN_COOKIE->buf ? H2O_MRUBY_LIT_SEPARATOR_SEMICOLON : H2O_MRUBY_LIT_SEPARATOR_COMMA)); v = mrb_str_append(mrb, v, mrb_str_new(mrb, header->value.base, header->value.len)); } mrb_hash_set(mrb, env, n, v); } } /* rack.* */ /* TBD rack.version? */ mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_URL_SCHEME), mrb_str_new(mrb, generator->req->scheme->name.base, generator->req->scheme->name.len)); /* we are using shared-none architecture, and therefore declare ourselves as multiprocess */ mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_MULTITHREAD), mrb_false_value()); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_MULTIPROCESS), mrb_true_value()); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_RUN_ONCE), mrb_false_value()); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_HIJACK_), mrb_false_value()); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_ERRORS), mrb_gv_get(mrb, mrb_intern_lit(mrb, "$stderr"))); /* server name */ mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SERVER_SOFTWARE), mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SERVER_SOFTWARE_VALUE)); return env; } static int handle_response_header(h2o_mruby_context_t *handler_ctx, h2o_iovec_t name, h2o_iovec_t value, void *_req) { h2o_req_t *req = _req; const h2o_token_t *token; static const h2o_iovec_t fallthru_set_prefix = {H2O_STRLIT(FALLTHRU_SET_PREFIX)}; /* convert name to lowercase */ name = h2o_strdup(&req->pool, name.base, name.len); h2o_strtolower(name.base, name.len); if ((token = h2o_lookup_token(name.base, name.len)) != NULL) { if (token->proxy_should_drop) { /* skip */ } else if (token == H2O_TOKEN_CONTENT_LENGTH) { req->res.content_length = h2o_strtosize(value.base, value.len); } else { if (token == H2O_TOKEN_LINK) h2o_push_path_in_link_header(req, value.base, value.len); value = h2o_strdup(&req->pool, value.base, value.len); h2o_add_header(&req->pool, &req->res.headers, token, value.base, value.len); } } else if (name.len > fallthru_set_prefix.len && h2o_memis(name.base, fallthru_set_prefix.len, fallthru_set_prefix.base, fallthru_set_prefix.len)) { /* register environment variables (with the name converted to uppercase, and using `_`) */ size_t i; name.base += fallthru_set_prefix.len; name.len -= fallthru_set_prefix.len; for (i = 0; i != name.len; ++i) name.base[i] = name.base[i] == '-' ? 
'_' : h2o_toupper(name.base[i]); h2o_iovec_t *slot = h2o_req_getenv(req, name.base, name.len, 1); *slot = h2o_strdup(&req->pool, value.base, value.len); } else { value = h2o_strdup(&req->pool, value.base, value.len); h2o_add_header_by_str(&req->pool, &req->res.headers, name.base, name.len, 0, value.base, value.len); } return 0; } static void clear_rack_input(h2o_mruby_generator_t *generator) { if (!mrb_nil_p(generator->rack_input)) mrb_input_stream_set_data(generator->ctx->mrb, generator->rack_input, NULL, -1, 0, NULL, NULL); } static void on_generator_dispose(void *_generator) { h2o_mruby_generator_t *generator = _generator; clear_rack_input(generator); generator->req = NULL; if (generator->chunked != NULL) h2o_mruby_send_chunked_dispose(generator); } static int on_req(h2o_handler_t *_handler, h2o_req_t *req) { h2o_mruby_handler_t *handler = (void *)_handler; h2o_mruby_context_t *handler_ctx = h2o_context_get_handler_context(req->conn->ctx, &handler->super); int gc_arena = mrb_gc_arena_save(handler_ctx->mrb); h2o_mruby_generator_t *generator = h2o_mem_alloc_shared(&req->pool, sizeof(*generator), on_generator_dispose); generator->super.proceed = NULL; generator->super.stop = NULL; generator->req = req; generator->ctx = h2o_context_get_handler_context(req->conn->ctx, &handler->super); generator->rack_input = mrb_nil_value(); generator->chunked = NULL; mrb_value env = build_env(generator); int is_delegate = 0; h2o_mruby_run_fiber(generator, generator->ctx->proc, env, &is_delegate); mrb_gc_arena_restore(handler_ctx->mrb, gc_arena); if (is_delegate) return -1; return 0; } static void send_response(h2o_mruby_generator_t *generator, mrb_int status, mrb_value resp, int *is_delegate) { mrb_state *mrb = generator->ctx->mrb; mrb_value body; h2o_iovec_t content = {NULL}; /* set status */ generator->req->res.status = (int)status; /* set headers */ if (h2o_mruby_iterate_headers(generator->ctx, mrb_ary_entry(resp, 1), handle_response_header, generator->req) != 0) { assert(mrb->exc != NULL); goto GotException; } /* return without processing body, if status is fallthru */ if (generator->req->res.status == STATUS_FALLTHRU) { if (is_delegate != NULL) *is_delegate = 1; else h2o_delegate_request_deferred(generator->req, &generator->ctx->handler->super); return; } /* obtain body */ body = mrb_ary_entry(resp, 2); /* flatten body if possible */ if (mrb_array_p(body)) { mrb_int i, len = mrb_ary_len(mrb, body); /* calculate the length of the output, while at the same time converting the elements of the output array to string */ content.len = 0; for (i = 0; i != len; ++i) { mrb_value e = mrb_ary_entry(body, i); if (!mrb_string_p(e)) { e = h2o_mruby_to_str(mrb, e); if (mrb->exc != NULL) goto GotException; mrb_ary_set(mrb, body, i, e); } content.len += RSTRING_LEN(e); } /* allocate memory, and copy the response */ char *dst = content.base = h2o_mem_alloc_pool(&generator->req->pool, content.len); for (i = 0; i != len; ++i) { mrb_value e = mrb_ary_entry(body, i); assert(mrb_string_p(e)); memcpy(dst, RSTRING_PTR(e), RSTRING_LEN(e)); dst += RSTRING_LEN(e); } /* reset body to nil, now that we have read all data */ body = mrb_nil_value(); } /* use fiber in case we need to call #each */ if (!mrb_nil_p(body)) { h2o_start_response(generator->req, &generator->super); mrb_value receiver = h2o_mruby_send_chunked_init(generator, body); if (!mrb_nil_p(receiver)) h2o_mruby_run_fiber(generator, receiver, body, 0); return; } /* send the entire response immediately */ if (h2o_memis(generator->req->input.method.base, 
generator->req->input.method.len, H2O_STRLIT("HEAD"))) { h2o_start_response(generator->req, &generator->super); h2o_send(generator->req, NULL, 0, 1); } else { if (content.len < generator->req->res.content_length) { generator->req->res.content_length = content.len; } else { content.len = generator->req->res.content_length; } h2o_start_response(generator->req, &generator->super); h2o_send(generator->req, &content, 1, 1); } return; GotException: report_exception(generator->req, mrb); h2o_send_error_500(generator->req, "Internal Server Error", "Internal Server Error", 0); } void h2o_mruby_run_fiber(h2o_mruby_generator_t *generator, mrb_value receiver, mrb_value input, int *is_delegate) { mrb_state *mrb = generator->ctx->mrb; mrb_value output; mrb_int status; if (!mrb_obj_eq(mrb, generator->ctx->proc, receiver)) { mrb_gc_unregister(mrb, receiver); mrb_gc_protect(mrb, receiver); } h2o_mruby_current_generator = generator; while (1) { /* send input to fiber */ output = mrb_funcall_argv(mrb, receiver, generator->ctx->symbols.sym_call, 1, &input); if (mrb->exc != NULL) goto GotException; if (!mrb_array_p(output)) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "rack app did not return an array")); goto GotException; } /* fetch status */ mrb_value v = mrb_to_int(mrb, mrb_ary_entry(output, 0)); if (mrb->exc != NULL) goto GotException; status = mrb_fixnum(v); /* take special action depending on the status code */ if (status < 0) { if (status == H2O_MRUBY_CALLBACK_ID_EXCEPTION_RAISED) { mrb->exc = mrb_obj_ptr(mrb_ary_entry(output, 1)); goto GotException; } receiver = mrb_ary_entry(output, 1); int next_action = H2O_MRUBY_CALLBACK_NEXT_ACTION_IMMEDIATE; mrb_value args = mrb_ary_entry(output, 2); if (mrb_array_p(args)) { switch (status) { case H2O_MRUBY_CALLBACK_ID_SEND_CHUNKED_EOS: input = h2o_mruby_send_chunked_eos_callback(generator, receiver, args, &next_action); break; case H2O_MRUBY_CALLBACK_ID_HTTP_JOIN_RESPONSE: input = h2o_mruby_http_join_response_callback(generator, receiver, args, &next_action); break; case H2O_MRUBY_CALLBACK_ID_HTTP_FETCH_CHUNK: input = h2o_mruby_http_fetch_chunk_callback(generator, receiver, args, &next_action); break; default: input = mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "unexpected callback id sent from rack app"); break; } } else { input = mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "callback from rack app did not receive an array arg"); } switch (next_action) { case H2O_MRUBY_CALLBACK_NEXT_ACTION_STOP: return; case H2O_MRUBY_CALLBACK_NEXT_ACTION_ASYNC: goto Async; default: assert(next_action == H2O_MRUBY_CALLBACK_NEXT_ACTION_IMMEDIATE); break; } goto Next; } /* if no special actions were necessary, then the output is a rack response */ break; Next: mrb_gc_protect(mrb, receiver); mrb_gc_protect(mrb, input); } h2o_mruby_current_generator = NULL; if (!(100 <= status && status <= 999)) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "status returned from rack app is out of range")); goto GotException; } /* send the response (unless req is already closed) */ if (generator->req == NULL) return; if (generator->req->_generator != NULL) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "unexpectedly received a rack response")); goto GotException; } send_response(generator, status, output, is_delegate); return; GotException: h2o_mruby_current_generator = NULL; if (generator->req != NULL) { report_exception(generator->req, mrb); if (generator->req->_generator == NULL) { h2o_send_error_500(generator->req, "Internal Server 
Error", "Internal Server Error", 0); } else { h2o_mruby_send_chunked_close(generator); } } return; Async: h2o_mruby_current_generator = NULL; if (!mrb_obj_eq(mrb, generator->ctx->proc, receiver)) mrb_gc_register(mrb, receiver); return; } h2o_mruby_handler_t *h2o_mruby_register(h2o_pathconf_t *pathconf, h2o_mruby_config_vars_t *vars) { h2o_mruby_handler_t *handler = (void *)h2o_create_handler(pathconf, sizeof(*handler)); handler->super.on_context_init = on_context_init; handler->super.on_context_dispose = on_context_dispose; handler->super.dispose = on_handler_dispose; handler->super.on_req = on_req; handler->config.source = h2o_strdup(NULL, vars->source.base, vars->source.len); if (vars->path != NULL) handler->config.path = h2o_strdup(NULL, vars->path, SIZE_MAX).base; return handler; } mrb_value h2o_mruby_each_to_array(h2o_mruby_context_t *handler_ctx, mrb_value src) { return mrb_funcall_argv(handler_ctx->mrb, mrb_ary_entry(handler_ctx->constants, H2O_MRUBY_PROC_EACH_TO_ARRAY), handler_ctx->symbols.sym_call, 1, &src); } static int iterate_headers_handle_pair(h2o_mruby_context_t *handler_ctx, mrb_value name, mrb_value value, int (*cb)(h2o_mruby_context_t *, h2o_iovec_t, h2o_iovec_t, void *), void *cb_data) { /* convert name and value to string */ name = h2o_mruby_to_str(handler_ctx->mrb, name); if (handler_ctx->mrb->exc != NULL) return -1; value = h2o_mruby_to_str(handler_ctx->mrb, value); if (handler_ctx->mrb->exc != NULL) return -1; /* call the callback, splitting the values with '\n' */ const char *vstart = RSTRING_PTR(value), *vend = vstart + RSTRING_LEN(value), *eol; while (1) { for (eol = vstart; eol != vend; ++eol) if (*eol == '\n') break; if (cb(handler_ctx, h2o_iovec_init(RSTRING_PTR(name), RSTRING_LEN(name)), h2o_iovec_init(vstart, eol - vstart), cb_data) != 0) return -1; if (eol == vend) break; vstart = eol + 1; } return 0; } int h2o_mruby_iterate_headers(h2o_mruby_context_t *handler_ctx, mrb_value headers, int (*cb)(h2o_mruby_context_t *, h2o_iovec_t, h2o_iovec_t, void *), void *cb_data) { mrb_state *mrb = handler_ctx->mrb; if (!(mrb_hash_p(headers) || mrb_array_p(headers))) { headers = h2o_mruby_each_to_array(handler_ctx, headers); if (mrb->exc != NULL) return -1; assert(mrb_array_p(headers)); } if (mrb_hash_p(headers)) { mrb_value keys = mrb_hash_keys(mrb, headers); mrb_int i, len = mrb_ary_len(mrb, keys); for (i = 0; i != len; ++i) { mrb_value k = mrb_ary_entry(keys, i); mrb_value v = mrb_hash_get(mrb, headers, k); if (iterate_headers_handle_pair(handler_ctx, k, v, cb, cb_data) != 0) return -1; } } else { assert(mrb_array_p(headers)); mrb_int i, len = mrb_ary_len(mrb, headers); for (i = 0; i != len; ++i) { mrb_value pair = mrb_ary_entry(headers, i); if (!mrb_array_p(pair)) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "array element of headers MUST by an array")); return -1; } if (iterate_headers_handle_pair(handler_ctx, mrb_ary_entry(pair, 0), mrb_ary_entry(pair, 1), cb, cb_data) != 0) return -1; } } return 0; }
1
11,302
Just one more thing... How about doing all of the following? 1. renaming h2o.rb to bootstrap.rb 2. requiring and including the acl code _from_ bootstrap.rb
h2o-h2o
c
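For illustration, a minimal sketch of what that suggestion could look like, assuming a hypothetical bootstrap.rb (renamed from h2o.rb) that itself requires acl.rb and includes H2O::ACL; setup_globals() in the patch above would then shrink to a single require:

/* hypothetical revision of the patch above; bootstrap.rb is assumed to
 * contain `require "acl.rb"` and `include H2O::ACL` itself */
static void setup_globals(mrb_state *mrb)
{
    const char *root = getenv("H2O_ROOT");
    if (root == NULL)
        root = H2O_TO_STR(H2O_ROOT);
    mrb_gv_set(mrb, mrb_intern_lit(mrb, "$H2O_ROOT"), mrb_str_new(mrb, root, strlen(root)));

    h2o_mruby_eval_expr(mrb, "$LOAD_PATH << \"#{$H2O_ROOT}/share/h2o/mruby\"");
    h2o_mruby_assert(mrb);

    /* require the bootstrap library; it pulls in the ACL helpers in turn */
    h2o_mruby_eval_expr(mrb, "require \"bootstrap.rb\"");
    h2o_mruby_assert(mrb);
}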
@@ -82,14 +82,6 @@ double get_timestamp (void) return ((double) tm.tv_sec + (tm.tv_nsec/1.0e9)); } -static void timeout_cb (flux_t *h, cron_task_t *t, void *arg) -{ - cron_entry_t *e = arg; - flux_log (h, LOG_INFO, "cron-%ju: task timeout at %.2fs. Killing", - e->id, e->timeout); - cron_task_kill (t, SIGTERM); -} - static int cron_entry_run_task (cron_entry_t *e) { flux_t *h = e->ctx->h;
1
/************************************************************\ * Copyright 2016 Lawrence Livermore National Security, LLC * (c.f. AUTHORS, NOTICE.LLNS, COPYING) * * This file is part of the Flux resource manager framework. * For details, see https://github.com/flux-framework. * * SPDX-License-Identifier: LGPL-3.0 \************************************************************/ /* flux cron: cron-like service for flux */ /* */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <getopt.h> #include <libgen.h> #include <pthread.h> #include <unistd.h> #include <stdbool.h> #include <sys/time.h> #include <ctype.h> #include <signal.h> #include <flux/core.h> #include <jansson.h> #include "src/common/libczmqcontainers/czmq_containers.h" #include "src/common/libutil/log.h" #include "src/common/libutil/fsd.h" #include "task.h" #include "entry.h" #include "types.h" struct cron_ctx { flux_t * h; uint64_t next_id; /* Id for next cron entry */ char * sync_event; /* If set, sync entries to event */ flux_msg_handler_t * mh; /* sync event message handler */ zlist_t * entries; zlist_t * deferred; /* list of deferred entries */ double last_sync; /* timestamp of last sync event */ double sync_epsilon; /* allow tasks to run for this number of seconds after last- sync before deferring */ char * cwd; /* cwd to avoid constant lookups */ }; /************************************************************************** * Forward declarations */ static cron_entry_t *cron_entry_create (cron_ctx_t *ctx, const flux_msg_t *msg); static void cron_entry_destroy (cron_entry_t *e); static int cron_entry_stop (cron_entry_t *e); static void cron_entry_finished_handler (flux_t *h, cron_task_t *t, void *arg); static void cron_entry_io_cb (flux_t *h, cron_task_t *t, void *arg, bool is_stderr, const char *data, int datalen); static int cron_entry_run_task (cron_entry_t *e); static int cron_entry_defer (cron_entry_t *e); /**************************************************************************/ /* Public apis */ void *cron_entry_type_data (cron_entry_t *e) { return e->data; } double get_timestamp (void) { struct timespec tm; clock_gettime (CLOCK_REALTIME, &tm); return ((double) tm.tv_sec + (tm.tv_nsec/1.0e9)); } static void timeout_cb (flux_t *h, cron_task_t *t, void *arg) { cron_entry_t *e = arg; flux_log (h, LOG_INFO, "cron-%ju: task timeout at %.2fs. Killing", e->id, e->timeout); cron_task_kill (t, SIGTERM); } static int cron_entry_run_task (cron_entry_t *e) { flux_t *h = e->ctx->h; if (cron_task_run (e->task, e->rank, e->command, e->cwd, e->env) < 0) { flux_log_error (h, "cron-%ju: cron_task_run", e->id); /* Run "finished" handler since this task is done */ cron_entry_finished_handler (h, e->task, e); return (-1); } e->stats.lastrun = get_timestamp (); return (0); } static int cron_entry_increment (cron_entry_t *e) { ++e->stats.total; return ++e->stats.count; } int cron_entry_schedule_task (cron_entry_t *e) { flux_t *h = e->ctx->h; /* Refuse to run more than one task at once */ if (e->task) { flux_log (h, LOG_INFO, "cron-%ju: %s: task still running or scheduled", e->id, e->name); return (0); } if (!(e->task = cron_task_new (h, cron_entry_finished_handler, e))) { flux_log_error (h, "cron_task_create"); return -1; } cron_task_on_io (e->task, cron_entry_io_cb); if (e->timeout >= 0.0) cron_task_set_timeout (e->task, e->timeout, timeout_cb); /* if we've reached our (non-zero) repeat count, prematurely stop * the current entry (i.e. 
remove it from event loop, but leave * it in ctx->entries so it can be listed/queried) */ if (cron_entry_increment (e) == e->repeat) cron_entry_stop (e); return cron_entry_defer (e); } /**************************************************************************/ static void cron_entry_io_cb (flux_t *h, cron_task_t *t, void *arg, bool is_stderr, const char *data, int datalen) { cron_entry_t *e = arg; int level = is_stderr ? LOG_ERR : LOG_INFO; flux_log (h, level, "cron-%ju[%s]: rank=%d: command=\"%s\": \"%s\"", e->id, e->name, e->rank, e->command, data); } /* Push task t onto the front of the completed tasks list for * entry e. If the list has grown past completed-task-size, then drop the * tail task on the list. */ int cron_entry_push_finished_task (cron_entry_t *e, struct cron_task *t) { if (zlist_push (e->finished_tasks, t) < 0) return (-1); if (zlist_size (e->finished_tasks) > e->task_history_count) { struct cron_task *tdel = zlist_tail (e->finished_tasks); if (tdel) { zlist_remove (e->finished_tasks, tdel); cron_task_destroy (tdel); } } return (0); } static void cron_entry_failure (cron_entry_t *e) { e->stats.failure++; e->stats.failcount++; if (e->stop_on_failure && e->stats.failcount >= e->stop_on_failure) { flux_log (e->ctx->h, LOG_ERR, "cron-%ju: exceeded failure limit of %d. stopping", e->id, e->stop_on_failure); cron_entry_stop (e); } } static void cron_entry_finished_handler (flux_t *h, cron_task_t *t, void *arg) { cron_entry_t *e = arg; if (strcmp (cron_task_state (t), "Exec Failure") == 0) { flux_log_error (h, "cron-%ju: failed to run %s", e->id, e->command); cron_entry_failure (e); } else if (strcmp (cron_task_state (t), "Rexec Failure") == 0) { flux_log_error (h, "cron-%ju: failure running %s", e->id, e->command); cron_entry_failure (e); } else if (cron_task_status (t) != 0) { flux_log (h, LOG_ERR, "cron-%ju: \"%s\": Failed: %s", e->id, e->command, cron_task_state (t)); cron_entry_failure (e); } else e->stats.success++; /* * Push the completed task onto the completed task list and * drop a task if needed. Reset e->task to NULL since there * is currently no active task. */ if (cron_entry_push_finished_task (e, t) < 0) return; e->task = NULL; /* * If destruction of this cron entry has been requested, complete * the destroy here. */ if (e->destroyed) cron_entry_destroy (e); } static int cron_entry_stop (cron_entry_t *e) { if (!e->data || e->stopped) { errno = EINVAL; return (-1); } e->ops.stop (e->data); e->stopped = 1; return (0); } /* * Callback used to stop a cron entry safely. */ static void entry_stop_cb (flux_reactor_t *r, flux_watcher_t *w, int revents, void *arg) { cron_entry_stop (arg); flux_watcher_stop (w); flux_watcher_destroy (w); } /* Stop cron entry `e` "safely" by waiting until the next * "prepare" callback. Temporary watcher created here wil lbe * destroyed within prepare_cb. 
*/ int cron_entry_stop_safe (cron_entry_t *e) { flux_reactor_t *r = flux_get_reactor (e->ctx->h); flux_watcher_t *w = flux_prepare_watcher_create (r, entry_stop_cb, e); if (!w) return (-1); flux_watcher_start (w); return (0); } static int cron_entry_start (cron_entry_t *e) { if (!e->data || !e->stopped) { errno = EINVAL; return (-1); } e->ops.start (e->data); e->stats.starttime = get_timestamp (); e->stats.count = 0; e->stats.failcount = 0; e->stopped = 0; return (0); } static void cron_entry_destroy (cron_entry_t *e) { struct cron_task *t; if (e == NULL) return; /* * Stop this entry first, then set a destroyed flag in the case we * still have a task running */ cron_entry_stop (e); e->destroyed = 1; /* * If we have a task still running, we have to leave cron_entry * around until the task is complete. */ if (e->task) return; /* * Before destroying entry, remove it from entries list: */ if (e->ctx && e->ctx->entries) zlist_remove (e->ctx->entries, e); if (e->data) { e->ops.destroy (e->data); e->data = NULL; } free (e->name); free (e->command); free (e->typename); free (e->cwd); if (e->env) json_decref (e->env); if (e->finished_tasks) { t = zlist_first (e->finished_tasks); while (t) { cron_task_destroy (t); t = zlist_next (e->finished_tasks); } zlist_destroy (&e->finished_tasks); } free (e); } static void deferred_cb (flux_t *h, flux_msg_handler_t *mh, const flux_msg_t *msg, void *arg) { cron_ctx_t *ctx = arg; cron_entry_t *e; while ((e = zlist_pop (ctx->deferred))) cron_entry_run_task (e); flux_msg_handler_stop (ctx->mh); ctx->last_sync = get_timestamp (); } static int cron_entry_defer (cron_entry_t *e) { cron_ctx_t *ctx = e->ctx; double now = get_timestamp (); /* If no default synchronization event or the time since the last * sync event is very short, then run task immediately */ if (!ctx->mh || (now - ctx->last_sync) < ctx->sync_epsilon) return cron_entry_run_task (e); /* O/w, defer this task: push entry onto deferred list, and start * sync event message handler if needed */ if (zlist_push (ctx->deferred, e) < 0) return (-1); e->stats.deferred++; flux_log (ctx->h, LOG_DEBUG, "deferring cron-%ju to next %s event", e->id, ctx->sync_event); if (zlist_size (ctx->deferred) == 1) flux_msg_handler_start (ctx->mh); return (0); } static void cron_stats_init (struct cron_stats *s) { memset (s, 0, sizeof (*s)); s->ctime = get_timestamp (); } /* * Create a new cron entry from JSON blob */ static cron_entry_t *cron_entry_create (cron_ctx_t *ctx, const flux_msg_t *msg) { flux_t *h = ctx->h; cron_entry_t *e = NULL; json_t *args = NULL; const char *name; const char *command; const char *type; const char *cwd = NULL; int saved_errno = EPROTO; /* Get required fields "type", "name" and "command" */ if (flux_msg_unpack (msg, "{ s:s, s:s, s:s, s:O }", "type", &type, "name", &name, "command", &command, "args", &args) < 0) { flux_log_error (h, "cron.create: Failed to get name/command/args"); goto done; } saved_errno = ENOMEM; if ((e = calloc (1, sizeof (*e))) == NULL) { flux_log_error (h, "cron.create: Out of memory"); goto done; } e->id = ctx->next_id++;; e->ctx = ctx; e->stopped = 1; if (!(e->name = strdup (name)) || !(e->command = strdup (command))) { saved_errno = errno; goto out_err; } cron_stats_init (&e->stats); /* * Set defaults for optional fields: */ e->repeat = 0; /* Max number of times we'll run target command (0 = inf) */ e->rank = 0; /* Rank on which to run commands (default = 0) */ e->task_history_count = 1; /* Default number of entries in history list */ e->stop_on_failure = 0; /* Whether the 
cron job is stopped on failure */ e->timeout = -1.0; /* Task timeout (default -1, no timeout) */ if (flux_msg_unpack (msg, "{ s?O, s?s, s?i, s?i, s?i, s?i, s?F }", "environ", &e->env, "cwd", &cwd, "repeat", &e->repeat, "rank", &e->rank, "task-history-count", &e->task_history_count, "stop-on-failure", &e->stop_on_failure, "timeout", &e->timeout) < 0) { saved_errno = EPROTO; flux_log_error (h, "cron.create: flux_msg_unpack"); goto out_err; } if (!cwd) cwd = ctx->cwd; if ((e->cwd = strdup (cwd)) == NULL) { flux_log_error (h, "cron.create: strdup (cwd)"); errno = ENOMEM; goto out_err; } /* List for all completed tasks up to task-history-count */ if (!(e->finished_tasks = zlist_new ())) { saved_errno = errno; flux_log_error (h, "cron_entry_create: zlist_new"); goto out_err; } /* * Now, create type-specific data for this entry from type * name and type-specific data in "args" key: */ if (cron_type_operations_lookup (type, &e->ops) < 0) { saved_errno = ENOSYS; /* year,month,day,etc. not supported */ goto out_err; } if ((e->typename = strdup (type)) == NULL) { saved_errno = errno; goto out_err; } if (!(e->data = e->ops.create (h, e, args))) { flux_log_error (h, "ops.create"); saved_errno = errno; goto out_err; } json_decref (args); // Start the entry watcher for this type: cron_entry_start (e); done: return (e); out_err: cron_entry_destroy (e); errno = saved_errno; return (NULL); } static void cron_ctx_sync_event_stop (cron_ctx_t *ctx) { if (ctx->sync_event) { if (flux_event_unsubscribe (ctx->h, ctx->sync_event) < 0) flux_log_error (ctx->h, "destroy: flux_event_unsubscribe\n"); flux_msg_handler_destroy (ctx->mh); ctx->mh = NULL; free (ctx->sync_event); ctx->sync_event = NULL; } } static void cron_ctx_destroy (cron_ctx_t *ctx) { if (ctx == NULL) return; cron_ctx_sync_event_stop (ctx); if (ctx->entries) { cron_entry_t *e; while ((e = zlist_pop (ctx->entries))) cron_entry_destroy (e); zlist_destroy (&ctx->entries); } if (ctx->deferred) zlist_destroy (&ctx->deferred); free (ctx->cwd); free (ctx); } static int cron_ctx_sync_event_init (cron_ctx_t *ctx, const char *topic) { struct flux_match match = FLUX_MATCH_EVENT; flux_log (ctx->h, LOG_INFO, "synchronizing cron tasks to event %s", topic); if ((ctx->sync_event = strdup (topic)) == NULL) { flux_log_error (ctx->h, "sync_event_init: strdup"); return (-1); } match.topic_glob = ctx->sync_event; ctx->mh = flux_msg_handler_create (ctx->h, match, deferred_cb, (void *) ctx); if (!ctx->mh) { flux_log_error (ctx->h, "sync_event_init: msg_handler_create"); return (-1); } if (flux_event_subscribe (ctx->h, topic) < 0) { flux_log_error (ctx->h, "sync_event_init: subscribe (%s)", topic); return (-1); } /* Do not start the handler until we have entries on the the list */ return (0); } static cron_ctx_t * cron_ctx_create (flux_t *h) { cron_ctx_t *ctx = calloc (1, sizeof (*ctx)); if (ctx == NULL) { flux_log_error (h, "cron_ctx_create"); goto error; } ctx->sync_event = NULL; ctx->last_sync = 0.0; ctx->next_id = 1; /* Default: run synchronized events up to 15ms after sync event */ ctx->sync_epsilon = 0.015; ctx->mh = NULL; if (!(ctx->entries = zlist_new ()) || !(ctx->deferred = zlist_new ())) { flux_log_error (h, "cron_ctx_create: zlist_new"); goto error; } if (!(ctx->cwd = get_current_dir_name ())) { flux_log_error (h, "cron_ctx_create: get_get_current_dir_name"); goto error; } ctx->h = h; return ctx; error: cron_ctx_destroy (ctx); return (NULL); } /**************************************************************************/ static json_t *cron_stats_to_json (struct 
cron_stats *stats) { return json_pack ("{ s:f, s:f, s:f, s:I, s:I, s:I, s:I, s:I, s:I }", "ctime", stats->ctime, "starttime", stats->starttime, "lastrun", stats->lastrun, "count", stats->count, "failcount", stats->failcount, "total", stats->total, "success", stats->success, "failure", stats->failure, "deferred", stats->deferred); } static json_t *cron_entry_to_json (cron_entry_t *e) { cron_task_t *t; json_t *o, *to; json_t *tasks; /* * Common entry contents: */ if (!(o = json_pack ("{ s:I, s:i, s:s, s:s, s:i, s:b, s:s }", "id", (json_int_t) e->id, "rank", e->rank, "name", e->name, "command", e->command, "repeat", e->repeat, "stopped", e->stopped, "type", e->typename))) return NULL; if (e->timeout >= 0.0) json_object_set_new (o, "timeout", json_real (e->timeout)); if ((to = cron_stats_to_json (&e->stats))) json_object_set_new (o, "stats", to); /* * Add type specific json blob, under typedata key: */ if ((to = e->ops.tojson (e->data))) json_object_set_new (o, "typedata", to); /* * Add all task information, starting with any current task: */ if (!(tasks = json_array ())) goto fail; if (e->task && (to = cron_task_to_json (e->task))) json_array_append_new (tasks, to); t = zlist_first (e->finished_tasks); while (t) { if ((to = cron_task_to_json (t))) json_array_append_new (tasks, to); t = zlist_next (e->finished_tasks); } json_object_set_new (o, "tasks", tasks); return (o); fail: json_decref (o); return NULL; } /**************************************************************************/ /* * Handle cron.create: create a new cron entry */ static void cron_create_handler (flux_t *h, flux_msg_handler_t *w, const flux_msg_t *msg, void *arg) { cron_entry_t *e; cron_ctx_t *ctx = arg; json_t *out = NULL; char *json_str = NULL; if (!(e = cron_entry_create (ctx, msg))) goto error; if (zlist_append (ctx->entries, e) < 0) { errno = ENOMEM; goto error; } if ((out = cron_entry_to_json (e))) { json_str = json_dumps (out, JSON_COMPACT); json_decref (out); } if (flux_respond (h, msg, json_str) < 0) flux_log_error (h, "cron.request: flux_respond"); free (json_str); return; error: if (flux_respond_error (h, msg, errno, NULL) < 0) flux_log_error (h, "cron.request: flux_respond_error"); } static void cron_sync_handler (flux_t *h, flux_msg_handler_t *w, const flux_msg_t *msg, void *arg) { cron_ctx_t *ctx = arg; const char *topic; int disable; double epsilon; if (flux_request_unpack (msg, NULL, "{}") < 0) goto error; if (flux_request_unpack (msg, NULL, "{ s:s }", "topic", &topic) < 0) topic = NULL; /* Disable sync-event */ if (flux_request_unpack (msg, NULL, "{ s:b }", "disable", &disable) < 0) disable = false; if (topic || disable) cron_ctx_sync_event_stop (ctx); if (topic) { if (cron_ctx_sync_event_init (ctx, topic) < 0) goto error; } if (!flux_request_unpack (msg, NULL, "{ s:F }", "sync_epsilon", &epsilon)) ctx->sync_epsilon = epsilon; if (ctx->sync_event) { if (flux_respond_pack (h, msg, "{ s:s s:f }", "sync_event", ctx->sync_event, "sync_epsilon", ctx->sync_epsilon) < 0) flux_log_error (h, "cron.request: flux_respond_pack"); } else { if (flux_respond_pack (h, msg, "{ s:b }", "sync_disabled", true) < 0) flux_log_error (h, "cron.request: flux_respond_pack"); } return; error: if (flux_respond_error (h, msg, errno, NULL) < 0) flux_log_error (h, "cron.request: flux_respond_error"); } static cron_entry_t *cron_ctx_find_entry (cron_ctx_t *ctx, int64_t id) { cron_entry_t *e = zlist_first (ctx->entries); while (e && e->id != id) e = zlist_next (ctx->entries); return (e); } /* * Return a cron entry referenced by request 
in flux message msg. * [service] is name of service for logging purposes. */ static cron_entry_t *entry_from_request (flux_t *h, const flux_msg_t *msg, cron_ctx_t *ctx, const char *service) { int64_t id; if (flux_request_unpack (msg, NULL, "{ s:I }", "id", &id) < 0) { flux_log_error (h, "%s: request decodef", service); return NULL; } errno = ENOENT; return cron_ctx_find_entry (ctx, id); } /* * "cron.delete" handler */ static void cron_delete_handler (flux_t *h, flux_msg_handler_t *w, const flux_msg_t *msg, void *arg) { cron_entry_t *e; cron_ctx_t *ctx = arg; json_t *out = NULL; char *json_str = NULL; int kill = false; if (!(e = entry_from_request (h, msg, ctx, "cron.delete"))) goto error; out = cron_entry_to_json (e); if (e->task && !flux_request_unpack (msg, NULL, "{ s:b }", "kill", &kill) && kill == true) cron_task_kill (e->task, SIGTERM); cron_entry_destroy (e); if (out) json_str = json_dumps (out, JSON_COMPACT); if (flux_respond (h, msg, json_str) < 0) flux_log_error (h, "cron.delete: flux_respond"); free (json_str); json_decref (out); return; error: if (flux_respond_error (h, msg, errno, NULL) < 0) flux_log_error (h, "cron.delete: flux_respond_error"); } /* * "cron.stop" handler: stop a cron entry until restarted */ static void cron_stop_handler (flux_t *h, flux_msg_handler_t *w, const flux_msg_t *msg, void *arg) { cron_entry_t *e; cron_ctx_t *ctx = arg; json_t *out = NULL; char *json_str = NULL; if (!(e = entry_from_request (h, msg, ctx, "cron.stop"))) goto error; if (cron_entry_stop (e) < 0) goto error; if ((out = cron_entry_to_json (e))) { json_str = json_dumps (out, JSON_COMPACT); json_decref (out); } if (flux_respond (h, msg, json_str) < 0) flux_log_error (h, "cron.stop: flux_respond"); free (json_str); return; error: if (flux_respond_error (h, msg, errno, NULL) < 0) flux_log_error (h, "cron.stop: flux_respond_error"); } /* * "cron.start" handler: start a stopped cron entry */ static void cron_start_handler (flux_t *h, flux_msg_handler_t *w, const flux_msg_t *msg, void *arg) { cron_entry_t *e; cron_ctx_t *ctx = arg; json_t *out = NULL; char *json_str = NULL; if (!(e = entry_from_request (h, msg, ctx, "cron.start"))) goto error; if (cron_entry_start (e) < 0) goto error; if ((out = cron_entry_to_json (e))) { json_str = json_dumps (out, JSON_COMPACT); json_decref (out); } if (flux_respond (h, msg, json_str) < 0) flux_log_error (h, "cron.start: flux_respond"); free (json_str); return; error: if (flux_respond_error (h, msg, errno, NULL) < 0) flux_log_error (h, "cron.start: flux_respond_error"); } /* * Handle "cron.list" -- dump a list of current cron entries via JSON */ static void cron_ls_handler (flux_t *h, flux_msg_handler_t *w, const flux_msg_t *msg, void *arg) { cron_ctx_t *ctx = arg; cron_entry_t *e = NULL; char *json_str = NULL; json_t *out = json_object (); json_t *entries = json_array (); if (out == NULL || entries == NULL) { flux_respond_error (h, msg, ENOMEM, NULL); flux_log_error (h, "cron.list: Out of memory"); return; } e = zlist_first (ctx->entries); while (e) { json_t *entry = cron_entry_to_json (e); if (entry == NULL) flux_log_error (h, "cron_entry_to_json"); else json_array_append_new (entries, entry); e = zlist_next (ctx->entries); } json_object_set_new (out, "entries", entries); if (!(json_str = json_dumps (out, JSON_COMPACT))) flux_log_error (h, "cron.list: json_dumps"); else if (flux_respond (h, msg, json_str) < 0) flux_log_error (h, "cron.list: flux_respond"); json_decref (out); free (json_str); } 
/**************************************************************************/ static const struct flux_msg_handler_spec htab[] = { { FLUX_MSGTYPE_REQUEST, "cron.create", cron_create_handler, 0 }, { FLUX_MSGTYPE_REQUEST, "cron.delete", cron_delete_handler, 0 }, { FLUX_MSGTYPE_REQUEST, "cron.list", cron_ls_handler, 0 }, { FLUX_MSGTYPE_REQUEST, "cron.stop", cron_stop_handler, 0 }, { FLUX_MSGTYPE_REQUEST, "cron.start", cron_start_handler, 0 }, { FLUX_MSGTYPE_REQUEST, "cron.sync", cron_sync_handler, 0 }, FLUX_MSGHANDLER_TABLE_END, }; static void process_args (cron_ctx_t *ctx, int ac, char **av) { int i; for (i = 0; i < ac; i++) { if (strncmp (av[i], "sync=", 5) == 0) cron_ctx_sync_event_init (ctx, (av[i])+5); else if (strncmp (av[i], "sync_epsilon=", 13) == 0) { char *s = (av[i])+13; if (fsd_parse_duration (s, &ctx->sync_epsilon) < 0) flux_log_error (ctx->h, "option %s ignored", av[i]); } else flux_log (ctx->h, LOG_ERR, "Unknown option `%s'", av[i]); } } int mod_main (flux_t *h, int ac, char **av) { int rc = -1; flux_msg_handler_t **handlers = NULL; cron_ctx_t *ctx = cron_ctx_create (h); if (ctx == NULL) return -1; process_args (ctx, ac, av); if (flux_msg_handler_addvec (h, htab, ctx, &handlers) < 0) { flux_log_error (h, "flux_msg_handler_addvec"); goto done; } if ((rc = flux_reactor_run (flux_get_reactor (h), 0)) < 0) flux_log_error (h, "flux_reactor_run"); done: flux_msg_handler_delvec (handlers); cron_ctx_destroy (ctx); return rc; } MOD_NAME ("cron"); /* * vi:tabstop=4 shiftwidth=4 expandtab */
1
31,452
Commit message: "remove" is misspelled.
flux-framework-flux-core
c
@@ -60,6 +60,11 @@ public abstract class BytesValues { return value.slice(toTrim); } + public static BytesValue trimTrailingZeros(final BytesValue value) { + final int toTrim = trailingZeros(value); + return value.slice(0, value.size() - toTrim + 1); + } + /** * Returns the smallest bytes value whose bytes correspond to the provided long. That is, the * returned value may be of size less than 8 if the provided int has leading zero bytes.
1
/* * Copyright 2018 ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package org.hyperledger.besu.util.bytes; import static com.google.common.base.Preconditions.checkArgument; import java.math.BigInteger; import java.nio.charset.StandardCharsets; import java.util.Base64; /** Static utility methods to work with {@link BytesValue} and {@link MutableBytesValue}. */ public abstract class BytesValues { private static final int MAX_UNSIGNED_BYTE = (1 << 8) - 1; private static final int MAX_UNSIGNED_SHORT = (1 << 16) - 1; private static final long MAX_UNSIGNED_INT = (1L << 32) - 1; private BytesValues() {} private static byte b(final long l) { return (byte) (l & 0xFF); } /** * Returns the number of zero-valued bytes in the bytes value. * * @param value The value of which to count tye zero-valued bytes within * @return the number of zero-valued bytes in the bytes value. */ public static int countZeros(final BytesValue value) { int count = 0; for (int i = 0; i < value.size(); i++) { if (value.get(i) == 0) { ++count; } } return count; } /** * Return a slice of the provided value representing the same value but without any potential * leading zeros. * * @param value The value of which to trim leading zeros. * @return {@code value} if its left-most byte is non zero, or a slice that exclude any leading * zeros. Note that if {@code value} contains only zeros, it will return an empty value. */ public static BytesValue trimLeadingZeros(final BytesValue value) { final int toTrim = leadingZeros(value); return value.slice(toTrim); } /** * Returns the smallest bytes value whose bytes correspond to the provided long. That is, the * returned value may be of size less than 8 if the provided int has leading zero bytes. * * @param l The long from which to create the bytes value. * @return The minimal bytes representation corresponding to {@code l}. */ public static BytesValue toMinimalBytes(final long l) { if (l == 0) return BytesValue.EMPTY; final int zeros = Long.numberOfLeadingZeros(l); final int resultBytes = 8 - (zeros / 8); final byte[] result = new byte[resultBytes]; int shift = 0; for (int i = 0; i < resultBytes; i++) { result[resultBytes - i - 1] = b(l >> shift); shift += 8; } return BytesValue.wrap(result); } /** * Returns a 1 byte value corresponding to the provided value interpreted as an unsigned byte * value. * * @param v The value, which must fit an unsigned byte. * @return A single byte value corresponding to {@code v}. * @throws IllegalArgumentException if {@code v < 0} or {@code v} is too big to fit an unsigned * byte (that is, if {@code v >= (1 << 8)}). */ public static BytesValue ofUnsignedByte(final int v) { checkArgument( v >= 0 && v <= MAX_UNSIGNED_BYTE, "Value %s cannot be represented as an unsigned byte (it is negative or too big)", v); final byte[] res = new byte[1]; res[0] = b(v); return BytesValue.wrap(res); } /** * Returns a 2 bytes value corresponding to the provided value interpreted as an unsigned short * value. * * @param v The value, which must fit an unsigned short. 
* @return A 2 bytes value corresponding to {@code v}. * @throws IllegalArgumentException if {@code v < 0} or {@code v} is too big to fit an unsigned * 2-bytes short (that is, if {@code v >= (1 << 16)}). */ public static BytesValue ofUnsignedShort(final int v) { checkArgument( v >= 0 && v <= MAX_UNSIGNED_SHORT, "Value %s cannot be represented as an unsigned short (it is negative or too big)", v); final byte[] res = new byte[2]; res[0] = b(v >> 8); res[1] = b(v); return BytesValue.wrap(res); } /** * Returns a 4 bytes value corresponding to the provided value interpreted as an unsigned int * value. * * @param v The value, which must fit an unsigned int. * @return A 4 bytes value corresponding to {@code v}. * @throws IllegalArgumentException if {@code v < 0} or {@code v} is too big to fit an unsigned * 4-bytes int (that is, if {@code v >= (1L << 32)}). */ public static BytesValue ofUnsignedInt(final long v) { checkArgument( v >= 0 && v <= MAX_UNSIGNED_INT, "Value %s cannot be represented as an unsigned int (it is negative or too big)", v); final byte[] res = new byte[4]; res[0] = b(v >> 24); res[1] = b(v >> 16); res[2] = b(v >> 8); res[3] = b(v); return BytesValue.wrap(res); } /** * Extracts the int value corresponding to the provide bytes, which must be 4 bytes or less. * * <p>This is the inverse operation to {@link #toMinimalBytes(long)} (when the argument of said * method fits an int). * * @param value The value from which to extract the value as an int. If must be 4 bytes or less. * If it is strictly less than 4 bytes, this behave as if the value was padded with 0 on the * left to fit 4 bytes. * @return The extracted int value. * @throws IllegalArgumentException if the value has strictly more than 4 bytes. */ public static int extractInt(final BytesValue value) { final int size = value.size(); checkArgument(size <= 4, "Cannot extract an int from a value of size %s > 4", size); if (size == 0) return 0; int res = 0; int shift = 0; for (int i = 0; i < size; i++) { res |= (value.get(size - i - 1) & 0xFF) << shift; shift += 8; } return res; } /** * Extracts the long value corresponding to the provide bytes, which must be 8 bytes or less. * * <p>This is the inverse operation to {@link #toMinimalBytes(long)}. * * @param value The value from which to extract the value as a long. If must be 8 bytes or less. * If it is strictly less than 8 bytes, this behave as if the value was padded with 0 on the * left to fit 8 bytes. * @return The extracted long value. * @throws IllegalArgumentException if the value has strictly more than 8 bytes. */ public static long extractLong(final BytesValue value) { final int size = value.size(); checkArgument(size <= 8, "Cannot extract a long from a value of size %s > 8", size); if (size == 0) return 0; long res = 0; int shift = 0; for (int i = 0; i < size; i++) { res |= ((long) value.get(size - i - 1) & 0xFF) << shift; shift += 8; } return res; } /** * Creates a newly allocated value containing the concatenation of the values provided. * * @param values The value to copy/concatenate. * @return A newly allocated value containing the result of containing the value from {@code * values} in their provided order. */ public static BytesValue concatenate(final BytesValue... 
values) { int size = 0; for (final BytesValue value : values) { size += value.size(); } final MutableBytesValue result = MutableBytesValue.create(size); int offset = 0; for (final BytesValue value : values) { value.copyTo(result, offset); offset += value.size(); } return result; } /** * The BigInteger corresponding to interpreting the provided bytes as an unsigned integer. * * @param bytes The bytes to interpret. * @return A positive (or zero) {@link BigInteger} corresponding to this value interpreted as an * unsigned integer representation. */ public static BigInteger asUnsignedBigInteger(final BytesValue bytes) { return new BigInteger(1, bytes.getArrayUnsafe()); } /** * Decode the bytes as a UTF-8 String. * * @param bytes The bytes to decode. * @return A utf-8 string corresponding to the bytes data. */ public static String asString(final BytesValue bytes) { return new String(bytes.extractArray(), StandardCharsets.UTF_8); } /** * The BigInteger corresponding to interpreting the provided bytes as a signed integer. * * @param bytes The bytes to interpret. * @return A {@link BigInteger} corresponding to this value interpreted as a two's-complement * integer representation. */ public static BigInteger asSignedBigInteger(final BytesValue bytes) { // An empty byte value is an invalid magnitude as far as BigInteger is concerned because it // wants at least 1 sign bit. if (bytes.size() == 0) { return BigInteger.ZERO; } return new BigInteger(bytes.getArrayUnsafe()); } public static String asBase64String(final BytesValue bytesValue) { return Base64.getEncoder().encodeToString(bytesValue.extractArray()); } public static BytesValue fromBase64(final byte[] bytes) { return BytesValue.wrap(Base64.getDecoder().decode(bytes)); } public static BytesValue fromBase64(final String str) { return BytesValue.wrap(Base64.getDecoder().decode(str)); } // In Java9, this could be moved to BytesValue and made private static BytesValue fromHexString(final String str, final int destSize, final boolean lenient) { return BytesValue.wrap(fromRawHexString(str, destSize, lenient)); } static byte[] fromRawHexString( final String str, final int taintedDestSize, final boolean lenient) { String hex = str; if (str.startsWith("0x")) { hex = str.substring(2); } int len = hex.length(); int idxShift = 0; if (len % 2 != 0) { if (!lenient) { throw new IllegalArgumentException("Invalid odd-length hex binary representation " + str); } hex = "0" + hex; len += 1; idxShift = 1; } final int size = len / 2; final int destSize; if (taintedDestSize < 0) { destSize = size; } else { destSize = taintedDestSize; checkArgument( size <= destSize, "Hex value %s is too big: expected at most %s bytes but got %s", str, destSize, size); } final byte[] out = new byte[destSize]; final int destOffset = (destSize - size); for (int i = 0; i < len; i += 2) { final int h = hexToBin(hex.charAt(i)); final int l = hexToBin(hex.charAt(i + 1)); if (h == -1) { throw new IllegalArgumentException( String.format( "Illegal character '%c' found at index %d in hex binary representation %s", hex.charAt(i), i - idxShift, str)); } if (l == -1) { throw new IllegalArgumentException( String.format( "Illegal character '%c' found at index %d in hex binary representation %s", hex.charAt(i + 1), i + 1 - idxShift, str)); } out[destOffset + (i / 2)] = (byte) (h * 16 + l); } return out; } private static int hexToBin(final char ch) { if ('0' <= ch && ch <= '9') { return ch - 48; } else if ('A' <= ch && ch <= 'F') { return ch - 65 + 10; } else { return 'a' <= ch && ch <= 'f' ? 
ch - 97 + 10 : -1; } } private static int leadingZeros(final BytesValue bytes) { for (int i = 0; i < bytes.size(); i++) { if (bytes.get(i) != 0) { return i; } } return bytes.size(); } }
1
19,505
This is a pretty idiosyncratic method - I'd move it into the JSON-RPC package rather than adding it to `BytesValues`.
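To make that suggestion concrete, here is a hedged sketch of the helper hosted in a JSON-RPC-side utility rather than in `BytesValues`; the class name `JsonRpcBytes` is invented for illustration and assumes an import of `org.hyperledger.besu.util.bytes.BytesValue`. Note also that the patch's slice length of `value.size() - toTrim + 1` always keeps one extra byte, which would request one byte past the end of the value when there are no trailing zeros (depending on how `trailingZeros`, not shown in this file, is defined), so the sketch clamps to a minimum length of one instead.

// Hypothetical JSON-RPC-side home for the helper; name and location are
// assumptions, since the reviewer only suggests moving it out of BytesValues.
final class JsonRpcBytes {
  private JsonRpcBytes() {}

  // Trim trailing zero bytes, keeping at least one byte so that an
  // all-zero input yields a single 0x00 rather than an empty value.
  static BytesValue trimTrailingZeros(final BytesValue value) {
    int toTrim = 0;
    for (int i = value.size() - 1; i >= 0 && value.get(i) == 0; i--) {
      toTrim++;
    }
    return value.slice(0, Math.max(1, value.size() - toTrim));
  }
}

If the extra retained byte in the patch is intentional, the slice length would instead be `value.size() - toTrim + 1`, but that variant needs a guard for inputs with no trailing zeros.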
hyperledger-besu
java
@@ -424,11 +424,15 @@ module RSpec superclass.method(:next_runnable_index_for), description, *args, &example_group_block ) + + config = RSpec.configuration + config.apply_derived_metadata_to(@metadata) + ExampleGroups.assign_const(self) @currently_executing_a_context_hook = false - RSpec.configuration.configure_group(self) + config.configure_group(self) end # @private
1
RSpec::Support.require_rspec_support 'recursive_const_methods' module RSpec module Core # rubocop:disable Metrics/ClassLength # ExampleGroup and {Example} are the main structural elements of # rspec-core. Consider this example: # # RSpec.describe Thing do # it "does something" do # end # end # # The object returned by `describe Thing` is a subclass of ExampleGroup. # The object returned by `it "does something"` is an instance of Example, # which serves as a wrapper for an instance of the ExampleGroup in which it # is declared. # # Example group bodies (e.g. `describe` or `context` blocks) are evaluated # in the context of a new subclass of ExampleGroup. Individual examples are # evaluated in the context of an instance of the specific ExampleGroup # subclass to which they belong. # # Besides the class methods defined here, there are other interesting macros # defined in {Hooks}, {MemoizedHelpers::ClassMethods} and # {SharedExampleGroup}. There are additional instance methods available to # your examples defined in {MemoizedHelpers} and {Pending}. class ExampleGroup extend Hooks include MemoizedHelpers extend MemoizedHelpers::ClassMethods include Pending extend SharedExampleGroup # Define a singleton method for the singleton class (remove the method if # it's already been defined). # @private def self.idempotently_define_singleton_method(name, &definition) (class << self; self; end).module_exec do remove_method(name) if method_defined?(name) && instance_method(name).owner == self define_method(name, &definition) end end # @!group Metadata # The [Metadata](Metadata) object associated with this group. # @see Metadata def self.metadata @metadata ||= nil end # Temporarily replace the provided metadata. # Intended primarily to allow an example group's singleton class # to return the metadata of the example that it exists for. This # is necessary for shared example group inclusion to work properly # with singleton example groups. # @private def self.with_replaced_metadata(meta) orig_metadata = metadata @metadata = meta yield ensure @metadata = orig_metadata end # @private # @return [Metadata] belonging to the parent of a nested {ExampleGroup} def self.superclass_metadata @superclass_metadata ||= superclass.respond_to?(:metadata) ? superclass.metadata : nil end # @private def self.delegate_to_metadata(*names) names.each do |name| idempotently_define_singleton_method(name) { metadata.fetch(name) } end end delegate_to_metadata :described_class, :file_path, :location # @return [String] the current example group description def self.description description = metadata[:description] RSpec.configuration.format_docstrings_block.call(description) end # Returns the class or module passed to the `describe` method (or alias). # Returns nil if the subject is not a class or module. # @example # RSpec.describe Thing do # it "does something" do # described_class == Thing # end # end # def described_class self.class.described_class end # @!endgroup # @!group Defining Examples # @private # @macro [attach] define_example_method # @!scope class # @method $1 # @overload $1 # @overload $1(&example_implementation) # @param example_implementation [Block] The implementation of the example. # @overload $1(doc_string, *metadata_keys, metadata={}) # @param doc_string [String] The example's doc string. # @param metadata [Hash] Metadata for the example. # @param metadata_keys [Array<Symbol>] Metadata tags for the example. # Will be transformed into hash entries with `true` values. 
# @overload $1(doc_string, *metadata_keys, metadata={}, &example_implementation) # @param doc_string [String] The example's doc string. # @param metadata [Hash] Metadata for the example. # @param metadata_keys [Array<Symbol>] Metadata tags for the example. # Will be transformed into hash entries with `true` values. # @param example_implementation [Block] The implementation of the example. # @yield [Example] the example object # @example # $1 do # end # # $1 "does something" do # end # # $1 "does something", :slow, :uses_js do # end # # $1 "does something", :with => 'additional metadata' do # end # # $1 "does something" do |ex| # # ex is the Example object that contains metadata about the example # end def self.define_example_method(name, extra_options={}) idempotently_define_singleton_method(name) do |*all_args, &block| desc, *args = *all_args options = Metadata.build_hash_from(args) options.update(:skip => RSpec::Core::Pending::NOT_YET_IMPLEMENTED) unless block options.update(extra_options) RSpec::Core::Example.new(self, desc, options, block) end end # Defines an example within a group. define_example_method :example # Defines an example within a group. # This is the primary API to define a code example. define_example_method :it # Defines an example within a group. # Useful for when your docstring does not read well off of `it`. # @example # RSpec.describe MyClass do # specify "#do_something is deprecated" do # # ... # end # end define_example_method :specify # Shortcut to define an example with `:focus => true`. # @see example define_example_method :focus, :focus => true # Shortcut to define an example with `:focus => true`. # @see example define_example_method :fexample, :focus => true # Shortcut to define an example with `:focus => true`. # @see example define_example_method :fit, :focus => true # Shortcut to define an example with `:focus => true`. # @see example define_example_method :fspecify, :focus => true # Shortcut to define an example with `:skip => 'Temporarily skipped with xexample'`. # @see example define_example_method :xexample, :skip => 'Temporarily skipped with xexample' # Shortcut to define an example with `:skip => 'Temporarily skipped with xit'`. # @see example define_example_method :xit, :skip => 'Temporarily skipped with xit' # Shortcut to define an example with `:skip => 'Temporarily skipped with xspecify'`. # @see example define_example_method :xspecify, :skip => 'Temporarily skipped with xspecify' # Shortcut to define an example with `:skip => true` # @see example define_example_method :skip, :skip => true # Shortcut to define an example with `:pending => true` # @see example define_example_method :pending, :pending => true # @!endgroup # @!group Defining Example Groups # @private # @macro [attach] define_example_group_method # @!scope class # @overload $1 # @overload $1(&example_group_definition) # @param example_group_definition [Block] The definition of the example group. # @overload $1(doc_string, *metadata_keys, metadata={}, &example_implementation) # @param doc_string [String] The group's doc string. # @param metadata [Hash] Metadata for the group. # @param metadata_keys [Array<Symbol>] Metadata tags for the group. # Will be transformed into hash entries with `true` values. # @param example_group_definition [Block] The definition of the example group. # # Generates a subclass of this example group which inherits # everything except the examples themselves. 
# # @example # # RSpec.describe "something" do # << This describe method is defined in # # << RSpec::Core::DSL, included in the # # << global namespace (optional) # before do # do_something_before # end # # let(:thing) { Thing.new } # # $1 "attribute (of something)" do # # examples in the group get the before hook # # declared above, and can access `thing` # end # end # # @see DSL#describe def self.define_example_group_method(name, metadata={}) idempotently_define_singleton_method(name) do |*args, &example_group_block| thread_data = RSpec::Support.thread_local_data top_level = self == ExampleGroup registration_collection = if top_level if thread_data[:in_example_group] raise "Creating an isolated context from within a context is " \ "not allowed. Change `RSpec.#{name}` to `#{name}` or " \ "move this to a top-level scope." end thread_data[:in_example_group] = true RSpec.world.example_groups else children end begin description = args.shift combined_metadata = metadata.dup combined_metadata.merge!(args.pop) if args.last.is_a? Hash args << combined_metadata subclass(self, description, args, registration_collection, &example_group_block) ensure thread_data.delete(:in_example_group) if top_level end end RSpec::Core::DSL.expose_example_group_alias(name) end define_example_group_method :example_group # An alias of `example_group`. Generally used when grouping examples by a # thing you are describing (e.g. an object, class or method). # @see example_group define_example_group_method :describe # An alias of `example_group`. Generally used when grouping examples # contextually (e.g. "with xyz", "when xyz" or "if xyz"). # @see example_group define_example_group_method :context # Shortcut to temporarily make an example group skipped. # @see example_group define_example_group_method :xdescribe, :skip => "Temporarily skipped with xdescribe" # Shortcut to temporarily make an example group skipped. # @see example_group define_example_group_method :xcontext, :skip => "Temporarily skipped with xcontext" # Shortcut to define an example group with `:focus => true`. # @see example_group define_example_group_method :fdescribe, :focus => true # Shortcut to define an example group with `:focus => true`. # @see example_group define_example_group_method :fcontext, :focus => true # @!endgroup # @!group Including Shared Example Groups # @private # @macro [attach] define_nested_shared_group_method # @!scope class # # @see SharedExampleGroup def self.define_nested_shared_group_method(new_name, report_label="it should behave like") idempotently_define_singleton_method(new_name) do |name, *args, &customization_block| # Pass :caller so the :location metadata is set properly. # Otherwise, it'll be set to the next line because that's # the block's source_location. group = example_group("#{report_label} #{name}", :caller => (the_caller = caller)) do find_and_eval_shared("examples", name, the_caller.first, *args, &customization_block) end group.metadata[:shared_group_name] = name group end end # Generates a nested example group and includes the shared content # mapped to `name` in the nested group. define_nested_shared_group_method :it_behaves_like, "behaves like" # Generates a nested example group and includes the shared content # mapped to `name` in the nested group. define_nested_shared_group_method :it_should_behave_like # Includes shared content mapped to `name` directly in the group in which # it is declared, as opposed to `it_behaves_like`, which creates a nested # group. 
If given a block, that block is also eval'd in the current # context. # # @see SharedExampleGroup def self.include_context(name, *args, &block) find_and_eval_shared("context", name, caller.first, *args, &block) end # Includes shared content mapped to `name` directly in the group in which # it is declared, as opposed to `it_behaves_like`, which creates a nested # group. If given a block, that block is also eval'd in the current # context. # # @see SharedExampleGroup def self.include_examples(name, *args, &block) find_and_eval_shared("examples", name, caller.first, *args, &block) end # Clear memoized values when adding/removing examples # @private def self.reset_memoized @descendant_filtered_examples = nil @_descendants = nil @parent_groups = nil @declaration_locations = nil end # Adds an example to the example group def self.add_example(example) reset_memoized examples << example end # Removes an example from the example group def self.remove_example(example) reset_memoized examples.delete example end # @private def self.find_and_eval_shared(label, name, inclusion_location, *args, &customization_block) shared_module = RSpec.world.shared_example_group_registry.find(parent_groups, name) unless shared_module raise ArgumentError, "Could not find shared #{label} #{name.inspect}" end shared_module.include_in( self, Metadata.relative_path(inclusion_location), args, customization_block ) end # @!endgroup # @private def self.subclass(parent, description, args, registration_collection, &example_group_block) subclass = Class.new(parent) subclass.set_it_up(description, args, registration_collection, &example_group_block) subclass.module_exec(&example_group_block) if example_group_block # The LetDefinitions module must be included _after_ other modules # to ensure that it takes precedence when there are name collisions. # Thus, we delay including it until after the example group block # has been eval'd. MemoizedHelpers.define_helpers_on(subclass) subclass end # @private def self.set_it_up(description, args, registration_collection, &example_group_block) # Ruby 1.9 has a bug that can lead to infinite recursion and a # SystemStackError if you include a module in a superclass after # including it in a subclass: https://gist.github.com/845896 # To prevent this, we must include any modules in # RSpec::Core::ExampleGroup before users create example groups and have # a chance to include the same module in a subclass of # RSpec::Core::ExampleGroup. So we need to configure example groups # here. ensure_example_groups_are_configured # Register the example with the group before creating the metadata hash. # This is necessary since creating the metadata hash triggers # `when_first_matching_example_defined` callbacks, in which users can # load RSpec support code which defines hooks. For that to work, the # examples and example groups must be registered at the time the # support code is called or be defined afterwards. # Begin defined beforehand but registered afterwards causes hooks to # not be applied where they should. 
registration_collection << self @user_metadata = Metadata.build_hash_from(args) @metadata = Metadata::ExampleGroupHash.create( superclass_metadata, @user_metadata, superclass.method(:next_runnable_index_for), description, *args, &example_group_block ) ExampleGroups.assign_const(self) @currently_executing_a_context_hook = false RSpec.configuration.configure_group(self) end # @private def self.examples @examples ||= [] end # @private def self.filtered_examples RSpec.world.filtered_examples[self] end # @private def self.descendant_filtered_examples @descendant_filtered_examples ||= filtered_examples + FlatMap.flat_map(children, &:descendant_filtered_examples) end # @private def self.children @children ||= [] end # @private # Traverses the tree of groups, starting with `self`, then the children, recursively. # Halts the traversal of a branch of the tree as soon as the passed block returns true. # Note that siblings groups and their sub-trees will continue to be explored. # This is intended to make it easy to find the top-most group that satisfies some # condition. def self.traverse_tree_until(&block) return if yield self children.each do |child| child.traverse_tree_until(&block) end end # @private def self.next_runnable_index_for(file) if self == ExampleGroup # We add 1 so the ids start at 1 instead of 0. This is # necessary for this branch (but not for the other one) # because we register examples and groups with the # `children` and `examples` collection BEFORE this # method is called as part of metadata hash creation, # but the example group is recorded with # `RSpec.world.example_group_counts_by_spec_file` AFTER # the metadata hash is created and the group is returned # to the caller. RSpec.world.num_example_groups_defined_in(file) + 1 else children.count + examples.count end end # @private def self.descendants @_descendants ||= [self] + FlatMap.flat_map(children, &:descendants) end ## @private def self.parent_groups @parent_groups ||= ancestors.select { |a| a < RSpec::Core::ExampleGroup } end # @private def self.top_level? superclass == ExampleGroup end # @private def self.ensure_example_groups_are_configured unless defined?(@@example_groups_configured) RSpec.configuration.configure_mock_framework RSpec.configuration.configure_expectation_framework # rubocop:disable Style/ClassVars @@example_groups_configured = true # rubocop:enable Style/ClassVars end end # @private def self.before_context_ivars @before_context_ivars ||= {} end # @private def self.store_before_context_ivars(example_group_instance) each_instance_variable_for_example(example_group_instance) do |ivar| before_context_ivars[ivar] = example_group_instance.instance_variable_get(ivar) end end # Returns true if a `before(:context)` or `after(:context)` # hook is currently executing. def self.currently_executing_a_context_hook? 
@currently_executing_a_context_hook end # @private def self.run_before_context_hooks(example_group_instance) set_ivars(example_group_instance, superclass_before_context_ivars) @currently_executing_a_context_hook = true ContextHookMemoized::Before.isolate_for_context_hook(example_group_instance) do hooks.run(:before, :context, example_group_instance) end ensure store_before_context_ivars(example_group_instance) @currently_executing_a_context_hook = false end if RUBY_VERSION.to_f >= 1.9 # @private def self.superclass_before_context_ivars superclass.before_context_ivars end else # 1.8.7 # :nocov: # @private def self.superclass_before_context_ivars if superclass.respond_to?(:before_context_ivars) superclass.before_context_ivars else # `self` must be the singleton class of an ExampleGroup instance. # On 1.8.7, the superclass of a singleton class of an instance of A # is A's singleton class. On 1.9+, it's A. On 1.8.7, the first ancestor # is A, so we can mirror 1.8.7's behavior here. Note that we have to # search for the first that responds to `before_context_ivars` # in case a module has been included in the singleton class. ancestors.find { |a| a.respond_to?(:before_context_ivars) }.before_context_ivars end end # :nocov: end # @private def self.run_after_context_hooks(example_group_instance) set_ivars(example_group_instance, before_context_ivars) @currently_executing_a_context_hook = true ContextHookMemoized::After.isolate_for_context_hook(example_group_instance) do hooks.run(:after, :context, example_group_instance) end ensure before_context_ivars.clear @currently_executing_a_context_hook = false end # Runs all the examples in this group. def self.run(reporter=RSpec::Core::NullReporter) return if RSpec.world.wants_to_quit reporter.example_group_started(self) should_run_context_hooks = descendant_filtered_examples.any? begin run_before_context_hooks(new('before(:context) hook')) if should_run_context_hooks result_for_this_group = run_examples(reporter) results_for_descendants = ordering_strategy.order(children).map { |child| child.run(reporter) }.all? result_for_this_group && results_for_descendants rescue Pending::SkipDeclaredInExample => ex for_filtered_examples(reporter) { |example| example.skip_with_exception(reporter, ex) } true rescue Support::AllExceptionsExceptOnesWeMustNotRescue => ex for_filtered_examples(reporter) { |example| example.fail_with_exception(reporter, ex) } RSpec.world.wants_to_quit = true if reporter.fail_fast_limit_met? false ensure run_after_context_hooks(new('after(:context) hook')) if should_run_context_hooks reporter.example_group_finished(self) end end # @private def self.ordering_strategy order = metadata.fetch(:order, :global) registry = RSpec.configuration.ordering_registry registry.fetch(order) do warn <<-WARNING.gsub(/^ +\|/, '') |WARNING: Ignoring unknown ordering specified using `:order => #{order.inspect}` metadata. | Falling back to configured global ordering. | Unrecognized ordering specified at: #{location} WARNING registry.fetch(:global) end end # @private def self.run_examples(reporter) ordering_strategy.order(filtered_examples).map do |example| next if RSpec.world.wants_to_quit instance = new(example.inspect_output) set_ivars(instance, before_context_ivars) succeeded = example.run(instance, reporter) if !succeeded && reporter.fail_fast_limit_met? RSpec.world.wants_to_quit = true end succeeded end.all? 
end # @private def self.for_filtered_examples(reporter, &block) filtered_examples.each(&block) children.each do |child| reporter.example_group_started(child) child.for_filtered_examples(reporter, &block) reporter.example_group_finished(child) end false end # @private def self.declaration_locations @declaration_locations ||= [Metadata.location_tuple_from(metadata)] + examples.map { |e| Metadata.location_tuple_from(e.metadata) } + FlatMap.flat_map(children, &:declaration_locations) end # @return [String] the unique id of this example group. Pass # this at the command line to re-run this exact example group. def self.id Metadata.id_from(metadata) end # @private def self.top_level_description parent_groups.last.description end # @private def self.set_ivars(instance, ivars) ivars.each { |name, value| instance.instance_variable_set(name, value) } end if RUBY_VERSION.to_f < 1.9 # :nocov: # @private INSTANCE_VARIABLE_TO_IGNORE = '@__inspect_output'.freeze # :nocov: else # @private INSTANCE_VARIABLE_TO_IGNORE = :@__inspect_output end # @private def self.each_instance_variable_for_example(group) group.instance_variables.each do |ivar| yield ivar unless ivar == INSTANCE_VARIABLE_TO_IGNORE end end def initialize(inspect_output=nil) @__inspect_output = inspect_output || '(no description provided)' super() # no args get passed end # @private def inspect "#<#{self.class} #{@__inspect_output}>" end unless method_defined?(:singleton_class) # for 1.8.7 # :nocov: # @private def singleton_class class << self; self; end end # :nocov: end # @private def self.update_inherited_metadata(updates) metadata.update(updates) do |key, existing_group_value, new_inherited_value| @user_metadata.key?(key) ? existing_group_value : new_inherited_value end RSpec.configuration.configure_group(self) examples.each { |ex| ex.update_inherited_metadata(updates) } children.each { |group| group.update_inherited_metadata(updates) } end # Raised when an RSpec API is called in the wrong scope, such as `before` # being called from within an example rather than from within an example # group block. WrongScopeError = Class.new(NoMethodError) def self.method_missing(name, *args) if method_defined?(name) raise WrongScopeError, "`#{name}` is not available on an example group (e.g. a " \ "`describe` or `context` block). It is only available from " \ "within individual examples (e.g. `it` blocks) or from " \ "constructs that run in the scope of an example (e.g. " \ "`before`, `let`, etc)." end super end private_class_method :method_missing private def method_missing(name, *args) if self.class.respond_to?(name) raise WrongScopeError, "`#{name}` is not available from within an example (e.g. an " \ "`it` block) or from constructs that run in the scope of an " \ "example (e.g. `before`, `let`, etc). It is only available " \ "on an example group (e.g. a `describe` or `context` block)." end super end end # rubocop:enable Metrics/ClassLength # @private # Unnamed example group used by `SuiteHookContext`. class AnonymousExampleGroup < ExampleGroup def self.metadata {} end end # Contains information about the inclusion site of a shared example group. 
class SharedExampleGroupInclusionStackFrame # @return [String] the name of the shared example group attr_reader :shared_group_name # @return [String] the location where the shared example was included attr_reader :inclusion_location def initialize(shared_group_name, inclusion_location) @shared_group_name = shared_group_name @inclusion_location = inclusion_location end # @return [String] The {#inclusion_location}, formatted for display by a formatter. def formatted_inclusion_location @formatted_inclusion_location ||= begin RSpec.configuration.backtrace_formatter.backtrace_line( inclusion_location.sub(/(:\d+):in .+$/, '\1') ) end end # @return [String] Description of this stack frame, in the form used by # RSpec's built-in formatters. def description @description ||= "Shared Example Group: #{shared_group_name.inspect} " \ "called from #{formatted_inclusion_location}" end # @private def self.current_backtrace shared_example_group_inclusions.reverse end # @private def self.with_frame(name, location) current_stack = shared_example_group_inclusions if current_stack.any? { |frame| frame.shared_group_name == name } raise ArgumentError, "can't include shared examples recursively" else current_stack << new(name, location) yield end ensure current_stack.pop end # @private def self.shared_example_group_inclusions RSpec::Support.thread_local_data[:shared_example_group_inclusions] ||= [] end end end # @private # # Namespace for the example group subclasses generated by top-level # `describe`. module ExampleGroups extend Support::RecursiveConstMethods def self.assign_const(group) base_name = base_name_for(group) const_scope = constant_scope_for(group) name = disambiguate(base_name, const_scope) const_scope.const_set(name, group) end def self.constant_scope_for(group) const_scope = group.superclass const_scope = self if const_scope == ::RSpec::Core::ExampleGroup const_scope end def self.remove_all_constants constants.each do |constant| __send__(:remove_const, constant) end end def self.base_name_for(group) return "Anonymous".dup if group.description.empty? # Convert to CamelCase. name = ' ' + group.description name.gsub!(/[^0-9a-zA-Z]+([0-9a-zA-Z])/) do match = ::Regexp.last_match[1] match.upcase! match end name.lstrip! # Remove leading whitespace name.gsub!(/\W/, ''.freeze) # JRuby, RBX and others don't like non-ascii in const names # Ruby requires first const letter to be A-Z. Use `Nested` # as necessary to enforce that. name.gsub!(/\A([^A-Z]|\z)/, 'Nested\1'.freeze) name end if RUBY_VERSION == '1.9.2' # :nocov: class << self alias _base_name_for base_name_for def base_name_for(group) _base_name_for(group) + '_' end end private_class_method :_base_name_for # :nocov: end def self.disambiguate(name, const_scope) return name unless const_defined_on?(const_scope, name) # Add a trailing number if needed to disambiguate from an existing # constant. name << "_2" name.next! while const_defined_on?(const_scope, name) name end end end
1
17,389
Is there a way to deduplicate this somehow? Could we roll it into `RSpec.configuration` with a bit of memoization, or is there a chicken-and-egg problem?
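One possible shape of that deduplication, sketched under the assumption that a combined entry point on `Configuration` is acceptable; `prepare_group` is an invented name, not an existing RSpec API. The chicken-and-egg part is ordering: in the patch, `apply_derived_metadata_to` runs before `ExampleGroups.assign_const` while `configure_group` runs after it, so a single call needs a seam in the middle.

# Sketch only: one Configuration entry point wrapping both metadata steps.
module RSpec
  module Core
    class Configuration
      # `prepare_group` is hypothetical; it yields between the two steps so
      # the caller can run ExampleGroups.assign_const at the right moment.
      def prepare_group(group)
        apply_derived_metadata_to(group.metadata)
        yield if block_given?
        configure_group(group)
      end
    end
  end
end

# set_it_up would then collapse to something like:
#   RSpec.configuration.prepare_group(self) { ExampleGroups.assign_const(self) }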
rspec-rspec-core
rb
@@ -49,6 +49,7 @@ public class ResultSetPropertyTester extends PropertyTester public static final String PROP_CAN_NAVIGATE_HISTORY = "canNavigateHistory"; public static final String PROP_EDITABLE = "editable"; public static final String PROP_CHANGED = "changed"; + private static final String PROP_UPDATE_NOT_IN_PROGRESS = "updateNotInProgress"; private static final Log log = Log.getLog(ResultSetPropertyTester.class);
1
/* * DBeaver - Universal Database Manager * Copyright (C) 2010-2021 DBeaver Corp and others * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.ui.controls.resultset.handler; import org.eclipse.core.expressions.PropertyTester; import org.eclipse.ui.IWorkbenchPart; import org.jkiss.dbeaver.Log; import org.jkiss.dbeaver.model.DBUtils; import org.jkiss.dbeaver.model.data.DBDAttributeBinding; import org.jkiss.dbeaver.model.struct.DBSDataContainer; import org.jkiss.dbeaver.runtime.DBWorkbench; import org.jkiss.dbeaver.ui.ActionUtils; import org.jkiss.dbeaver.ui.controls.resultset.ResultSetRow; import org.jkiss.dbeaver.ui.controls.resultset.ResultSetViewer; import org.jkiss.utils.CommonUtils; /** * DatabaseEditorPropertyTester */ public class ResultSetPropertyTester extends PropertyTester { public static final String NAMESPACE = "org.jkiss.dbeaver.core.resultset"; public static final String PROP_ACTIVE = "active"; public static final String PROP_HAS_DATA = "hasData"; public static final String PROP_HAS_MORE_DATA = "hasMoreData"; public static final String PROP_HAS_FILTERS = "hasfilters"; public static final String PROP_CAN_COPY = "canCopy"; public static final String PROP_CAN_PASTE = "canPaste"; public static final String PROP_CAN_CUT = "canCut"; public static final String PROP_CAN_MOVE = "canMove"; public static final String PROP_CAN_TOGGLE = "canToggle"; public static final String PROP_CAN_SWITCH_PRESENTATION = "canSwitchPresentation"; public static final String PROP_CAN_NAVIGATE_LINK = "canNavigateLink"; public static final String PROP_SUPPORTS_COUNT = "supportsCount"; public static final String PROP_CAN_NAVIGATE_HISTORY = "canNavigateHistory"; public static final String PROP_EDITABLE = "editable"; public static final String PROP_CHANGED = "changed"; private static final Log log = Log.getLog(ResultSetPropertyTester.class); @Override public boolean test(Object receiver, String property, Object[] args, Object expectedValue) { try { ResultSetViewer rsv = (ResultSetViewer) ResultSetHandlerMain.getActiveResultSet((IWorkbenchPart)receiver); return rsv != null && checkResultSetProperty(rsv, property, expectedValue); } catch (Throwable e) { if (!DBWorkbench.getPlatform().isShuttingDown()) { // FIXME: bug in Eclipse. To remove in future. 
log.debug(e); } return false; } } private boolean checkResultSetProperty(ResultSetViewer rsv, String property, Object expectedValue) { boolean actionsDisabled = rsv.isActionsDisabled(); switch (property) { case PROP_ACTIVE: return true; case PROP_HAS_DATA: return rsv.getModel().hasData(); case PROP_HAS_MORE_DATA: return rsv.isHasMoreData(); case PROP_HAS_FILTERS: return rsv.getModel().getDataFilter().hasFilters(); case PROP_CAN_COPY: return !actionsDisabled && rsv.getModel().hasData(); case PROP_CAN_PASTE: case PROP_CAN_CUT: { if (actionsDisabled || !rsv.supportsEdit()) { return false; } DBDAttributeBinding attr = rsv.getActivePresentation().getCurrentAttribute(); return attr != null && rsv.getAttributeReadOnlyStatus(attr) == null; } case PROP_CAN_MOVE: { if (actionsDisabled || !rsv.supportsNavigation()) return false; ResultSetRow currentRow = rsv.getCurrentRow(); if ("back".equals(expectedValue)) { return currentRow != null && currentRow.getVisualNumber() > 0; } else if ("forward".equals(expectedValue)) { return currentRow != null && currentRow.getVisualNumber() < rsv.getModel().getRowCount() - 1; } break; } case PROP_EDITABLE: { if (actionsDisabled || !rsv.hasData() || !rsv.supportsEdit()) { return false; } if ("edit".equals(expectedValue) || "inline".equals(expectedValue)) { DBDAttributeBinding attr = rsv.getActivePresentation().getCurrentAttribute(); if (attr == null) { return false; } if ("inline".equals(expectedValue)) { return rsv.getAttributeReadOnlyStatus(attr) == null; } else { return rsv.getCurrentRow() != null; } } else if ("add".equals(expectedValue)) { return rsv.isInsertable(); } else if ("copy".equals(expectedValue) || "delete".equals(expectedValue)) { ResultSetRow currentRow = rsv.getCurrentRow(); return currentRow != null && rsv.isInsertable(); } else { return false; } } case PROP_CHANGED: return rsv.isDirty(); case PROP_CAN_TOGGLE: return !actionsDisabled && rsv.isPresentationInFocus(); case PROP_CAN_SWITCH_PRESENTATION: return !actionsDisabled && !rsv.isRefreshInProgress() && !rsv.getAvailablePresentations().isEmpty(); case PROP_SUPPORTS_COUNT: return rsv.hasData() && rsv.isHasMoreData() && (rsv.getDataContainer().getSupportedFeatures() & DBSDataContainer.DATA_COUNT) != 0; case PROP_CAN_NAVIGATE_LINK: if (!actionsDisabled && rsv.getModel().hasData()) { final ResultSetRow row = rsv.getCurrentRow(); if (row != null) { DBDAttributeBinding attr = rsv.getActivePresentation().getCurrentAttribute(); if (attr != null) { Object value = rsv.getModel().getCellValue(attr, row); return !CommonUtils.isEmpty(attr.getReferrers()) && !DBUtils.isNullValue(value); } } } return false; case PROP_CAN_NAVIGATE_HISTORY: if (!actionsDisabled && rsv.getModel().hasData()) { if (expectedValue instanceof Number && ((Number)expectedValue).intValue() == 1 || "1".equals(expectedValue)) { return rsv.getHistoryPosition() < rsv.getHistorySize() - 1; } else { return rsv.getHistoryPosition() > 0; } } } return false; } public static void firePropertyChange(String propName) { ActionUtils.evaluatePropertyState(NAMESPACE + "." + propName); } }
1
11,672
Generally, I don't like the idea of providing properties that return negated/inverted values. It would be more convenient to return whether the update is **in** progress or not.
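A hedged sketch of the positive-valued property this comment asks for; `isUpdateInProgress()` is an assumed accessor on `ResultSetViewer`, not something shown in the patch:

```java
// Hypothetical sketch -- not the actual patch. Expose the positive property
// and let consumers negate it declaratively instead of shipping an inverted
// "updateNotInProgress" name.
public static final String PROP_UPDATE_IN_PROGRESS = "updateInProgress";

private boolean checkUpdateProperty(ResultSetViewer rsv, String property) {
    if (PROP_UPDATE_IN_PROGRESS.equals(property)) {
        return rsv.isUpdateInProgress(); // assumed accessor on ResultSetViewer
    }
    return false;
}

// Consumers needing the inverted sense test against value="false" in plugin.xml:
//   <test property="org.jkiss.dbeaver.core.resultset.updateInProgress" value="false"/>
```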
dbeaver-dbeaver
java
@@ -256,7 +256,7 @@ import 'flexStyles'; const runtimeTicks = playbackManager.duration(options.player); if (runtimeTicks) { - const timeRemainingTicks = runtimeTicks - playbackManager.currentTime(options.player); + const timeRemainingTicks = runtimeTicks - playbackManager.currentTime(options.player) * 10000; return Math.round(timeRemainingTicks / 10000); }
1
import dom from 'dom'; import playbackManager from 'playbackManager'; import connectionManager from 'connectionManager'; import events from 'events'; import mediaInfo from 'mediaInfo'; import layoutManager from 'layoutManager'; import focusManager from 'focusManager'; import globalize from 'globalize'; import itemHelper from 'itemHelper'; import 'css!./upnextdialog'; import 'emby-button'; import 'flexStyles'; /* eslint-disable indent */ const transitionEndEventName = dom.whichTransitionEvent(); function seriesImageUrl(item, options) { if (item.Type !== 'Episode') { return null; } options = options || {}; options.type = options.type || 'Primary'; if (options.type === 'Primary') { if (item.SeriesPrimaryImageTag) { options.tag = item.SeriesPrimaryImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options); } } if (options.type === 'Thumb') { if (item.SeriesThumbImageTag) { options.tag = item.SeriesThumbImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options); } if (item.ParentThumbImageTag) { options.tag = item.ParentThumbImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.ParentThumbItemId, options); } } return null; } function imageUrl(item, options) { options = options || {}; options.type = options.type || 'Primary'; if (item.ImageTags && item.ImageTags[options.type]) { options.tag = item.ImageTags[options.type]; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.PrimaryImageItemId || item.Id, options); } if (options.type === 'Primary') { if (item.AlbumId && item.AlbumPrimaryImageTag) { options.tag = item.AlbumPrimaryImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.AlbumId, options); } } return null; } function setPoster(osdPoster, item, secondaryItem) { if (item) { let imgUrl = seriesImageUrl(item, { type: 'Primary' }) || seriesImageUrl(item, { type: 'Thumb' }) || imageUrl(item, { type: 'Primary' }); if (!imgUrl && secondaryItem) { imgUrl = seriesImageUrl(secondaryItem, { type: 'Primary' }) || seriesImageUrl(secondaryItem, { type: 'Thumb' }) || imageUrl(secondaryItem, { type: 'Primary' }); } if (imgUrl) { osdPoster.innerHTML = '<img class="upNextDialog-poster-img" src="' + imgUrl + '" />'; return; } } osdPoster.innerHTML = ''; } function getHtml() { let html = ''; html += '<div class="upNextDialog-poster">'; html += '</div>'; html += '<div class="flex flex-direction-column flex-grow">'; html += '<h2 class="upNextDialog-nextVideoText" style="margin:.25em 0;">&nbsp;</h2>'; html += '<h3 class="upNextDialog-title" style="margin:.25em 0 .5em;"></h3>'; html += '<div class="flex flex-direction-row upNextDialog-mediainfo">'; html += '</div>'; html += '<div class="upNextDialog-overview" style="margin-top:1em;"></div>'; html += '<div class="flex flex-direction-row upNextDialog-buttons" style="margin-top:1em;">'; html += '<button type="button" is="emby-button" class="raised raised-mini btnStartNow upNextDialog-button">'; html += globalize.translate('HeaderStartNow'); html += '</button>'; html += '<button type="button" is="emby-button" class="raised raised-mini btnHide upNextDialog-button">'; html += globalize.translate('Hide'); html += '</button>'; // buttons html += '</div>'; // main html += '</div>'; return html; } function setNextVideoText() { const instance = this; const elem = instance.options.parent; const secondsRemaining = Math.max(Math.round(getTimeRemainingMs(instance) / 1000), 0); console.debug('up next 
seconds remaining: ' + secondsRemaining); const timeText = '<span class="upNextDialog-countdownText">' + globalize.translate('HeaderSecondsValue', secondsRemaining) + '</span>'; const nextVideoText = instance.itemType === 'Episode' ? globalize.translate('HeaderNextEpisodePlayingInValue', timeText) : globalize.translate('HeaderNextVideoPlayingInValue', timeText); elem.querySelector('.upNextDialog-nextVideoText').innerHTML = nextVideoText; } function fillItem(item) { const instance = this; const elem = instance.options.parent; setPoster(elem.querySelector('.upNextDialog-poster'), item); elem.querySelector('.upNextDialog-overview').innerHTML = item.Overview || ''; elem.querySelector('.upNextDialog-mediainfo').innerHTML = mediaInfo.getPrimaryMediaInfoHtml(item, { }); let title = itemHelper.getDisplayName(item); if (item.SeriesName) { title = item.SeriesName + ' - ' + title; } elem.querySelector('.upNextDialog-title').innerHTML = title || ''; instance.itemType = item.Type; instance.show(); } function clearCountdownTextTimeout(instance) { if (instance._countdownTextTimeout) { clearInterval(instance._countdownTextTimeout); instance._countdownTextTimeout = null; } } function onStartNowClick() { const options = this.options; if (options) { const player = options.player; this.hide(); playbackManager.nextTrack(player); } } function init(instance, options) { options.parent.innerHTML = getHtml(); options.parent.classList.add('hide'); options.parent.classList.add('upNextDialog'); options.parent.classList.add('upNextDialog-hidden'); fillItem.call(instance, options.nextItem); options.parent.querySelector('.btnHide').addEventListener('click', instance.hide.bind(instance)); options.parent.querySelector('.btnStartNow').addEventListener('click', onStartNowClick.bind(instance)); } function clearHideAnimationEventListeners(instance, elem) { const fn = instance._onHideAnimationComplete; if (fn) { dom.removeEventListener(elem, transitionEndEventName, fn, { once: true }); } } function onHideAnimationComplete(e) { const instance = this; const elem = e.target; elem.classList.add('hide'); clearHideAnimationEventListeners(instance, elem); events.trigger(instance, 'hide'); } function hideComingUpNext() { const instance = this; clearCountdownTextTimeout(this); if (!instance.options) { return; } const elem = instance.options.parent; if (!elem) { return; } clearHideAnimationEventListeners(this, elem); if (elem.classList.contains('upNextDialog-hidden')) { return; } // trigger a reflow to force it to animate again void elem.offsetWidth; elem.classList.add('upNextDialog-hidden'); const fn = onHideAnimationComplete.bind(instance); instance._onHideAnimationComplete = fn; dom.addEventListener(elem, transitionEndEventName, fn, { once: true }); } function getTimeRemainingMs(instance) { const options = instance.options; if (options) { const runtimeTicks = playbackManager.duration(options.player); if (runtimeTicks) { const timeRemainingTicks = runtimeTicks - playbackManager.currentTime(options.player); return Math.round(timeRemainingTicks / 10000); } } return 0; } function startComingUpNextHideTimer(instance) { const timeRemainingMs = getTimeRemainingMs(instance); if (timeRemainingMs <= 0) { return; } setNextVideoText.call(instance); clearCountdownTextTimeout(instance); instance._countdownTextTimeout = setInterval(setNextVideoText.bind(instance), 400); } class UpNextDialog { constructor(options) { this.options = options; init(this, options); } show() { const elem = this.options.parent; clearHideAnimationEventListeners(this, elem); 
elem.classList.remove('hide'); // trigger a reflow to force it to animate again void elem.offsetWidth; elem.classList.remove('upNextDialog-hidden'); if (layoutManager.tv) { setTimeout(function () { focusManager.focus(elem.querySelector('.btnStartNow')); }, 50); } startComingUpNextHideTimer(this); } hide() { hideComingUpNext.call(this); } destroy() { hideComingUpNext.call(this); this.options = null; this.itemType = null; } } export default UpNextDialog; /* eslint-enable indent */
1
17,371
Converting `playbackManager` to operate in ms (the preferred solution, imo) would allow us to drop this function completely.
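A hedged sketch of what the helper reduces to under that assumption, i.e. if `duration()` and `currentTime()` both returned milliseconds (today they return different units, which is the bug the diff fixes):

```js
// Hypothetical sketch: assumes millisecond-based playbackManager accessors.
// With both values in ms, the ticks arithmetic disappears and the helper
// could be inlined at its two call sites.
function getTimeRemainingMs(instance) {
    const options = instance.options;
    if (!options) {
        return 0;
    }
    const durationMs = playbackManager.duration(options.player); // assumed ms
    if (!durationMs) {
        return 0;
    }
    return Math.round(durationMs - playbackManager.currentTime(options.player)); // assumed ms
}
```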
jellyfin-jellyfin-web
js
@@ -8,6 +8,7 @@
 DEFINE_int32(max_handlers_per_req, 10,
              "The max handlers used to handle one request");
 DEFINE_int32(min_vertices_per_bucket, 3, "The min vertices number in one bucket");
+DEFINE_int32(max_edge_returned_per_vertex, 1000, "The max number of edges returned when searching a vertex");
 
 namespace nebula {
 namespace storage {
1
/* Copyright (c) 2019 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "storage/QueryBaseProcessor.h" DEFINE_int32(max_handlers_per_req, 10, "The max handlers used to handle one request"); DEFINE_int32(min_vertices_per_bucket, 3, "The min vertices number in one bucket"); namespace nebula { namespace storage { } // namespace storage } // namespace nebula
1
24,129
The default value should be bigger; I'd like it to be the maximum of int32. That way we will not cut off anything by default, and users can configure an appropriate value in the config file, such as 1000 or 5000.
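A hedged sketch of the suggested default; `std::numeric_limits<int32_t>::max()` expresses "no cut-off by default", with the operator lowering the value in the config file only if needed:

```cpp
// Hypothetical sketch -- not the merged patch. Default to the int32 maximum
// so the storage layer truncates nothing unless explicitly configured
// (e.g. to 1000 or 5000 in the config file).
#include <cstdint>
#include <limits>

#include <gflags/gflags.h>

DEFINE_int32(max_edge_returned_per_vertex,
             std::numeric_limits<int32_t>::max(),
             "The max number of edges returned when searching a vertex");
```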
vesoft-inc-nebula
cpp
@@ -221,7 +221,10 @@ func (c *cliApp) settle(args []string) { clio.Info(fmt.Sprintf("Hermes fee: %v MYST", hermesFee.String())) return } - hermesID := c.config.GetStringByFlag(config.FlagHermesID) + hermesID, err := c.config.GetHermesID() + if err != nil { + clio.Warn("could not get hermes id: ", err) + } clio.Info("Waiting for settlement to complete") errChan := make(chan error)
1
/* * Copyright (C) 2020 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package cli import ( "fmt" "math/big" "strings" "time" "github.com/mysteriumnetwork/node/cmd/commands/cli/clio" "github.com/mysteriumnetwork/node/config" "github.com/mysteriumnetwork/node/identity" "github.com/mysteriumnetwork/node/money" "github.com/pkg/errors" ) func (c *cliApp) identities(argsString string) { var usage = strings.Join([]string{ "Usage: identities <action> [args]", "Available actions:", " " + usageListIdentities, " " + usageGetIdentity, " " + usageNewIdentity, " " + usageUnlockIdentity, " " + usageRegisterIdentity, " " + usageSettle, " " + usageGetReferralCode, }, "\n") if len(argsString) == 0 { clio.Info(usage) return } args := strings.Fields(argsString) action := args[0] actionArgs := args[1:] switch action { case "list": c.listIdentities(actionArgs) case "get": c.getIdentity(actionArgs) case "new": c.newIdentity(actionArgs) case "unlock": c.unlockIdentity(actionArgs) case "register": c.registerIdentity(actionArgs) case "beneficiary": c.setBeneficiary(actionArgs) case "settle": c.settle(actionArgs) case "referralcode": c.getReferralCode(actionArgs) default: clio.Warnf("Unknown sub-command '%s'\n", argsString) fmt.Println(usage) } } const usageListIdentities = "list" func (c *cliApp) listIdentities(args []string) { if len(args) > 0 { clio.Info("Usage: " + usageListIdentities) return } ids, err := c.tequilapi.GetIdentities() if err != nil { fmt.Println("Error occurred:", err) return } for _, id := range ids { clio.Status("+", id.Address) } } const usageGetIdentity = "get <identity>" func (c *cliApp) getIdentity(actionArgs []string) { if len(actionArgs) != 1 { clio.Info("Usage: " + usageGetIdentity) return } address := actionArgs[0] identityStatus, err := c.tequilapi.Identity(address) if err != nil { clio.Warn(err) return } clio.Info("Registration Status:", identityStatus.RegistrationStatus) clio.Info("Channel address:", identityStatus.ChannelAddress) clio.Info(fmt.Sprintf("Balance: %s", money.New(identityStatus.Balance))) clio.Info(fmt.Sprintf("Earnings: %s", money.New(identityStatus.Earnings))) clio.Info(fmt.Sprintf("Earnings total: %s", money.New(identityStatus.EarningsTotal))) } const usageNewIdentity = "new [passphrase]" func (c *cliApp) newIdentity(args []string) { if len(args) > 1 { clio.Info("Usage: " + usageNewIdentity) return } passphrase := identityDefaultPassphrase if len(args) == 1 { passphrase = args[0] } id, err := c.tequilapi.NewIdentity(passphrase) if err != nil { clio.Warn(err) return } clio.Success("New identity created:", id.Address) } const usageUnlockIdentity = "unlock <identity> [passphrase]" func (c *cliApp) unlockIdentity(actionArgs []string) { if len(actionArgs) < 1 { clio.Info("Usage: " + usageUnlockIdentity) return } address := actionArgs[0] var passphrase string if len(actionArgs) >= 2 { passphrase = actionArgs[1] } clio.Info("Unlocking", address) err := 
c.tequilapi.Unlock(address, passphrase) if err != nil { clio.Warn(err) return } clio.Success(fmt.Sprintf("Identity %s unlocked.", address)) } const usageRegisterIdentity = "register <identity> [stake] [beneficiary] [referralcode]" func (c *cliApp) registerIdentity(actionArgs []string) { if len(actionArgs) < 1 || len(actionArgs) > 4 { clio.Info("Usage: " + usageRegisterIdentity) return } var address = actionArgs[0] stake := new(big.Int).SetInt64(0) if len(actionArgs) >= 2 { s, ok := new(big.Int).SetString(actionArgs[1], 10) if !ok { clio.Warn("could not parse stake") } stake = s } var beneficiary string if len(actionArgs) >= 3 { beneficiary = actionArgs[2] } var token *string if len(actionArgs) >= 4 { token = &actionArgs[3] } fees, err := c.tequilapi.GetTransactorFees() if err != nil { clio.Warn(err) return } err = c.tequilapi.RegisterIdentity(address, beneficiary, stake, fees.Registration, token) if err != nil { clio.Warn(errors.Wrap(err, "could not register identity")) return } msg := "Registration started. Topup the identities channel to finish it." if config.GetBool(config.FlagTestnet2) || c.config.GetBoolByFlag(config.FlagTestnet) { msg = "Registration successful, try to connect." } clio.Info(msg) clio.Info(fmt.Sprintf("To explore additional information about the identity use: %s", usageGetIdentity)) } const usageSettle = "settle <providerIdentity>" func (c *cliApp) settle(args []string) { if len(args) != 1 { clio.Info("Usage: " + usageSettle) fees, err := c.tequilapi.GetTransactorFees() if err != nil { clio.Warn("could not get transactor fee: ", err) } trFee := new(big.Float).Quo(new(big.Float).SetInt(fees.Settlement), new(big.Float).SetInt(money.MystSize)) hermesFee := new(big.Float).Quo(new(big.Float).SetInt(big.NewInt(int64(fees.Hermes))), new(big.Float).SetInt(money.MystSize)) clio.Info(fmt.Sprintf("Transactor fee: %v MYST", trFee.String())) clio.Info(fmt.Sprintf("Hermes fee: %v MYST", hermesFee.String())) return } hermesID := c.config.GetStringByFlag(config.FlagHermesID) clio.Info("Waiting for settlement to complete") errChan := make(chan error) go func() { errChan <- c.tequilapi.Settle(identity.FromAddress(args[0]), identity.FromAddress(hermesID), true) }() timeout := time.After(time.Minute * 2) for { select { case <-timeout: fmt.Println() clio.Warn("Settlement timed out") return case <-time.After(time.Millisecond * 500): fmt.Print(".") case err := <-errChan: fmt.Println() if err != nil { clio.Warn("settlement failed: ", err.Error()) return } clio.Info("settlement succeeded") return } } } const usageGetReferralCode = "referralcode <identity>" func (c *cliApp) getReferralCode(actionArgs []string) { if len(actionArgs) != 1 { clio.Info("Usage: " + usageGetReferralCode) return } address := actionArgs[0] res, err := c.tequilapi.IdentityReferralCode(address) if err != nil { clio.Warn(errors.Wrap(err, "could not get referral token")) return } clio.Success(fmt.Sprintf("Your referral token is: %q", res.Token)) } func (c *cliApp) setBeneficiary(actionArgs []string) { const usageSetBeneficiary = "beneficiary <identity> <new beneficiary>" if len(actionArgs) < 2 || len(actionArgs) > 3 { clio.Info("Usage: " + usageSetBeneficiary) return } address := actionArgs[0] beneficiary := actionArgs[1] hermesID := c.config.GetStringByFlag(config.FlagHermesID) err := c.tequilapi.SettleWithBeneficiary(address, beneficiary, hermesID) if err != nil { clio.Warn(errors.Wrap(err, "could not set beneficiary")) return } clio.Info("Waiting for new beneficiary to be set") timeout := time.After(1 * time.Minute) for 
{ select { case <-timeout: clio.Warn("Setting new beneficiary timed out") return case <-time.After(time.Second): data, err := c.tequilapi.Beneficiary(address) if err != nil { clio.Warn(err) } if strings.EqualFold(data.Beneficiary, beneficiary) { clio.Success("New beneficiary address set") return } fmt.Print(".") } } }
1
16,911
Probably missing a `return` here as well.
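A minimal sketch of the guard this comment asks for; it mirrors the diff hunk with the early return added, so an empty `hermesID` is never passed on to `Settle`:

```go
// Sketch: bail out after the warning instead of continuing with a
// zero-value hermes ID.
hermesID, err := c.config.GetHermesID()
if err != nil {
	clio.Warn("could not get hermes id: ", err)
	return
}
```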
mysteriumnetwork-node
go
@@ -109,6 +109,12 @@ type ConfigSettings struct { // // EXPERIMENTAL: Subject to change. LoadRaw json.RawMessage `json:"load,omitempty" caddy:"namespace=caddy.config_loaders inline_key=module"` + + // The interval to pull config. With a non-zero value, will pull config + // from config loader (eg. a http loader) with given interval. + // + // EXPERIMENTAL: Subject to change. + PullInterval Duration `json:"pull_interval,omitempty"` } // IdentityConfig configures management of this server's identity. An identity
1
// Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddy import ( "bytes" "context" "crypto" "crypto/tls" "crypto/x509" "encoding/base64" "encoding/json" "errors" "expvar" "fmt" "io" "io/ioutil" "net" "net/http" "net/http/pprof" "net/url" "os" "path" "regexp" "strconv" "strings" "sync" "time" "github.com/caddyserver/caddy/v2/notify" "github.com/caddyserver/certmagic" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" ) // AdminConfig configures Caddy's API endpoint, which is used // to manage Caddy while it is running. type AdminConfig struct { // If true, the admin endpoint will be completely disabled. // Note that this makes any runtime changes to the config // impossible, since the interface to do so is through the // admin endpoint. Disabled bool `json:"disabled,omitempty"` // The address to which the admin endpoint's listener should // bind itself. Can be any single network address that can be // parsed by Caddy. Default: localhost:2019 Listen string `json:"listen,omitempty"` // If true, CORS headers will be emitted, and requests to the // API will be rejected if their `Host` and `Origin` headers // do not match the expected value(s). Use `origins` to // customize which origins/hosts are allowed. If `origins` is // not set, the listen address is the only value allowed by // default. Enforced only on local (plaintext) endpoint. EnforceOrigin bool `json:"enforce_origin,omitempty"` // The list of allowed origins/hosts for API requests. Only needed // if accessing the admin endpoint from a host different from the // socket's network interface or if `enforce_origin` is true. If not // set, the listener address will be the default value. If set but // empty, no origins will be allowed. Enforced only on local // (plaintext) endpoint. Origins []string `json:"origins,omitempty"` // Options pertaining to configuration management. Config *ConfigSettings `json:"config,omitempty"` // Options that establish this server's identity. Identity refers to // credentials which can be used to uniquely identify and authenticate // this server instance. This is required if remote administration is // enabled (but does not require remote administration to be enabled). // Default: no identity management. Identity *IdentityConfig `json:"identity,omitempty"` // Options pertaining to remote administration. By default, remote // administration is disabled. If enabled, identity management must // also be configured, as that is how the endpoint is secured. // See the neighboring "identity" object. // // EXPERIMENTAL: This feature is subject to change. Remote *RemoteAdmin `json:"remote,omitempty"` } // ConfigSettings configures the management of configuration. type ConfigSettings struct { // Whether to keep a copy of the active config on disk. Default is true. // Note that "pulled" dynamic configs (using the neighboring "load" module) // are not persisted; only configs that are pushed to Caddy get persisted. 
Persist *bool `json:"persist,omitempty"` // Loads a configuration to use. This is helpful if your configs are // managed elsewhere, and you want Caddy to pull its config dynamically // when it starts. The pulled config completely replaces the current // one, just like any other config load. It is an error if a pulled // config is configured to pull another config. // // EXPERIMENTAL: Subject to change. LoadRaw json.RawMessage `json:"load,omitempty" caddy:"namespace=caddy.config_loaders inline_key=module"` } // IdentityConfig configures management of this server's identity. An identity // consists of credentials that uniquely verify this instance; for example, // TLS certificates (public + private key pairs). type IdentityConfig struct { // List of names or IP addresses which refer to this server. // Certificates will be obtained for these identifiers so // secure TLS connections can be made using them. Identifiers []string `json:"identifiers,omitempty"` // Issuers that can provide this admin endpoint its identity // certificate(s). Default: ACME issuers configured for // ZeroSSL and Let's Encrypt. Be sure to change this if you // require credentials for private identifiers. IssuersRaw []json.RawMessage `json:"issuers,omitempty" caddy:"namespace=tls.issuance inline_key=module"` issuers []certmagic.Issuer } // RemoteAdmin enables and configures remote administration. If enabled, // a secure listener enforcing mutual TLS authentication will be started // on a different port from the standard plaintext admin server. // // This endpoint is secured using identity management, which must be // configured separately (because identity management does not depend // on remote administration). See the admin/identity config struct. // // EXPERIMENTAL: Subject to change. type RemoteAdmin struct { // The address on which to start the secure listener. // Default: :2021 Listen string `json:"listen,omitempty"` // List of access controls for this secure admin endpoint. // This configures TLS mutual authentication (i.e. authorized // client certificates), but also application-layer permissions // like which paths and methods each identity is authorized for. AccessControl []*AdminAccess `json:"access_control,omitempty"` } // AdminAccess specifies what permissions an identity or group // of identities are granted. type AdminAccess struct { // Base64-encoded DER certificates containing public keys to accept. // (The contents of PEM certificate blocks are base64-encoded DER.) // Any of these public keys can appear in any part of a verified chain. PublicKeys []string `json:"public_keys,omitempty"` // Limits what the associated identities are allowed to do. // If unspecified, all permissions are granted. Permissions []AdminPermissions `json:"permissions,omitempty"` publicKeys []crypto.PublicKey } // AdminPermissions specifies what kinds of requests are allowed // to be made to the admin endpoint. type AdminPermissions struct { // The API paths allowed. Paths are simple prefix matches. // Any subpath of the specified paths will be allowed. Paths []string `json:"paths,omitempty"` // The HTTP methods allowed for the given paths. Methods []string `json:"methods,omitempty"` } // newAdminHandler reads admin's config and returns an http.Handler suitable // for use in an admin endpoint server, which will be listening on listenAddr. 
func (admin AdminConfig) newAdminHandler(addr NetworkAddress, remote bool) adminHandler { muxWrap := adminHandler{mux: http.NewServeMux()} // secure the local or remote endpoint respectively if remote { muxWrap.remoteControl = admin.Remote } else { muxWrap.enforceHost = !addr.isWildcardInterface() muxWrap.allowedOrigins = admin.allowedOrigins(addr) } addRouteWithMetrics := func(pattern string, handlerLabel string, h http.Handler) { labels := prometheus.Labels{"path": pattern, "handler": handlerLabel} h = instrumentHandlerCounter( adminMetrics.requestCount.MustCurryWith(labels), h, ) muxWrap.mux.Handle(pattern, h) } // addRoute just calls muxWrap.mux.Handle after // wrapping the handler with error handling addRoute := func(pattern string, handlerLabel string, h AdminHandler) { wrapper := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { err := h.ServeHTTP(w, r) if err != nil { labels := prometheus.Labels{ "path": pattern, "handler": handlerLabel, "method": strings.ToUpper(r.Method), } adminMetrics.requestErrors.With(labels).Inc() } muxWrap.handleError(w, r, err) }) addRouteWithMetrics(pattern, handlerLabel, wrapper) } const handlerLabel = "admin" // register standard config control endpoints addRoute("/"+rawConfigKey+"/", handlerLabel, AdminHandlerFunc(handleConfig)) addRoute("/id/", handlerLabel, AdminHandlerFunc(handleConfigID)) addRoute("/stop", handlerLabel, AdminHandlerFunc(handleStop)) // register debugging endpoints addRouteWithMetrics("/debug/pprof/", handlerLabel, http.HandlerFunc(pprof.Index)) addRouteWithMetrics("/debug/pprof/cmdline", handlerLabel, http.HandlerFunc(pprof.Cmdline)) addRouteWithMetrics("/debug/pprof/profile", handlerLabel, http.HandlerFunc(pprof.Profile)) addRouteWithMetrics("/debug/pprof/symbol", handlerLabel, http.HandlerFunc(pprof.Symbol)) addRouteWithMetrics("/debug/pprof/trace", handlerLabel, http.HandlerFunc(pprof.Trace)) addRouteWithMetrics("/debug/vars", handlerLabel, expvar.Handler()) // register third-party module endpoints for _, m := range GetModules("admin.api") { router := m.New().(AdminRouter) handlerLabel := m.ID.Name() for _, route := range router.Routes() { addRoute(route.Pattern, handlerLabel, route.Handler) } } return muxWrap } // allowedOrigins returns a list of origins that are allowed. // If admin.Origins is nil (null), the provided listen address // will be used as the default origin. If admin.Origins is // empty, no origins will be allowed, effectively bricking the // endpoint for non-unix-socket endpoints, but whatever. func (admin AdminConfig) allowedOrigins(addr NetworkAddress) []string { uniqueOrigins := make(map[string]struct{}) for _, o := range admin.Origins { uniqueOrigins[o] = struct{}{} } if admin.Origins == nil { if addr.isLoopback() { if addr.IsUnixNetwork() { // RFC 2616, Section 14.26: // "A client MUST include a Host header field in all HTTP/1.1 request // messages. If the requested URI does not include an Internet host // name for the service being requested, then the Host header field MUST // be given with an empty value." 
uniqueOrigins[""] = struct{}{} } else { uniqueOrigins[net.JoinHostPort("localhost", addr.port())] = struct{}{} uniqueOrigins[net.JoinHostPort("::1", addr.port())] = struct{}{} uniqueOrigins[net.JoinHostPort("127.0.0.1", addr.port())] = struct{}{} } } if !addr.IsUnixNetwork() { uniqueOrigins[addr.JoinHostPort(0)] = struct{}{} } } allowed := make([]string, 0, len(uniqueOrigins)) for origin := range uniqueOrigins { allowed = append(allowed, origin) } return allowed } // replaceLocalAdminServer replaces the running local admin server // according to the relevant configuration in cfg. If no configuration // for the admin endpoint exists in cfg, a default one is used, so // that there is always an admin server (unless it is explicitly // configured to be disabled). func replaceLocalAdminServer(cfg *Config) error { // always be sure to close down the old admin endpoint // as gracefully as possible, even if the new one is // disabled -- careful to use reference to the current // (old) admin endpoint since it will be different // when the function returns oldAdminServer := localAdminServer defer func() { // do the shutdown asynchronously so that any // current API request gets a response; this // goroutine may last a few seconds if oldAdminServer != nil { go func(oldAdminServer *http.Server) { err := stopAdminServer(oldAdminServer) if err != nil { Log().Named("admin").Error("stopping current admin endpoint", zap.Error(err)) } }(oldAdminServer) } }() // always get a valid admin config adminConfig := DefaultAdminConfig if cfg != nil && cfg.Admin != nil { adminConfig = cfg.Admin } // if new admin endpoint is to be disabled, we're done if adminConfig.Disabled { Log().Named("admin").Warn("admin endpoint disabled") return nil } // extract a singular listener address addr, err := parseAdminListenAddr(adminConfig.Listen, DefaultAdminListen) if err != nil { return err } handler := adminConfig.newAdminHandler(addr, false) ln, err := Listen(addr.Network, addr.JoinHostPort(0)) if err != nil { return err } localAdminServer = &http.Server{ Addr: addr.String(), // for logging purposes only Handler: handler, ReadTimeout: 10 * time.Second, ReadHeaderTimeout: 5 * time.Second, IdleTimeout: 60 * time.Second, MaxHeaderBytes: 1024 * 64, } adminLogger := Log().Named("admin") go func() { if err := localAdminServer.Serve(ln); !errors.Is(err, http.ErrServerClosed) { adminLogger.Error("admin server shutdown for unknown reason", zap.Error(err)) } }() adminLogger.Info("admin endpoint started", zap.String("address", addr.String()), zap.Bool("enforce_origin", adminConfig.EnforceOrigin), zap.Strings("origins", handler.allowedOrigins)) if !handler.enforceHost { adminLogger.Warn("admin endpoint on open interface; host checking disabled", zap.String("address", addr.String())) } return nil } // manageIdentity sets up automated identity management for this server. 
func manageIdentity(ctx Context, cfg *Config) error { if cfg == nil || cfg.Admin == nil || cfg.Admin.Identity == nil { return nil } // set default issuers; this is pretty hacky because we can't // import the caddytls package -- but it works if cfg.Admin.Identity.IssuersRaw == nil { cfg.Admin.Identity.IssuersRaw = []json.RawMessage{ json.RawMessage(`{"module": "zerossl"}`), json.RawMessage(`{"module": "acme"}`), } } // load and provision issuer modules if cfg.Admin.Identity.IssuersRaw != nil { val, err := ctx.LoadModule(cfg.Admin.Identity, "IssuersRaw") if err != nil { return fmt.Errorf("loading identity issuer modules: %s", err) } for _, issVal := range val.([]interface{}) { cfg.Admin.Identity.issuers = append(cfg.Admin.Identity.issuers, issVal.(certmagic.Issuer)) } } // we'll make a new cache when we make the CertMagic config, so stop any previous cache if identityCertCache != nil { identityCertCache.Stop() } logger := Log().Named("admin.identity") cmCfg := cfg.Admin.Identity.certmagicConfig(logger, true) // issuers have circular dependencies with the configs because, // as explained in the caddytls package, they need access to the // correct storage and cache to solve ACME challenges for _, issuer := range cfg.Admin.Identity.issuers { // avoid import cycle with caddytls package, so manually duplicate the interface here, yuck if annoying, ok := issuer.(interface{ SetConfig(cfg *certmagic.Config) }); ok { annoying.SetConfig(cmCfg) } } // obtain and renew server identity certificate(s) return cmCfg.ManageAsync(ctx, cfg.Admin.Identity.Identifiers) } // replaceRemoteAdminServer replaces the running remote admin server // according to the relevant configuration in cfg. It stops any previous // remote admin server and only starts a new one if configured. 
func replaceRemoteAdminServer(ctx Context, cfg *Config) error { if cfg == nil { return nil } remoteLogger := Log().Named("admin.remote") oldAdminServer := remoteAdminServer defer func() { if oldAdminServer != nil { go func(oldAdminServer *http.Server) { err := stopAdminServer(oldAdminServer) if err != nil { Log().Named("admin").Error("stopping current secure admin endpoint", zap.Error(err)) } }(oldAdminServer) } }() if cfg.Admin == nil || cfg.Admin.Remote == nil { return nil } addr, err := parseAdminListenAddr(cfg.Admin.Remote.Listen, DefaultRemoteAdminListen) if err != nil { return err } // make the HTTP handler but disable Host/Origin enforcement // because we are using TLS authentication instead handler := cfg.Admin.newAdminHandler(addr, true) // create client certificate pool for TLS mutual auth, and extract public keys // so that we can enforce access controls at the application layer clientCertPool := x509.NewCertPool() for i, accessControl := range cfg.Admin.Remote.AccessControl { for j, certBase64 := range accessControl.PublicKeys { cert, err := decodeBase64DERCert(certBase64) if err != nil { return fmt.Errorf("access control %d public key %d: parsing base64 certificate DER: %v", i, j, err) } accessControl.publicKeys = append(accessControl.publicKeys, cert.PublicKey) clientCertPool.AddCert(cert) } } // create TLS config that will enforce mutual authentication cmCfg := cfg.Admin.Identity.certmagicConfig(remoteLogger, false) tlsConfig := cmCfg.TLSConfig() tlsConfig.NextProtos = nil // this server does not solve ACME challenges tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert tlsConfig.ClientCAs = clientCertPool // convert logger to stdlib so it can be used by HTTP server serverLogger, err := zap.NewStdLogAt(remoteLogger, zap.DebugLevel) if err != nil { return err } // create secure HTTP server remoteAdminServer = &http.Server{ Addr: addr.String(), // for logging purposes only Handler: handler, TLSConfig: tlsConfig, ReadTimeout: 10 * time.Second, ReadHeaderTimeout: 5 * time.Second, IdleTimeout: 60 * time.Second, MaxHeaderBytes: 1024 * 64, ErrorLog: serverLogger, } // start listener ln, err := Listen(addr.Network, addr.JoinHostPort(0)) if err != nil { return err } ln = tls.NewListener(ln, tlsConfig) go func() { if err := remoteAdminServer.Serve(ln); !errors.Is(err, http.ErrServerClosed) { remoteLogger.Error("admin remote server shutdown for unknown reason", zap.Error(err)) } }() remoteLogger.Info("secure admin remote control endpoint started", zap.String("address", addr.String())) return nil } func (ident *IdentityConfig) certmagicConfig(logger *zap.Logger, makeCache bool) *certmagic.Config { if ident == nil { // user might not have configured identity; that's OK, we can still make a // certmagic config, although it'll be mostly useless for remote management ident = new(IdentityConfig) } cmCfg := &certmagic.Config{ Storage: DefaultStorage, // do not act as part of a cluster (this is for the server's local identity) Logger: logger, Issuers: ident.issuers, } if makeCache { identityCertCache = certmagic.NewCache(certmagic.CacheOptions{ GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) { return cmCfg, nil }, }) } return certmagic.New(identityCertCache, *cmCfg) } // IdentityCredentials returns this instance's configured, managed identity credentials // that can be used in TLS client authentication. 
func (ctx Context) IdentityCredentials(logger *zap.Logger) ([]tls.Certificate, error) { if ctx.cfg == nil || ctx.cfg.Admin == nil || ctx.cfg.Admin.Identity == nil { return nil, fmt.Errorf("no server identity configured") } ident := ctx.cfg.Admin.Identity if len(ident.Identifiers) == 0 { return nil, fmt.Errorf("no identifiers configured") } if logger == nil { logger = Log() } magic := ident.certmagicConfig(logger, false) return magic.ClientCredentials(ctx, ident.Identifiers) } // enforceAccessControls enforces application-layer access controls for r based on remote. // It expects that the TLS server has already established at least one verified chain of // trust, and then looks for a matching, authorized public key that is allowed to access // the defined path(s) using the defined method(s). func (remote RemoteAdmin) enforceAccessControls(r *http.Request) error { for _, chain := range r.TLS.VerifiedChains { for _, peerCert := range chain { for _, adminAccess := range remote.AccessControl { for _, allowedKey := range adminAccess.publicKeys { // see if we found a matching public key; the TLS server already verified the chain // so we know the client possesses the associated private key; this handy interface // doesn't appear to be defined anywhere in the std lib, but was implemented here: // https://github.com/golang/go/commit/b5f2c0f50297fa5cd14af668ddd7fd923626cf8c comparer, ok := peerCert.PublicKey.(interface{ Equal(crypto.PublicKey) bool }) if !ok || !comparer.Equal(allowedKey) { continue } // key recognized; make sure its HTTP request is permitted for _, accessPerm := range adminAccess.Permissions { // verify method methodFound := accessPerm.Methods == nil for _, method := range accessPerm.Methods { if method == r.Method { methodFound = true break } } if !methodFound { return APIError{ HTTPStatus: http.StatusForbidden, Message: "not authorized to use this method", } } // verify path pathFound := accessPerm.Paths == nil for _, allowedPath := range accessPerm.Paths { if strings.HasPrefix(r.URL.Path, allowedPath) { pathFound = true break } } if !pathFound { return APIError{ HTTPStatus: http.StatusForbidden, Message: "not authorized to access this path", } } } // public key authorized, method and path allowed return nil } } } } // in theory, this should never happen; with an unverified chain, the TLS server // should not accept the connection in the first place, and the acceptable cert // pool is configured using the same list of public keys we verify against return APIError{ HTTPStatus: http.StatusUnauthorized, Message: "client identity not authorized", } } func stopAdminServer(srv *http.Server) error { if srv == nil { return fmt.Errorf("no admin server") } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() err := srv.Shutdown(ctx) if err != nil { return fmt.Errorf("shutting down admin server: %v", err) } Log().Named("admin").Info("stopped previous server", zap.String("address", srv.Addr)) return nil } // AdminRouter is a type which can return routes for the admin API. type AdminRouter interface { Routes() []AdminRoute } // AdminRoute represents a route for the admin endpoint. type AdminRoute struct { Pattern string Handler AdminHandler } type adminHandler struct { mux *http.ServeMux // security for local/plaintext) endpoint, on by default enforceOrigin bool enforceHost bool allowedOrigins []string // security for remote/encrypted endpoint remoteControl *RemoteAdmin } // ServeHTTP is the external entry point for API requests. 
// It will only be called once per request. func (h adminHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { log := Log().Named("admin.api").With( zap.String("method", r.Method), zap.String("host", r.Host), zap.String("uri", r.RequestURI), zap.String("remote_addr", r.RemoteAddr), zap.Reflect("headers", r.Header), ) if r.TLS != nil { log = log.With( zap.Bool("secure", true), zap.Int("verified_chains", len(r.TLS.VerifiedChains)), ) } if r.RequestURI == "/metrics" { log.Debug("received request") } else { log.Info("received request") } h.serveHTTP(w, r) } // serveHTTP is the internal entry point for API requests. It may // be called more than once per request, for example if a request // is rewritten (i.e. internal redirect). func (h adminHandler) serveHTTP(w http.ResponseWriter, r *http.Request) { if h.remoteControl != nil { // enforce access controls on secure endpoint if err := h.remoteControl.enforceAccessControls(r); err != nil { h.handleError(w, r, err) return } } if strings.Contains(r.Header.Get("Upgrade"), "websocket") { // I've never been able demonstrate a vulnerability myself, but apparently // WebSocket connections originating from browsers aren't subject to CORS // restrictions, so we'll just be on the safe side h.handleError(w, r, fmt.Errorf("websocket connections aren't allowed")) return } if h.enforceHost { // DNS rebinding mitigation err := h.checkHost(r) if err != nil { h.handleError(w, r, err) return } } if h.enforceOrigin { // cross-site mitigation origin, err := h.checkOrigin(r) if err != nil { h.handleError(w, r, err) return } if r.Method == http.MethodOptions { w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, GET, POST, PUT, PATCH, DELETE") w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Cache-Control") w.Header().Set("Access-Control-Allow-Credentials", "true") } w.Header().Set("Access-Control-Allow-Origin", origin) } h.mux.ServeHTTP(w, r) } func (h adminHandler) handleError(w http.ResponseWriter, r *http.Request, err error) { if err == nil { return } if err == errInternalRedir { h.serveHTTP(w, r) return } apiErr, ok := err.(APIError) if !ok { apiErr = APIError{ HTTPStatus: http.StatusInternalServerError, Err: err, } } if apiErr.HTTPStatus == 0 { apiErr.HTTPStatus = http.StatusInternalServerError } if apiErr.Message == "" && apiErr.Err != nil { apiErr.Message = apiErr.Err.Error() } Log().Named("admin.api").Error("request error", zap.Error(err), zap.Int("status_code", apiErr.HTTPStatus), ) w.Header().Set("Content-Type", "application/json") w.WriteHeader(apiErr.HTTPStatus) encErr := json.NewEncoder(w).Encode(apiErr) if encErr != nil { Log().Named("admin.api").Error("failed to encode error response", zap.Error(encErr)) } } // checkHost returns a handler that wraps next such that // it will only be called if the request's Host header matches // a trustworthy/expected value. This helps to mitigate DNS // rebinding attacks. func (h adminHandler) checkHost(r *http.Request) error { var allowed bool for _, allowedHost := range h.allowedOrigins { if r.Host == allowedHost { allowed = true break } } if !allowed { return APIError{ HTTPStatus: http.StatusForbidden, Err: fmt.Errorf("host not allowed: %s", r.Host), } } return nil } // checkOrigin ensures that the Origin header, if // set, matches the intended target; prevents arbitrary // sites from issuing requests to our listener. It // returns the origin that was obtained from r. 
func (h adminHandler) checkOrigin(r *http.Request) (string, error) { origin := h.getOriginHost(r) if origin == "" { return origin, APIError{ HTTPStatus: http.StatusForbidden, Err: fmt.Errorf("missing required Origin header"), } } if !h.originAllowed(origin) { return origin, APIError{ HTTPStatus: http.StatusForbidden, Err: fmt.Errorf("client is not allowed to access from origin %s", origin), } } return origin, nil } func (h adminHandler) getOriginHost(r *http.Request) string { origin := r.Header.Get("Origin") if origin == "" { origin = r.Header.Get("Referer") } originURL, err := url.Parse(origin) if err == nil && originURL.Host != "" { origin = originURL.Host } return origin } func (h adminHandler) originAllowed(origin string) bool { for _, allowedOrigin := range h.allowedOrigins { originCopy := origin if !strings.Contains(allowedOrigin, "://") { // no scheme specified, so allow both originCopy = strings.TrimPrefix(originCopy, "http://") originCopy = strings.TrimPrefix(originCopy, "https://") } if originCopy == allowedOrigin { return true } } return false } func handleConfig(w http.ResponseWriter, r *http.Request) error { switch r.Method { case http.MethodGet: w.Header().Set("Content-Type", "application/json") err := readConfig(r.URL.Path, w) if err != nil { return APIError{HTTPStatus: http.StatusBadRequest, Err: err} } return nil case http.MethodPost, http.MethodPut, http.MethodPatch, http.MethodDelete: // DELETE does not use a body, but the others do var body []byte if r.Method != http.MethodDelete { if ct := r.Header.Get("Content-Type"); !strings.Contains(ct, "/json") { return APIError{ HTTPStatus: http.StatusBadRequest, Err: fmt.Errorf("unacceptable content-type: %v; 'application/json' required", ct), } } buf := bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) _, err := io.Copy(buf, r.Body) if err != nil { return APIError{ HTTPStatus: http.StatusBadRequest, Err: fmt.Errorf("reading request body: %v", err), } } body = buf.Bytes() } forceReload := r.Header.Get("Cache-Control") == "must-revalidate" err := changeConfig(r.Method, r.URL.Path, body, forceReload) if err != nil { return err } default: return APIError{ HTTPStatus: http.StatusMethodNotAllowed, Err: fmt.Errorf("method %s not allowed", r.Method), } } return nil } func handleConfigID(w http.ResponseWriter, r *http.Request) error { idPath := r.URL.Path parts := strings.Split(idPath, "/") if len(parts) < 3 || parts[2] == "" { return fmt.Errorf("request path is missing object ID") } if parts[0] != "" || parts[1] != "id" { return fmt.Errorf("malformed object path") } id := parts[2] // map the ID to the expanded path currentCfgMu.RLock() expanded, ok := rawCfgIndex[id] defer currentCfgMu.RUnlock() if !ok { return fmt.Errorf("unknown object ID '%s'", id) } // piece the full URL path back together parts = append([]string{expanded}, parts[3:]...) r.URL.Path = path.Join(parts...) return errInternalRedir } func handleStop(w http.ResponseWriter, r *http.Request) error { if r.Method != http.MethodPost { return APIError{ HTTPStatus: http.StatusMethodNotAllowed, Err: fmt.Errorf("method not allowed"), } } if err := notify.NotifyStopping(); err != nil { Log().Error("unable to notify stopping to service manager", zap.Error(err)) } exitProcess(Log().Named("admin.api")) return nil } // unsyncedConfigAccess traverses into the current config and performs // the operation at path according to method, using body and out as // needed. 
This is a low-level, unsynchronized function; most callers // will want to use changeConfig or readConfig instead. This requires a // read or write lock on currentCfgMu, depending on method (GET needs // only a read lock; all others need a write lock). func unsyncedConfigAccess(method, path string, body []byte, out io.Writer) error { var err error var val interface{} // if there is a request body, decode it into the // variable that will be set in the config according // to method and path if len(body) > 0 { err = json.Unmarshal(body, &val) if err != nil { return fmt.Errorf("decoding request body: %v", err) } } enc := json.NewEncoder(out) cleanPath := strings.Trim(path, "/") if cleanPath == "" { return fmt.Errorf("no traversable path") } parts := strings.Split(cleanPath, "/") if len(parts) == 0 { return fmt.Errorf("path missing") } // A path that ends with "..." implies: // 1) the part before it is an array // 2) the payload is an array // and means that the user wants to expand the elements // in the payload array and append each one into the // destination array, like so: // array = append(array, elems...) // This special case is handled below. ellipses := parts[len(parts)-1] == "..." if ellipses { parts = parts[:len(parts)-1] } var ptr interface{} = rawCfg traverseLoop: for i, part := range parts { switch v := ptr.(type) { case map[string]interface{}: // if the next part enters a slice, and the slice is our destination, // handle it specially (because appending to the slice copies the slice // header, which does not replace the original one like we want) if arr, ok := v[part].([]interface{}); ok && i == len(parts)-2 { var idx int if method != http.MethodPost { idxStr := parts[len(parts)-1] idx, err = strconv.Atoi(idxStr) if err != nil { return fmt.Errorf("[%s] invalid array index '%s': %v", path, idxStr, err) } if idx < 0 || idx >= len(arr) { return fmt.Errorf("[%s] array index out of bounds: %s", path, idxStr) } } switch method { case http.MethodGet: err = enc.Encode(arr[idx]) if err != nil { return fmt.Errorf("encoding config: %v", err) } case http.MethodPost: if ellipses { valArray, ok := val.([]interface{}) if !ok { return fmt.Errorf("final element is not an array") } v[part] = append(arr, valArray...) } else { v[part] = append(arr, val) } case http.MethodPut: // avoid creation of new slice and a second copy (see // https://github.com/golang/go/wiki/SliceTricks#insert) arr = append(arr, nil) copy(arr[idx+1:], arr[idx:]) arr[idx] = val v[part] = arr case http.MethodPatch: arr[idx] = val case http.MethodDelete: v[part] = append(arr[:idx], arr[idx+1:]...) default: return fmt.Errorf("unrecognized method %s", method) } break traverseLoop } if i == len(parts)-1 { switch method { case http.MethodGet: err = enc.Encode(v[part]) if err != nil { return fmt.Errorf("encoding config: %v", err) } case http.MethodPost: // if the part is an existing list, POST appends to // it, otherwise it just sets or creates the value if arr, ok := v[part].([]interface{}); ok { if ellipses { valArray, ok := val.([]interface{}) if !ok { return fmt.Errorf("final element is not an array") } v[part] = append(arr, valArray...) 
} else { v[part] = append(arr, val) } } else { v[part] = val } case http.MethodPut: if _, ok := v[part]; ok { return fmt.Errorf("[%s] key already exists: %s", path, part) } v[part] = val case http.MethodPatch: if _, ok := v[part]; !ok { return fmt.Errorf("[%s] key does not exist: %s", path, part) } v[part] = val case http.MethodDelete: delete(v, part) default: return fmt.Errorf("unrecognized method %s", method) } } else { // if we are "PUTting" a new resource, the key(s) in its path // might not exist yet; that's OK but we need to make them as // we go, while we still have a pointer from the level above if v[part] == nil && method == http.MethodPut { v[part] = make(map[string]interface{}) } ptr = v[part] } case []interface{}: partInt, err := strconv.Atoi(part) if err != nil { return fmt.Errorf("[/%s] invalid array index '%s': %v", strings.Join(parts[:i+1], "/"), part, err) } if partInt < 0 || partInt >= len(v) { return fmt.Errorf("[/%s] array index out of bounds: %s", strings.Join(parts[:i+1], "/"), part) } ptr = v[partInt] default: return fmt.Errorf("invalid traversal path at: %s", strings.Join(parts[:i+1], "/")) } } return nil } // RemoveMetaFields removes meta fields like "@id" from a JSON message // by using a simple regular expression. (An alternate way to do this // would be to delete them from the raw, map[string]interface{} // representation as they are indexed, then iterate the index we made // and add them back after encoding as JSON, but this is simpler.) func RemoveMetaFields(rawJSON []byte) []byte { return idRegexp.ReplaceAllFunc(rawJSON, func(in []byte) []byte { // matches with a comma on both sides (when "@id" property is // not the first or last in the object) need to keep exactly // one comma for correct JSON syntax comma := []byte{','} if bytes.HasPrefix(in, comma) && bytes.HasSuffix(in, comma) { return comma } return []byte{} }) } // AdminHandler is like http.Handler except ServeHTTP may return an error. // // If any handler encounters an error, it should be returned for proper // handling. type AdminHandler interface { ServeHTTP(http.ResponseWriter, *http.Request) error } // AdminHandlerFunc is a convenience type like http.HandlerFunc. type AdminHandlerFunc func(http.ResponseWriter, *http.Request) error // ServeHTTP implements the Handler interface. func (f AdminHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error { return f(w, r) } // APIError is a structured error that every API // handler should return for consistency in logging // and client responses. If Message is unset, then // Err.Error() will be serialized in its place. type APIError struct { HTTPStatus int `json:"-"` Err error `json:"-"` Message string `json:"error"` } func (e APIError) Error() string { if e.Err != nil { return e.Err.Error() } return e.Message } // parseAdminListenAddr extracts a singular listen address from either addr // or defaultAddr, returning the network and the address of the listener. func parseAdminListenAddr(addr string, defaultAddr string) (NetworkAddress, error) { input := addr if input == "" { input = defaultAddr } listenAddr, err := ParseNetworkAddress(input) if err != nil { return NetworkAddress{}, fmt.Errorf("parsing listener address: %v", err) } if listenAddr.PortRangeSize() != 1 { return NetworkAddress{}, fmt.Errorf("must be exactly one listener address; cannot listen on: %s", listenAddr) } return listenAddr, nil } // decodeBase64DERCert base64-decodes, then DER-decodes, certStr. 
func decodeBase64DERCert(certStr string) (*x509.Certificate, error) { derBytes, err := base64.StdEncoding.DecodeString(certStr) if err != nil { return nil, err } return x509.ParseCertificate(derBytes) } var ( // DefaultAdminListen is the address for the local admin // listener, if none is specified at startup. DefaultAdminListen = "localhost:2019" // DefaultRemoteAdminListen is the address for the remote // (TLS-authenticated) admin listener, if enabled and not // specified otherwise. DefaultRemoteAdminListen = ":2021" // DefaultAdminConfig is the default configuration // for the local administration endpoint. DefaultAdminConfig = &AdminConfig{ Listen: DefaultAdminListen, } ) // PIDFile writes a pidfile to the file at filename. It // will get deleted before the process gracefully exits. func PIDFile(filename string) error { pid := []byte(strconv.Itoa(os.Getpid()) + "\n") err := ioutil.WriteFile(filename, pid, 0600) if err != nil { return err } pidfile = filename return nil } // idRegexp is used to match ID fields and their associated values // in the config. It also matches adjacent commas so that syntax // can be preserved no matter where in the object the field appears. // It supports string and most numeric values. var idRegexp = regexp.MustCompile(`(?m),?\s*"` + idKey + `"\s*:\s*(-?[0-9]+(\.[0-9]+)?|(?U)".*")\s*,?`) // pidfile is the name of the pidfile, if any. var pidfile string // errInternalRedir indicates an internal redirect // and is useful when admin API handlers rewrite // the request; in that case, authentication and // authorization needs to happen again for the // rewritten request. var errInternalRedir = fmt.Errorf("internal redirect; re-authorization required") const ( rawConfigKey = "config" idKey = "@id" ) var bufPool = sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } // keep a reference to admin endpoint singletons while they're active var ( localAdminServer, remoteAdminServer *http.Server identityCertCache *certmagic.Cache )
1
16,669
On second thought... this affects how often the function specified by the `load` property is run. So I wonder if, for consistency, we should call this `LoadInterval` instead. Does that make sense?
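For illustration, a minimal sketch of the rename suggested above, assuming a config struct that pairs the `load` module with its polling interval; the struct name, field names, and JSON tags here are assumptions, not Caddy's actual definitions:

package config

import (
	"encoding/json"
	"time"
)

// ConfigSettings stands in for the struct holding the `load` property;
// only the two fields relevant to the suggested rename are shown.
type ConfigSettings struct {
	// LoadRaw is the module that fetches a new config (the `load` property).
	LoadRaw json.RawMessage `json:"load,omitempty"`

	// LoadInterval, per the review, names the field after the `load`
	// behavior it controls: how often the loader above is run.
	LoadInterval time.Duration `json:"load_interval,omitempty"`
}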
caddyserver-caddy
go
@@ -2,7 +2,8 @@ var exported = {}, common = require('../../../api/utils/common.js'), crypto = require('crypto'), countlyCommon = require('../../../api/lib/countly.common.js'), - plugins = require('../../pluginManager.js'); + plugins = require('../../pluginManager.js'), + localize = require('../../../api/utils/localization.js'); (function() { /** * register internalEvent
1
var exported = {}, common = require('../../../api/utils/common.js'), crypto = require('crypto'), countlyCommon = require('../../../api/lib/countly.common.js'), plugins = require('../../pluginManager.js'); (function() { /** * register internalEvent */ plugins.internalEvents.push('[CLY]_star_rating'); plugins.internalDrillEvents.push("[CLY]_star_rating"); plugins.internalOmitSegments["[CLY]_star_rating"] = ["email", "comment", "widget_id", "contactMe"]; var createFeedbackWidget = function(ob) { var obParams = ob.params; var validateUserForWrite = ob.validateUserForWriteAPI; validateUserForWrite(function(params) { var popupHeaderText = params.qstring.popup_header_text; var popupCommentCallout = params.qstring.popup_comment_callout; var popupEmailCallout = params.qstring.popup_email_callout; var popupButtonCallout = params.qstring.popup_button_callout; var popupThanksMessage = params.qstring.popup_thanks_message; var triggerPosition = params.qstring.trigger_position; var triggerBgColor = params.qstring.trigger_bg_color; var triggerFontColor = params.qstring.trigger_font_color; var triggerButtonText = params.qstring.trigger_button_text; var targetDevices = {}; try { targetDevices = JSON.parse(params.qstring.target_devices); } catch (jsonParseError) { targetDevices = { desktop: true, phone: true, tablet: true }; } var targetPage = params.qstring.target_page; var targetPages = []; try { targetPages = JSON.parse(params.qstring.target_pages); } catch (jsonParseError) { targetPages = ["/"]; } var isActive = params.qstring.is_active; var hideSticker = params.qstring.hide_sticker || false; var app = params.qstring.app_id; var collectionName = "feedback_widgets"; var widget = { "popup_header_text": popupHeaderText, "popup_comment_callout": popupCommentCallout, "popup_email_callout": popupEmailCallout, "popup_button_callout": popupButtonCallout, "popup_thanks_message": popupThanksMessage, "trigger_position": triggerPosition, "trigger_bg_color": triggerBgColor, "trigger_font_color": triggerFontColor, "trigger_button_text": triggerButtonText, "target_devices": targetDevices, "target_page": targetPage, "target_pages": targetPages, "is_active": isActive, "hide_sticker": hideSticker, "app_id": app }; common.db.collection(collectionName).insert(widget, function(err) { if (!err) { common.returnMessage(ob.params, 201, "Success"); plugins.dispatch("/systemlogs", {params: params, action: "Widget added", data: widget}); return true; } else { common.returnMessage(ob.params, 500, err.message); return false; } }); }, obParams); return true; }; var removeFeedbackWidget = function(ob) { var obParams = ob.params; var validateUserForWrite = ob.validateUserForWriteAPI; validateUserForWrite(function(params) { var widgetId = params.qstring.widget_id; var app = params.qstring.app_id; var withData = params.qstring.with_data; var collectionName = "feedback_widgets"; common.db.collection(collectionName).findOne({"_id": common.db.ObjectID(widgetId) }, function(err, widget) { if (!err && widget) { common.db.collection(collectionName).remove({ "_id": common.db.ObjectID(widgetId) }, function(removeWidgetErr) { if (!removeWidgetErr) { // remove widget and related data if (withData) { removeWidgetData(widgetId, app, function(removeError) { if (removeError) { common.returnMessage(ob.params, 500, removeError.message); return false; } else { common.returnMessage(ob.params, 200, 'Success'); plugins.dispatch("/systemlogs", {params: params, action: "Widget deleted with data", data: widget}); return true; } }); } // remove only widget else 
{ common.returnMessage(ob.params, 200, 'Success'); plugins.dispatch("/systemlogs", {params: params, action: "Widget deleted", data: widget}); return true; } } else { common.returnMessage(ob.params, 500, removeWidgetErr.message); return false; } }); } else { common.returnMessage(ob.params, 404, "Widget not found"); return false; } }); }, obParams); return true; }; var editFeedbackWidget = function(ob) { var obParams = ob.params; var validateUserForWrite = ob.validateUserForWriteAPI; validateUserForWrite(function(params) { var id = params.qstring.widget_id; var collectionName = "feedback_widgets"; var changes = {}; try { var widgetId = common.db.ObjectID(id); } catch (e) { common.returnMessage(params, 500, 'Invalid widget id.'); return false; } if (params.qstring.popup_header_text) { changes.popup_header_text = params.qstring.popup_header_text; } if (params.qstring.popup_email_callout) { changes.popup_email_callout = params.qstring.popup_email_callout; } if (params.qstring.popup_button_callout) { changes.popup_button_callout = params.qstring.popup_button_callout; } if (params.qstring.popup_comment_callout) { changes.popup_comment_callout = params.qstring.popup_comment_callout; } if (params.qstring.popup_thanks_message) { changes.popup_thanks_message = params.qstring.popup_thanks_message; } if (params.qstring.trigger_position) { changes.trigger_position = params.qstring.trigger_position; } if (params.qstring.trigger_bg_color) { changes.trigger_bg_color = params.qstring.trigger_bg_color; } if (params.qstring.trigger_button_text) { changes.trigger_button_text = params.qstring.trigger_button_text; } if (params.qstring.trigger_font_color) { changes.trigger_font_color = params.qstring.trigger_font_color; } if (params.qstring.target_devices) { try { changes.target_devices = JSON.parse(params.qstring.target_devices); } catch (jsonParseError) { changes.target_devices = { desktop: true, phone: true, tablet: true }; } } if (params.qstring.target_page) { changes.target_page = params.qstring.target_page; } if (params.qstring.target_pages) { try { changes.target_pages = JSON.parse(params.qstring.target_pages); } catch (jsonParseError) { changes.target_pages = ["/"]; } } if (params.qstring.is_active) { changes.is_active = params.qstring.is_active; } if (params.qstring.hide_sticker) { changes.hide_sticker = params.qstring.hide_sticker; } common.db.collection(collectionName).findOne({"_id": widgetId}, function(err, widget) { if (!err && widget) { common.db.collection(collectionName).findAndModify({ _id: widgetId }, {}, {$set: changes}, function(updateWidgetErr) { if (!updateWidgetErr) { common.returnMessage(params, 200, 'Success'); plugins.dispatch("/systemlogs", {params: params, action: "Widget edited", data: {before: widget, update: changes}}); return true; } else { common.returnMessage(params, 500, updateWidgetErr.message); return false; } }); } else { common.returnMessage(params, 404, "Widget not found"); } }); }, obParams); return true; }; var removeWidgetData = function(widgetId, app, callback) { var collectionName = "feedback" + app; common.db.collection(collectionName).remove({ "widget_id": widgetId }, function(err) { if (!err) { callback(null); } else { callback(err); } }); }; plugins.register("/i", function(ob) { var params = ob.params; if (params.qstring.events && params.qstring.events.length && Array.isArray(params.qstring.events)) { params.qstring.events = params.qstring.events.filter(function(currEvent) { if (currEvent.key === "[CLY]_star_rating") { /** * register for process new rating event 
data. * the original event format like: * { key: '[CLY]_star_rating', count:1, sum:1, segmentation:{ platform:"iOS", version:"3.2", rating:2} * this function will add a field call "platform_version_rate" in segmentation. */ currEvent.segmentation.platform = currEvent.segmentation.platform || "undefined"; //because we have a lot of old data with undefined currEvent.segmentation.rating = currEvent.segmentation.rating || "undefined"; currEvent.segmentation.widget_id = currEvent.segmentation.widget_id || "undefined"; currEvent.segmentation.app_version = currEvent.segmentation.app_version || "undefined"; currEvent.segmentation.platform_version_rate = currEvent.segmentation.platform + "**" + currEvent.segmentation.app_version + "**" + currEvent.segmentation.rating + "**" + currEvent.segmentation.widget_id + "**"; // is provided email & comment fields if ((currEvent.segmentation.email && currEvent.segmentation.email.length > 0) || (currEvent.segmentation.comment && currEvent.segmentation.comment.length > 0)) { var collectionName = 'feedback' + ob.params.app._id; common.db.collection(collectionName).insert({ "email": currEvent.segmentation.email, "comment": currEvent.segmentation.comment, "ts": (currEvent.timestamp) ? common.initTimeObj(params.appTimezone, currEvent.timestamp).timestamp : params.time.timestamp, "device_id": params.qstring.device_id, "cd": new Date(), "uid": params.app_user.uid, "contact_me": currEvent.segmentation.contactMe, "rating": currEvent.segmentation.rating, "platform": currEvent.segmentation.platform, "app_version": currEvent.segmentation.app_version, "widget_id": currEvent.segmentation.widget_id }, function(err) { if (err) { return false; } }); } } return true; }); } }); /* * @apiName: CreateFeedbackWidget * @type: GET * @apiDescription: Create web feedback widget from Countly web application * @apiParam: 'popup_header_text', Header text of feedback popup * @apiParam: 'popup_email_callout', "Contact me by e-mail" text of * feedback popup * @apiParam: 'popup_comment_callout', "Add comment" text of feedback popup * @apiParam: 'popup_thanks_message', Message of thanks popup * @apiParam: 'trigger_position', position of feedback trigger sticky, * should be one of these ['mleft','mright','bleft','bright'] * @apiParam: 'trigger_bg_color', #hex code of background color of feedback * trigger sticky button * @apiParam: 'trigger_font_color', #hex code of font color of feedback * trigger sticky button * @apiParam: 'trigger_button_text', text of feedback sticky button * @apiParam: 'target_devices', target device array of feedback * fe: ['mobile','tablet'] * @apiParam: 'target_page', target page of feedback, should be one of * these values ['all','selected'] * @apiParam: 'target_pages', if 'target_page' property set as 'selected', * this param should be provided as array of selected pages * fe: ['/home','/login'] * @apiParam: 'is_active', is that feedback should set active as default? * @apiParam: 'hide_sticker', is that feedback should set hidden as default? 
* @apiParam: 'app_id', app_id of related application */ plugins.register("/i/feedback/widgets/create", createFeedbackWidget); /* * @apiName: RemoveFeedbackWidget * @type: GET * @apiDescription: Remove web feedback widget from Countly web application * @apiParam: 'widget_id', Id of widget which will be removed * @apiParam: 'with_data', Boolean property for remove data belong to widget which will be removed with it * @apiParam: 'app_id', app_id of related application */ plugins.register("/i/feedback/widgets/remove", removeFeedbackWidget); /* * @apiName: EditFeedbackWidget * @type: GET * @apiDescription: Edit web feedback widget settings from Countly web application * @apiParam: 'popup_header_text', Header text of feedback popup * @apiParam: 'popup_email_callout', "Contact me by e-mail" text of * feedback popup * @apiParam: 'popup_comment_callout', "Add comment" text of feedback popup * @apiParam: 'popup_thanks_message', Message of thanks popup * @apiParam: 'trigger_position', position of feedback trigger sticky, * should be one of these ['mleft','mright','bleft','bright'] * @apiParam: 'trigger_bg_color', #hex code of background color of feedback * trigger sticky button * @apiParam: 'trigger_font_color', #hex code of font color of feedback * trigger sticky button * @apiParam: 'trigger_button_text', text of feedback sticky button * @apiParam: 'target_devices', target device array of feedback * fe: ['mobile','tablet'] * @apiParam: 'target_page', target page of feedback, should be one of * these values ['all','selected'] * @apiParam: 'target_pages', if 'target_page' property set as 'selected', * this param should be provided as array of selected pages * fe: ['/home','/login'] * @apiParam: 'is_active', is that feedback should set active as default? * @apiParam: 'app_id', app_id of related application */ plugins.register("/i/feedback/widgets/edit", editFeedbackWidget); /* * @apiName: GetFeedbackData * @apiDescription: Get feedback data with or without filters * @apiParam: 'widget_id', Id of related widget * @apiParam: 'rating', filter by rating * @apiParam: 'device_id', filter by device_id * @apiParam: 'app_id', app_id of related application */ plugins.register('/o/feedback/data', function(ob) { var params = ob.params; var app = params.qstring.app_id; var collectionName = 'feedback' + app; var query = {}; query.ts = countlyCommon.getTimestampRangeQuery(params, true); if (params.qstring.widget_id) { query.widget_id = params.qstring.widget_id; } if (params.qstring.rating) { query.rating = parseInt(params.qstring.rating); } if (params.qstring.version) { query.app_version = params.qstring.version; } if (params.qstring.platform) { query.platform = params.qstring.platform; } if (params.qstring.device_id) { query.device_id = params.qstring.device_id; } var validateUserForRead = ob.validateUserForDataReadAPI; validateUserForRead(params, function() { common.db.collection(collectionName).find(query).toArray(function(err, docs) { if (!err) { common.returnOutput(params, docs); return true; } else { common.returnMessage(params, 500, err.message); return false; } }); }); return true; }); /* * @apiName: GetMultipleWidgetsById * @apiDescription: Get feedback widgets with or without filters * @apiParam: 'app_key', app_key of related application provided by sdk request */ plugins.register('/o/feedback/multiple-widgets-by-id', function(ob) { var params = ob.params; var collectionName = 'feedback_widgets'; if (params.qstring.widgets && params.qstring.widgets.length > 0) { var widgets = []; try { widgets = 
JSON.parse(params.qstring.widgets); } catch (jsonParseError) { widgets = []; } var widgetIdsArray = widgets.map(function(d) { return common.db.ObjectID(d); }); common.db.collection(collectionName).find({ _id: { $in: widgetIdsArray } }).toArray(function(err, docs) { if (!err) { common.returnOutput(params, docs); return true; } else { common.returnMessage(params, 500, err.message); return false; } }); } else { common.returnMessage(params, 500, 'You should provide widget ids array.'); return false; } return true; }); /* * @apiName: GetWidgetsData * @apiDescription: Get feedback widgets with or without filters * @apiParam: 'app_id', app_id of related application * @apiParam: 'is_active', is_active option for widgets */ plugins.register('/o/feedback/widgets', function(ob) { var params = ob.params; var validateUserForRead = ob.validateUserForDataReadAPI; validateUserForRead(params, function() { var collectionName = 'feedback_widgets'; var query = {}; if (params.qstring.is_active) { query.is_active = params.qstring.is_active; } if (params.qstring.app_id) { query.app_id = params.qstring.app_id; } common.db.collection(collectionName).find(query).toArray(function(err, docs) { if (!err) { common.returnOutput(params, docs); return true; } else { common.returnMessage(params, 500, err.message); return false; } }); }); return true; }); /* * @apiName: GetOneWidget */ plugins.register('/o/feedback/widget', function(ob) { var params = ob.params; // check widget_id param is provided? if (!params.qstring.widget_id) { common.returnMessage(ob.params, 400, 'Missing parameter "widget_id"'); return true; } // for request which sent from countly with app_key without app_id var widgetId = params.qstring.widget_id; var collectionName = 'feedback_widgets'; try { widgetId = common.db.ObjectID(widgetId); } catch (e) { common.returnMessage(params, 500, 'Invalid widget id.'); return true; } common.db.collection(collectionName).findOne({ "_id": widgetId }, function(err, doc) { if (err) { common.returnMessage(params, 500, err.message); } else if (!doc) { common.returnMessage(params, 404, 'Widget not found.'); } else { common.returnOutput(params, doc); } }); return true; }); /** * register for fetching platform and version metadata. 
*/ plugins.register('/o', function(ob) { var params = ob.params; if (params.qstring.method === 'star') { if (params.qstring.period) { //check if period comes from datapicker if (params.qstring.period.indexOf(",") !== -1) { try { params.qstring.period = JSON.parse(params.qstring.period); } catch (SyntaxError) { common.returnMessage(params, 400, 'Bad request parameter: period'); return true; } } else { switch (params.qstring.period) { case "month": case "day": case "yesterday": case "hour": break; default: if (!/([0-9]+)days/.test(params.qstring.period)) { common.returnMessage(params, 400, 'Bad request parameter: period'); return true; } break; } } } else { common.returnMessage(params, 400, 'Missing request parameter: period'); return true; } countlyCommon.setPeriod(params.qstring.period, true); var periodObj = countlyCommon.periodObj; var collectionName = 'events' + crypto.createHash('sha1').update('[CLY]_star_rating' + params.qstring.app_id).digest('hex'); var documents = []; for (var i = 0; i < periodObj.reqZeroDbDateIds.length; i++) { documents.push("no-segment_" + periodObj.reqZeroDbDateIds[i]); for (var m = 0; m < common.base64.length; m++) { documents.push("no-segment_" + periodObj.reqZeroDbDateIds[i] + "_" + common.base64[m]); } } common.db.collection(collectionName).find({ '_id': { $in: documents } }).toArray(function(err, docs) { if (!err) { var result = {}; docs.forEach(function(doc) { if (!doc.meta) { doc.meta = {}; } if (!doc.meta.platform_version_rate) { doc.meta.platform_version_rate = []; } if (doc.meta_v2 && doc.meta_v2.platform_version_rate) { common.arrayAddUniq(doc.meta.platform_version_rate, Object.keys(doc.meta_v2.platform_version_rate)); } doc.meta.platform_version_rate.forEach(function(item) { var data = item.split('**'); if (result[data[0]] === undefined) { result[data[0]] = []; } if (result[data[0]].indexOf(data[1]) === -1) { result[data[0]].push(data[1]); } }); }); common.returnOutput(params, result); return true; } }); return true; } return false; }); plugins.register("/i/apps/create", function(ob) { var appId = ob.appId; common.db.collection('feedback' + appId).ensureIndex({ "uid": 1 }, function() {}); common.db.collection('feedback' + appId).ensureIndex({ "ts": 1 }, function() {}); }); plugins.register("/i/apps/delete", function(ob) { var appId = ob.appId; common.db.collection('feedback_widgets').drop(function() {}); common.db.collection('feedback' + appId).drop(function() {}); common.db.collection("events" + crypto.createHash('sha1').update("[CLY]_star_rating" + appId).digest('hex')).drop(function() {}); if (common.drillDb) { common.drillDb.collection("drill_events" + crypto.createHash('sha1').update("[CLY]_star_rating" + appId).digest('hex')).drop(function() {}); } }); plugins.register("/i/apps/clear", function(ob) { var appId = ob.appId; common.db.collection('feedback' + appId).remove({ ts: { $lt: ob.moment.unix() } }, function() {}); common.db.collection("events" + crypto.createHash('sha1').update("[CLY]_star_rating" + appId).digest('hex')).remove({ ts: { $lt: ob.moment.unix() } }, function() {}); if (common.drillDb) { common.drillDb.collection("drill_events" + crypto.createHash('sha1').update("[CLY]_star_rating" + appId).digest('hex')).remove({ ts: { $lt: ob.moment.unix() } }, function() {}); } }); plugins.register("/i/apps/clear_all", function(ob) { var appId = ob.appId; common.db.collection('feedback' + appId).drop(function() { common.db.collection('feedback' + appId).ensureIndex({ "uid": 1 }, function() {}); common.db.collection('feedback' + 
appId).ensureIndex({ "ts": 1 }, function() {}); }); common.db.collection("events" + crypto.createHash('sha1').update("[CLY]_star_rating" + appId).digest('hex')).drop(function() {}); if (common.drillDb) { common.drillDb.collection("drill_events" + crypto.createHash('sha1').update("[CLY]_star_rating" + appId).digest('hex')).drop(function() {}); } }); plugins.register("/i/apps/reset", function(ob) { var appId = ob.appId; common.db.collection('feedback_widgets').drop(function() {}); common.db.collection('feedback' + appId).drop(function() { common.db.collection('feedback' + appId).ensureIndex({ "uid": 1 }, function() {}); common.db.collection('feedback' + appId).ensureIndex({ "ts": 1 }, function() {}); }); common.db.collection("events" + crypto.createHash('sha1').update("[CLY]_star_rating" + appId).digest('hex')).drop(function() {}); if (common.drillDb) { common.drillDb.collection("drill_events" + crypto.createHash('sha1').update("[CLY]_star_rating" + appId).digest('hex')).drop(function() {}); } }); plugins.register("/i/device_id", function(ob) { var appId = ob.app_id; var oldUid = ob.oldUser.uid; var newUid = ob.newUser.uid; if (oldUid !== newUid) { common.db.collection("feedback" + appId).update({ uid: oldUid }, { '$set': { uid: newUid } }, { multi: true }, function() {}); } }); plugins.register("/i/app_users/delete", function(ob) { var appId = ob.app_id; var uids = ob.uids; if (uids && uids.length) { common.db.collection("feedback" + appId).remove({ uid: { $in: uids } }, function() {}); } }); plugins.register("/i/app_users/export", function(ob) { return new Promise(function(resolve) { var uids = ob.uids; if (uids && uids.length) { if (!ob.export_commands.feedback) { ob.export_commands.feedback = []; } ob.export_commands.feedback.push('mongoexport ' + ob.dbstr + ' --collection feedback' + ob.app_id + ' -q \'{uid:{$in: ["' + uids.join('","') + '"]}}\' --out ' + ob.export_folder + '/feedback' + ob.app_id + '.json'); resolve(); } }); }); }(exported)); module.exports = exported;
1
13,243
Now you don't need the localization module here then :)
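As a sketch of the reviewer's point, the require list can simply stay as it was before this patch, without pulling in localization.js (paths as in the original file):

var exported = {},
    common = require('../../../api/utils/common.js'),
    crypto = require('crypto'),
    countlyCommon = require('../../../api/lib/countly.common.js'),
    plugins = require('../../pluginManager.js');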
Countly-countly-server
js
@@ -264,11 +264,16 @@ public class InclusiveMetricsEvaluator { @Override public <T> Boolean in(BoundReference<T> ref, Set<T> literalSet) { - return ROWS_MIGHT_MATCH; + // in(col, {X, Y}) => eq(col, X) OR eq(col, Y) + if (literalSet.stream().anyMatch(v -> eq(ref, toLiteral(v)))) { + return ROWS_MIGHT_MATCH; + } + return ROWS_CANNOT_MATCH; } @Override public <T> Boolean notIn(BoundReference<T> ref, Set<T> literalSet) { + // notIn(col, {X, Y}) => notEq(col, X) AND notEq(col, Y) return ROWS_MIGHT_MATCH; }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.expressions; import java.nio.ByteBuffer; import java.util.Comparator; import java.util.Map; import java.util.Set; import org.apache.iceberg.DataFile; import org.apache.iceberg.Schema; import org.apache.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor; import org.apache.iceberg.types.Comparators; import org.apache.iceberg.types.Conversions; import org.apache.iceberg.types.Types.StructType; import org.apache.iceberg.util.BinaryUtil; import static org.apache.iceberg.expressions.Expressions.rewriteNot; /** * Evaluates an {@link Expression} on a {@link DataFile} to test whether rows in the file may match. * <p> * This evaluation is inclusive: it returns true if a file may match and false if it cannot match. * <p> * Files are passed to {@link #eval(DataFile)}, which returns true if the file may contain matching * rows and false if the file cannot contain matching rows. Files may be skipped if and only if the * return value of {@code eval} is false. */ public class InclusiveMetricsEvaluator { private final Expression expr; private transient ThreadLocal<MetricsEvalVisitor> visitors = null; private MetricsEvalVisitor visitor() { if (visitors == null) { this.visitors = ThreadLocal.withInitial(MetricsEvalVisitor::new); } return visitors.get(); } public InclusiveMetricsEvaluator(Schema schema, Expression unbound) { this(schema, unbound, true); } public InclusiveMetricsEvaluator(Schema schema, Expression unbound, boolean caseSensitive) { StructType struct = schema.asStruct(); this.expr = Binder.bind(struct, rewriteNot(unbound), caseSensitive); } /** * Test whether the file may contain records that match the expression. * * @param file a data file * @return false if the file cannot contain rows that match the expression, true otherwise. */ public boolean eval(DataFile file) { // TODO: detect the case where a column is missing from the file using file's max field id. 
return visitor().eval(file); } private static final boolean ROWS_MIGHT_MATCH = true; private static final boolean ROWS_CANNOT_MATCH = false; private class MetricsEvalVisitor extends BoundExpressionVisitor<Boolean> { private Map<Integer, Long> valueCounts = null; private Map<Integer, Long> nullCounts = null; private Map<Integer, ByteBuffer> lowerBounds = null; private Map<Integer, ByteBuffer> upperBounds = null; private boolean eval(DataFile file) { if (file.recordCount() <= 0) { return ROWS_CANNOT_MATCH; } this.valueCounts = file.valueCounts(); this.nullCounts = file.nullValueCounts(); this.lowerBounds = file.lowerBounds(); this.upperBounds = file.upperBounds(); return ExpressionVisitors.visitEvaluator(expr, this); } @Override public Boolean alwaysTrue() { return ROWS_MIGHT_MATCH; // all rows match } @Override public Boolean alwaysFalse() { return ROWS_CANNOT_MATCH; // all rows fail } @Override public Boolean not(Boolean result) { return !result; } @Override public Boolean and(Boolean leftResult, Boolean rightResult) { return leftResult && rightResult; } @Override public Boolean or(Boolean leftResult, Boolean rightResult) { return leftResult || rightResult; } @Override public <T> Boolean isNull(BoundReference<T> ref) { // no need to check whether the field is required because binding evaluates that case // if the column has no null values, the expression cannot match Integer id = ref.fieldId(); if (nullCounts != null && nullCounts.containsKey(id) && nullCounts.get(id) == 0) { return ROWS_CANNOT_MATCH; } return ROWS_MIGHT_MATCH; } @Override public <T> Boolean notNull(BoundReference<T> ref) { // no need to check whether the field is required because binding evaluates that case // if the column has no non-null values, the expression cannot match Integer id = ref.fieldId(); if (containsNullsOnly(id)) { return ROWS_CANNOT_MATCH; } return ROWS_MIGHT_MATCH; } @Override public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) { Integer id = ref.fieldId(); if (containsNullsOnly(id)) { return ROWS_CANNOT_MATCH; } if (lowerBounds != null && lowerBounds.containsKey(id)) { T lower = Conversions.fromByteBuffer(ref.type(), lowerBounds.get(id)); int cmp = lit.comparator().compare(lower, lit.value()); if (cmp >= 0) { return ROWS_CANNOT_MATCH; } } return ROWS_MIGHT_MATCH; } @Override public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) { Integer id = ref.fieldId(); if (containsNullsOnly(id)) { return ROWS_CANNOT_MATCH; } if (lowerBounds != null && lowerBounds.containsKey(id)) { T lower = Conversions.fromByteBuffer(ref.type(), lowerBounds.get(id)); int cmp = lit.comparator().compare(lower, lit.value()); if (cmp > 0) { return ROWS_CANNOT_MATCH; } } return ROWS_MIGHT_MATCH; } @Override public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) { Integer id = ref.fieldId(); if (containsNullsOnly(id)) { return ROWS_CANNOT_MATCH; } if (upperBounds != null && upperBounds.containsKey(id)) { T upper = Conversions.fromByteBuffer(ref.type(), upperBounds.get(id)); int cmp = lit.comparator().compare(upper, lit.value()); if (cmp <= 0) { return ROWS_CANNOT_MATCH; } } return ROWS_MIGHT_MATCH; } @Override public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) { Integer id = ref.fieldId(); if (containsNullsOnly(id)) { return ROWS_CANNOT_MATCH; } if (upperBounds != null && upperBounds.containsKey(id)) { T upper = Conversions.fromByteBuffer(ref.type(), upperBounds.get(id)); int cmp = lit.comparator().compare(upper, lit.value()); if (cmp < 0) { return ROWS_CANNOT_MATCH; } } return ROWS_MIGHT_MATCH; } 
@Override public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) { Integer id = ref.fieldId(); if (containsNullsOnly(id)) { return ROWS_CANNOT_MATCH; } if (lowerBounds != null && lowerBounds.containsKey(id)) { T lower = Conversions.fromByteBuffer(ref.type(), lowerBounds.get(id)); int cmp = lit.comparator().compare(lower, lit.value()); if (cmp > 0) { return ROWS_CANNOT_MATCH; } } if (upperBounds != null && upperBounds.containsKey(id)) { T upper = Conversions.fromByteBuffer(ref.type(), upperBounds.get(id)); int cmp = lit.comparator().compare(upper, lit.value()); if (cmp < 0) { return ROWS_CANNOT_MATCH; } } return ROWS_MIGHT_MATCH; } @Override public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) { // because the bounds are not necessarily a min or max value, this cannot be answered using // them. notEq(col, X) with (X, Y) doesn't guarantee that X is a value in col. return ROWS_MIGHT_MATCH; } @Override public <T> Boolean in(BoundReference<T> ref, Set<T> literalSet) { return ROWS_MIGHT_MATCH; } @Override public <T> Boolean notIn(BoundReference<T> ref, Set<T> literalSet) { return ROWS_MIGHT_MATCH; } @Override public <T> Boolean startsWith(BoundReference<T> ref, Literal<T> lit) { Integer id = ref.fieldId(); if (containsNullsOnly(id)) { return ROWS_CANNOT_MATCH; } ByteBuffer prefixAsBytes = lit.toByteBuffer(); Comparator<ByteBuffer> comparator = Comparators.unsignedBytes(); if (lowerBounds != null && lowerBounds.containsKey(id)) { ByteBuffer lower = lowerBounds.get(id); // truncate lower bound so that its length in bytes is not greater than the length of prefix int length = Math.min(prefixAsBytes.remaining(), lower.remaining()); int cmp = comparator.compare(BinaryUtil.truncateBinary(lower, length), prefixAsBytes); if (cmp > 0) { return ROWS_CANNOT_MATCH; } } if (upperBounds != null && upperBounds.containsKey(id)) { ByteBuffer upper = upperBounds.get(id); // truncate upper bound so that its length in bytes is not greater than the length of prefix int length = Math.min(prefixAsBytes.remaining(), upper.remaining()); int cmp = comparator.compare(BinaryUtil.truncateBinary(upper, length), prefixAsBytes); if (cmp < 0) { return ROWS_CANNOT_MATCH; } } return ROWS_MIGHT_MATCH; } private boolean containsNullsOnly(Integer id) { return valueCounts != null && valueCounts.containsKey(id) && nullCounts != null && nullCounts.containsKey(id) && valueCounts.get(id) - nullCounts.get(id) == 0; } } }
1
16,440
This should not convert each value to a literal on every invocation. Instead, this PR can add a `literals` method to `BoundSetPredicate` to access the values as literals. That way, the conversion happens once and can be lazy.
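A minimal sketch of the suggestion, assuming a `literals()` accessor is added to `BoundSetPredicate`; the class below and its conversion lambda are illustrative stand-ins, not Iceberg's actual API:

import java.util.Set;
import java.util.stream.Collectors;

// Illustrative only: caches the value-to-literal conversion so visitors
// such as InclusiveMetricsEvaluator pay for it once instead of per eval().
class BoundSetPredicateSketch<T> {
  interface Literal<V> { V value(); }          // stand-in for Iceberg's Literal

  private final Set<T> literalSet;
  private Set<Literal<T>> lazyLiterals = null; // converted at most once

  BoundSetPredicateSketch(Set<T> literalSet) {
    this.literalSet = literalSet;
  }

  Set<Literal<T>> literals() {
    if (lazyLiterals == null) {                // lazy: first caller pays the cost
      lazyLiterals = literalSet.stream()
          .map(v -> (Literal<T>) () -> v)      // assumed conversion step
          .collect(Collectors.toSet());
    }
    return lazyLiterals;
  }
}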
apache-iceberg
java
@@ -22,7 +22,7 @@ def _load_lib(): """Load LightGBM Library.""" lib_path = find_lib_path() if len(lib_path) == 0: - raise Exception("cannot find LightGBM library") + return None lib = ctypes.cdll.LoadLibrary(lib_path[0]) lib.LGBM_GetLastError.restype = ctypes.c_char_p return lib
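Presumably the point of returning None is to let the package import even when the compiled library is absent; a minimal sketch (names mirror the surrounding module, but the guard itself is an assumption, not the PR's code) of how call sites would then need to fail with a clear error:

class LightGBMError(Exception):
    """Error thrown by LightGBM."""

_LIB = None  # stands in for the result of a failed _load_lib()

def _assert_lib_loaded():
    # Surface the missing shared library as a clear runtime error at the
    # first C-API call, rather than as a crash at import time.
    if _LIB is None:
        raise LightGBMError("cannot find LightGBM library")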
1
# coding: utf-8 # pylint: disable = invalid-name, C0111, C0301 # pylint: disable = R0912, R0913, R0914, W0105, W0201, W0212 """Wrapper c_api of LightGBM""" from __future__ import absolute_import import ctypes import os import warnings from tempfile import NamedTemporaryFile import numpy as np import scipy.sparse from .compat import (DataFrame, Series, integer_types, json, json_default_with_numpy, numeric_types, range_, string_type) from .libpath import find_lib_path def _load_lib(): """Load LightGBM Library.""" lib_path = find_lib_path() if len(lib_path) == 0: raise Exception("cannot find LightGBM library") lib = ctypes.cdll.LoadLibrary(lib_path[0]) lib.LGBM_GetLastError.restype = ctypes.c_char_p return lib _LIB = _load_lib() class LightGBMError(Exception): """Error thrown by LightGBM""" pass def _safe_call(ret): """Check the return value of C API call Parameters ---------- ret : int return value from API calls """ if ret != 0: raise LightGBMError(_LIB.LGBM_GetLastError()) def is_numeric(obj): """Check whether the object is a number, including numpy numbers.""" try: float(obj) return True except (TypeError, ValueError): # TypeError: obj is not a string or a number # ValueError: invalid literal return False def is_numpy_1d_array(data): """Check whether data is a 1-D numpy array""" return isinstance(data, np.ndarray) and len(data.shape) == 1 def is_1d_list(data): """Check whether data is a 1-D list""" return isinstance(data, list) and \ (not data or isinstance(data[0], numeric_types)) def list_to_1d_numpy(data, dtype=np.float32, name='list'): """Convert data to a 1-D numpy array""" if is_numpy_1d_array(data): if data.dtype == dtype: return data else: return data.astype(dtype=dtype, copy=False) elif is_1d_list(data): return np.array(data, dtype=dtype, copy=False) elif isinstance(data, Series): return data.values.astype(dtype) else: raise TypeError("Wrong type({}) for {}, should be list or numpy array".format(type(data).__name__, name)) def cfloat32_array_to_numpy(cptr, length): """Convert a ctypes float pointer array to a numpy array. """ if isinstance(cptr, ctypes.POINTER(ctypes.c_float)): return np.fromiter(cptr, dtype=np.float32, count=length) else: raise RuntimeError('Expected float pointer') def cfloat64_array_to_numpy(cptr, length): """Convert a ctypes double pointer array to a numpy array. """ if isinstance(cptr, ctypes.POINTER(ctypes.c_double)): return np.fromiter(cptr, dtype=np.float64, count=length) else: raise RuntimeError('Expected double pointer') def cint32_array_to_numpy(cptr, length): """Convert a ctypes int32 pointer array to a numpy array. 
""" if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)): return np.fromiter(cptr, dtype=np.int32, count=length) else: raise RuntimeError('Expected int pointer') def c_str(string): """Convert a python string to cstring.""" return ctypes.c_char_p(string.encode('utf-8')) def c_array(ctype, values): """Convert a python array to c array.""" return (ctype * len(values))(*values) def param_dict_to_str(data): if data is None or not data: return "" pairs = [] for key, val in data.items(): if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val): pairs.append(str(key) + '=' + ','.join(map(str, val))) elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val): pairs.append(str(key) + '=' + str(val)) else: raise TypeError('Unknown type of parameter:%s, got:%s' % (key, type(val).__name__)) return ' '.join(pairs) class _temp_file(object): def __enter__(self): with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f: self.name = f.name return self def __exit__(self, exc_type, exc_val, exc_tb): if os.path.isfile(self.name): os.remove(self.name) def readlines(self): with open(self.name, "r+") as f: ret = f.readlines() return ret def writelines(self, lines): with open(self.name, "w+") as f: f.writelines(lines) """marco definition of data type in c_api of LightGBM""" C_API_DTYPE_FLOAT32 = 0 C_API_DTYPE_FLOAT64 = 1 C_API_DTYPE_INT32 = 2 C_API_DTYPE_INT64 = 3 """Matric is row major in python""" C_API_IS_ROW_MAJOR = 1 """marco definition of prediction type in c_api of LightGBM""" C_API_PREDICT_NORMAL = 0 C_API_PREDICT_RAW_SCORE = 1 C_API_PREDICT_LEAF_INDEX = 2 """data type of data field""" FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32, "weight": C_API_DTYPE_FLOAT32, "init_score": C_API_DTYPE_FLOAT64, "group": C_API_DTYPE_INT32} def c_float_array(data): """get pointer of float numpy array / list""" if is_1d_list(data): data = np.array(data, copy=False) if is_numpy_1d_array(data): if data.dtype == np.float32: ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)) type_data = C_API_DTYPE_FLOAT32 elif data.dtype == np.float64: ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) type_data = C_API_DTYPE_FLOAT64 else: raise TypeError("Expected np.float32 or np.float64, met type({})" .format(data.dtype)) else: raise TypeError("Unknown type({})".format(type(data).__name__)) return (ptr_data, type_data) def c_int_array(data): """get pointer of int numpy array / list""" if is_1d_list(data): data = np.array(data, copy=False) if is_numpy_1d_array(data): if data.dtype == np.int32: ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)) type_data = C_API_DTYPE_INT32 elif data.dtype == np.int64: ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)) type_data = C_API_DTYPE_INT64 else: raise TypeError("Expected np.int32 or np.int64, met type({})" .format(data.dtype)) else: raise TypeError("Unknown type({})".format(type(data).__name__)) return (ptr_data, type_data) PANDAS_DTYPE_MAPPER = {'int8': 'int', 'int16': 'int', 'int32': 'int', 'int64': 'int', 'uint8': 'int', 'uint16': 'int', 'uint32': 'int', 'uint64': 'int', 'float16': 'float', 'float32': 'float', 'float64': 'float', 'bool': 'int'} def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical): if isinstance(data, DataFrame): if feature_name == 'auto' or feature_name is None: if all([isinstance(name, integer_types + (np.integer, )) for name in data.columns]): msg = """Using Pandas (default) integer column names, not column indexes. 
You can use indexes with DataFrame.values.""" warnings.filterwarnings('once') warnings.warn(msg, stacklevel=5) data = data.rename(columns=str) cat_cols = data.select_dtypes(include=['category']).columns if pandas_categorical is None: # train dataset pandas_categorical = [list(data[col].cat.categories) for col in cat_cols] else: if len(cat_cols) != len(pandas_categorical): raise ValueError('train and valid dataset categorical_feature do not match.') for col, category in zip(cat_cols, pandas_categorical): if list(data[col].cat.categories) != list(category): data[col] = data[col].cat.set_categories(category) if len(cat_cols): # cat_cols is pandas Index object data = data.copy() # not alter origin DataFrame data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes) if categorical_feature is not None: if feature_name is None: feature_name = list(data.columns) if categorical_feature == 'auto': categorical_feature = list(cat_cols) else: categorical_feature = list(categorical_feature) + list(cat_cols) if feature_name == 'auto': feature_name = list(data.columns) data_dtypes = data.dtypes if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in data_dtypes): bad_fields = [data.columns[i] for i, dtype in enumerate(data_dtypes) if dtype.name not in PANDAS_DTYPE_MAPPER] msg = """DataFrame.dtypes for data must be int, float or bool. Did not expect the data types in fields """ raise ValueError(msg + ', '.join(bad_fields)) data = data.values.astype('float') else: if feature_name == 'auto': feature_name = None if categorical_feature == 'auto': categorical_feature = None return data, feature_name, categorical_feature, pandas_categorical def _label_from_pandas(label): if isinstance(label, DataFrame): if len(label.columns) > 1: raise ValueError('DataFrame for label cannot have multiple columns') label_dtypes = label.dtypes if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in label_dtypes): raise ValueError('DataFrame.dtypes for label must be int, float or bool') label = label.values.astype('float') return label def _save_pandas_categorical(file_name, pandas_categorical): with open(file_name, 'a') as f: f.write('\npandas_categorical:' + json.dumps(pandas_categorical, default=json_default_with_numpy)) def _load_pandas_categorical(file_name): with open(file_name, 'r') as f: last_line = f.readlines()[-1] if last_line.startswith('pandas_categorical:'): return json.loads(last_line[len('pandas_categorical:'):]) return None class _InnerPredictor(object): """ A _InnerPredictor of LightGBM. Only used for prediction, usually used for continued-train Note: Can convert from Booster, but cannot convert to Booster """ def __init__(self, model_file=None, booster_handle=None): """Initialize the _InnerPredictor. Not expose to user Parameters ---------- model_file : string Path to the model file. 
booster_handle : Handle of Booster use handle to init """ self.handle = ctypes.c_void_p() self.__is_manage_handle = True if model_file is not None: """Prediction task""" out_num_iterations = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterCreateFromModelfile( c_str(model_file), ctypes.byref(out_num_iterations), ctypes.byref(self.handle))) out_num_class = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetNumClasses( self.handle, ctypes.byref(out_num_class))) self.num_class = out_num_class.value self.num_total_iteration = out_num_iterations.value self.pandas_categorical = _load_pandas_categorical(model_file) elif booster_handle is not None: self.__is_manage_handle = False self.handle = booster_handle out_num_class = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetNumClasses( self.handle, ctypes.byref(out_num_class))) self.num_class = out_num_class.value out_num_iterations = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetCurrentIteration( self.handle, ctypes.byref(out_num_iterations))) self.num_total_iteration = out_num_iterations.value self.pandas_categorical = None else: raise TypeError('Need Model file or Booster handle to create a predictor') def __del__(self): if self.__is_manage_handle: _safe_call(_LIB.LGBM_BoosterFree(self.handle)) def __getstate__(self): this = self.__dict__.copy() this.pop('handle', None) return this def predict(self, data, num_iteration=-1, raw_score=False, pred_leaf=False, data_has_header=False, is_reshape=True): """ Predict logic Parameters ---------- data : string/numpy array/scipy.sparse Data source for prediction When data type is string, it represents the path of txt file num_iteration : int Used iteration for prediction raw_score : bool True for predict raw score pred_leaf : bool True for predict leaf index data_has_header : bool Used for txt data, True if txt data has header is_reshape : bool Reshape to (nrow, ncol) if true Returns ------- Prediction result """ if isinstance(data, Dataset): raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead") data = _data_from_pandas(data, None, None, self.pandas_categorical)[0] predict_type = C_API_PREDICT_NORMAL if raw_score: predict_type = C_API_PREDICT_RAW_SCORE if pred_leaf: predict_type = C_API_PREDICT_LEAF_INDEX int_data_has_header = 1 if data_has_header else 0 if num_iteration > self.num_total_iteration: num_iteration = self.num_total_iteration if isinstance(data, string_type): with _temp_file() as f: _safe_call(_LIB.LGBM_BoosterPredictForFile( self.handle, c_str(data), ctypes.c_int(int_data_has_header), ctypes.c_int(predict_type), ctypes.c_int(num_iteration), c_str(f.name))) lines = f.readlines() nrow = len(lines) preds = [float(token) for line in lines for token in line.split('\t')] preds = np.array(preds, dtype=np.float64, copy=False) elif isinstance(data, scipy.sparse.csr_matrix): preds, nrow = self.__pred_for_csr(data, num_iteration, predict_type) elif isinstance(data, scipy.sparse.csc_matrix): preds, nrow = self.__pred_for_csc(data, num_iteration, predict_type) elif isinstance(data, np.ndarray): preds, nrow = self.__pred_for_np2d(data, num_iteration, predict_type) elif isinstance(data, DataFrame): preds, nrow = self.__pred_for_np2d(data.values, num_iteration, predict_type) else: try: csr = scipy.sparse.csr_matrix(data) preds, nrow = self.__pred_for_csr(csr, num_iteration, predict_type) except: raise TypeError('Cannot predict data for type {}'.format(type(data).__name__)) if pred_leaf: preds = preds.astype(np.int32) if is_reshape and preds.size != nrow: if preds.size % nrow == 0: 
preds = preds.reshape(nrow, -1) else: raise ValueError('Length of predict result (%d) cannot be divide nrow (%d)' % (preds.size, nrow)) return preds def __get_num_preds(self, num_iteration, nrow, predict_type): """ Get size of prediction result """ n_preds = ctypes.c_int64(0) _safe_call(_LIB.LGBM_BoosterCalcNumPredict( self.handle, ctypes.c_int(nrow), ctypes.c_int(predict_type), ctypes.c_int(num_iteration), ctypes.byref(n_preds))) return n_preds.value def __pred_for_np2d(self, mat, num_iteration, predict_type): """ Predict for a 2-D numpy matrix. """ if len(mat.shape) != 2: raise ValueError('Input numpy.ndarray must be 2 dimensional') if mat.dtype == np.float32 or mat.dtype == np.float64: data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False) else: """change non-float data to float data, need to copy""" data = np.array(mat.reshape(mat.size), dtype=np.float32) ptr_data, type_ptr_data = c_float_array(data) n_preds = self.__get_num_preds(num_iteration, mat.shape[0], predict_type) preds = np.zeros(n_preds, dtype=np.float64) out_num_preds = ctypes.c_int64(0) _safe_call(_LIB.LGBM_BoosterPredictForMat( self.handle, ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int(mat.shape[0]), ctypes.c_int(mat.shape[1]), ctypes.c_int(C_API_IS_ROW_MAJOR), ctypes.c_int(predict_type), ctypes.c_int(num_iteration), ctypes.byref(out_num_preds), preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if n_preds != out_num_preds.value: raise ValueError("Wrong length for predict results") return preds, mat.shape[0] def __pred_for_csr(self, csr, num_iteration, predict_type): """ Predict for a csr data """ nrow = len(csr.indptr) - 1 n_preds = self.__get_num_preds(num_iteration, nrow, predict_type) preds = np.zeros(n_preds, dtype=np.float64) out_num_preds = ctypes.c_int64(0) ptr_indptr, type_ptr_indptr = c_int_array(csr.indptr) ptr_data, type_ptr_data = c_float_array(csr.data) _safe_call(_LIB.LGBM_BoosterPredictForCSR( self.handle, ptr_indptr, ctypes.c_int32(type_ptr_indptr), csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int64(len(csr.indptr)), ctypes.c_int64(len(csr.data)), ctypes.c_int64(csr.shape[1]), ctypes.c_int(predict_type), ctypes.c_int(num_iteration), ctypes.byref(out_num_preds), preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if n_preds != out_num_preds.value: raise ValueError("Wrong length for predict results") return preds, nrow def __pred_for_csc(self, csc, num_iteration, predict_type): """ Predict for a csc data """ nrow = csc.shape[0] n_preds = self.__get_num_preds(num_iteration, nrow, predict_type) preds = np.zeros(n_preds, dtype=np.float64) out_num_preds = ctypes.c_int64(0) ptr_indptr, type_ptr_indptr = c_int_array(csc.indptr) ptr_data, type_ptr_data = c_float_array(csc.data) _safe_call(_LIB.LGBM_BoosterPredictForCSC( self.handle, ptr_indptr, ctypes.c_int32(type_ptr_indptr), csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int64(len(csc.indptr)), ctypes.c_int64(len(csc.data)), ctypes.c_int64(csc.shape[0]), ctypes.c_int(predict_type), ctypes.c_int(num_iteration), ctypes.byref(out_num_preds), preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if n_preds != out_num_preds.value: raise ValueError("Wrong length for predict results") return preds, nrow class Dataset(object): """Dataset in LightGBM.""" def __init__(self, data, label=None, max_bin=255, reference=None, weight=None, group=None, silent=False, feature_name='auto', categorical_feature='auto', params=None, 
free_raw_data=True): """ Parameters ---------- data : string/numpy array/scipy.sparse Data source of Dataset. When data type is string, it represents the path of txt file label : list or numpy 1-D array, optional Label of the data max_bin : int, required Max number of discrete bin for features reference : Other Dataset, optional If this dataset validation, need to use training data as reference weight : list or numpy 1-D array , optional Weight for each instance. group : list or numpy 1-D array , optional Group/query size for dataset silent : boolean, optional Whether print messages during construction feature_name : list of str, or 'auto' Feature names If 'auto' and data is pandas DataFrame, use data columns name categorical_feature : list of str or int, or 'auto' Categorical features, type int represents index, type str represents feature names (need to specify feature_name as well) If 'auto' and data is pandas DataFrame, use pandas categorical columns params: dict, optional Other parameters free_raw_data: Bool True if need to free raw data after construct inner dataset """ self.handle = None self.data = data self.label = label self.max_bin = max_bin self.reference = reference self.weight = weight self.group = group self.silent = silent self.feature_name = feature_name self.categorical_feature = categorical_feature self.params = params self.free_raw_data = free_raw_data self.used_indices = None self._predictor = None self.pandas_categorical = None def __del__(self): self._free_handle() def _free_handle(self): if self.handle is not None: _safe_call(_LIB.LGBM_DatasetFree(self.handle)) self.handle = None def _lazy_init(self, data, label=None, max_bin=255, reference=None, weight=None, group=None, predictor=None, silent=False, feature_name='auto', categorical_feature='auto', params=None): if data is None: self.handle = None return data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data, feature_name, categorical_feature, self.pandas_categorical) label = _label_from_pandas(label) self.data_has_header = False """process for args""" params = {} if params is None else params self.max_bin = max_bin self.predictor = predictor params["max_bin"] = max_bin if silent: params["verbose"] = 0 elif "verbose" not in params: params["verbose"] = 1 """get categorical features""" if categorical_feature is not None: categorical_indices = set() feature_dict = {} if feature_name is not None: feature_dict = {name: i for i, name in enumerate(feature_name)} for name in categorical_feature: if isinstance(name, string_type) and name in feature_dict: categorical_indices.add(feature_dict[name]) elif isinstance(name, integer_types): categorical_indices.add(name) else: raise TypeError("Wrong type({}) or unknown name({}) in categorical_feature" .format(type(name).__name__, name)) params['categorical_column'] = sorted(categorical_indices) params_str = param_dict_to_str(params) """process for reference dataset""" ref_dataset = None if isinstance(reference, Dataset): ref_dataset = reference.construct().handle elif reference is not None: raise TypeError('Reference dataset should be None or dataset instance') """start construct data""" if isinstance(data, string_type): """check data has header or not""" if str(params.get("has_header", "")).lower() == "true" \ or str(params.get("header", "")).lower() == "true": self.data_has_header = True self.handle = ctypes.c_void_p() _safe_call(_LIB.LGBM_DatasetCreateFromFile( c_str(data), c_str(params_str), ref_dataset, ctypes.byref(self.handle))) elif 
isinstance(data, scipy.sparse.csr_matrix): self.__init_from_csr(data, params_str, ref_dataset) elif isinstance(data, scipy.sparse.csc_matrix): self.__init_from_csc(data, params_str, ref_dataset) elif isinstance(data, np.ndarray): self.__init_from_np2d(data, params_str, ref_dataset) else: try: csr = scipy.sparse.csr_matrix(data) self.__init_from_csr(csr, params_str, ref_dataset) except: raise TypeError('Cannot initialize Dataset from {}'.format(type(data).__name__)) if label is not None: self.set_label(label) if self.get_label() is None: raise ValueError("Label should not be None") if weight is not None: self.set_weight(weight) if group is not None: self.set_group(group) # load init score if isinstance(self.predictor, _InnerPredictor): init_score = self.predictor.predict(data, raw_score=True, data_has_header=self.data_has_header, is_reshape=False) if self.predictor.num_class > 1: # need re group init score new_init_score = np.zeros(init_score.size, dtype=np.float32) num_data = self.num_data() for i in range_(num_data): for j in range_(self.predictor.num_class): new_init_score[j * num_data + i] = init_score[i * self.predictor.num_class + j] init_score = new_init_score self.set_init_score(init_score) elif self.predictor is not None: raise TypeError('wrong predictor type {}'.format(type(self.predictor).__name__)) # set feature names self.set_feature_name(feature_name) def __init_from_np2d(self, mat, params_str, ref_dataset): """ Initialize data from a 2-D numpy matrix. """ if len(mat.shape) != 2: raise ValueError('Input numpy.ndarray must be 2 dimensional') self.handle = ctypes.c_void_p() if mat.dtype == np.float32 or mat.dtype == np.float64: data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False) else: """change non-float data to float data, need to copy""" data = np.array(mat.reshape(mat.size), dtype=np.float32) ptr_data, type_ptr_data = c_float_array(data) _safe_call(_LIB.LGBM_DatasetCreateFromMat( ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int(mat.shape[0]), ctypes.c_int(mat.shape[1]), ctypes.c_int(C_API_IS_ROW_MAJOR), c_str(params_str), ref_dataset, ctypes.byref(self.handle))) def __init_from_csr(self, csr, params_str, ref_dataset): """ Initialize data from a CSR matrix. """ if len(csr.indices) != len(csr.data): raise ValueError('Length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data))) self.handle = ctypes.c_void_p() ptr_indptr, type_ptr_indptr = c_int_array(csr.indptr) ptr_data, type_ptr_data = c_float_array(csr.data) _safe_call(_LIB.LGBM_DatasetCreateFromCSR( ptr_indptr, ctypes.c_int(type_ptr_indptr), csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int64(len(csr.indptr)), ctypes.c_int64(len(csr.data)), ctypes.c_int64(csr.shape[1]), c_str(params_str), ref_dataset, ctypes.byref(self.handle))) def __init_from_csc(self, csc, params_str, ref_dataset): """ Initialize data from a csc matrix. 
""" if len(csc.indices) != len(csc.data): raise ValueError('Length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data))) self.handle = ctypes.c_void_p() ptr_indptr, type_ptr_indptr = c_int_array(csc.indptr) ptr_data, type_ptr_data = c_float_array(csc.data) _safe_call(_LIB.LGBM_DatasetCreateFromCSC( ptr_indptr, ctypes.c_int(type_ptr_indptr), csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int64(len(csc.indptr)), ctypes.c_int64(len(csc.data)), ctypes.c_int64(csc.shape[0]), c_str(params_str), ref_dataset, ctypes.byref(self.handle))) def construct(self): """Lazy init""" if self.handle is None: if self.reference is not None: if self.used_indices is None: """create valid""" self._lazy_init(self.data, label=self.label, max_bin=self.max_bin, reference=self.reference, weight=self.weight, group=self.group, predictor=self._predictor, silent=self.silent, feature_name=self.feature_name, params=self.params) else: """construct subset""" used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices') self.handle = ctypes.c_void_p() params_str = param_dict_to_str(self.params) _safe_call(_LIB.LGBM_DatasetGetSubset( self.reference.construct().handle, used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ctypes.c_int(used_indices.shape[0]), c_str(params_str), ctypes.byref(self.handle))) if self.get_label() is None: raise ValueError("Label should not be None.") else: """create train""" self._lazy_init(self.data, label=self.label, max_bin=self.max_bin, weight=self.weight, group=self.group, predictor=self._predictor, silent=self.silent, feature_name=self.feature_name, categorical_feature=self.categorical_feature, params=self.params) if self.free_raw_data: self.data = None return self def create_valid(self, data, label=None, weight=None, group=None, silent=False, params=None): """ Create validation data align with current dataset Parameters ---------- data : string/numpy array/scipy.sparse Data source of Dataset. When data type is string, it represents the path of txt file label : list or numpy 1-D array, optional Label of the training data. weight : list or numpy 1-D array , optional Weight for each instance. group : list or numpy 1-D array , optional Group/query size for dataset silent : boolean, optional Whether print messages during construction params: dict, optional Other parameters """ ret = Dataset(data, label=label, max_bin=self.max_bin, reference=self, weight=weight, group=group, silent=silent, params=params, free_raw_data=self.free_raw_data) ret._predictor = self._predictor ret.pandas_categorical = self.pandas_categorical return ret def subset(self, used_indices, params=None): """ Get subset of current dataset Parameters ---------- used_indices : list of int Used indices of this subset params : dict Other parameters """ ret = Dataset(None, reference=self, feature_name=self.feature_name, categorical_feature=self.categorical_feature, params=params) ret._predictor = self._predictor ret.pandas_categorical = self.pandas_categorical ret.used_indices = used_indices return ret def save_binary(self, filename): """ Save Dataset to binary file Parameters ---------- filename : string Name of the output file. """ _safe_call(_LIB.LGBM_DatasetSaveBinary( self.construct().handle, c_str(filename))) def _update_params(self, params): if not self.params: self.params = params else: self.params.update(params) def set_field(self, field_name, data): """Set property into the Dataset. 
Parameters ---------- field_name: str The field name of the information data: numpy array or list or None The array ofdata to be set """ if self.handle is None: raise Exception("Cannot set %s before construct dataset" % field_name) if data is None: """set to None""" _safe_call(_LIB.LGBM_DatasetSetField( self.handle, c_str(field_name), None, ctypes.c_int(0), ctypes.c_int(FIELD_TYPE_MAPPER[field_name]))) return dtype = np.float32 if field_name == 'group': dtype = np.int32 elif field_name == 'init_score': dtype = np.float64 data = list_to_1d_numpy(data, dtype, name=field_name) if data.dtype == np.float32: ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)) type_data = C_API_DTYPE_FLOAT32 elif data.dtype == np.float64: ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) type_data = C_API_DTYPE_FLOAT64 elif data.dtype == np.int32: ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)) type_data = C_API_DTYPE_INT32 else: raise TypeError("Excepted np.float32/64 or np.int32, meet type({})".format(data.dtype)) if type_data != FIELD_TYPE_MAPPER[field_name]: raise TypeError("Input type error for set_field") _safe_call(_LIB.LGBM_DatasetSetField( self.handle, c_str(field_name), ptr_data, ctypes.c_int(len(data)), ctypes.c_int(type_data))) def get_field(self, field_name): """Get property from the Dataset. Parameters ---------- field_name: str The field name of the information Returns ------- info : array A numpy array of information of the data """ if self.handle is None: raise Exception("Cannot get %s before construct dataset" % field_name) tmp_out_len = ctypes.c_int() out_type = ctypes.c_int() ret = ctypes.POINTER(ctypes.c_void_p)() _safe_call(_LIB.LGBM_DatasetGetField( self.handle, c_str(field_name), ctypes.byref(tmp_out_len), ctypes.byref(ret), ctypes.byref(out_type))) if out_type.value != FIELD_TYPE_MAPPER[field_name]: raise TypeError("Return type error for get_field") if tmp_out_len.value == 0: return None if out_type.value == C_API_DTYPE_INT32: return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value) elif out_type.value == C_API_DTYPE_FLOAT32: return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value) elif out_type.value == C_API_DTYPE_FLOAT64: return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value) else: raise TypeError("Unknown type") def set_categorical_feature(self, categorical_feature): """ Set categorical features Parameters ---------- categorical_feature : list of int or str Name/index of categorical features """ if self.categorical_feature == categorical_feature: return if self.data is not None: self.categorical_feature = categorical_feature self._free_handle() else: raise LightGBMError("Cannot set categorical feature after freed raw data, set free_raw_data=False when construct Dataset to avoid this.") def _set_predictor(self, predictor): """ Set predictor for continued training, not recommand for user to call this function. 
Please set init_model in engine.train or engine.cv """ if predictor is self._predictor: return if self.data is not None: self._predictor = predictor self._free_handle() else: raise LightGBMError("Cannot set predictor after freed raw data, set free_raw_data=False when construct Dataset to avoid this.") def set_reference(self, reference): """ Set reference dataset Parameters ---------- reference : Dataset Will use reference as template to consturct current dataset """ self.set_categorical_feature(reference.categorical_feature) self.set_feature_name(reference.feature_name) self._set_predictor(reference._predictor) if self.reference is reference: return if self.data is not None: self.reference = reference self._free_handle() else: raise LightGBMError("Cannot set reference after freed raw data, set free_raw_data=False when construct Dataset to avoid this.") def set_feature_name(self, feature_name): """ Set feature name Parameters ---------- feature_name : list of str Feature names """ if feature_name != 'auto': self.feature_name = feature_name if self.handle is not None and feature_name is not None and feature_name != 'auto': if len(feature_name) != self.num_feature(): raise ValueError("Length of feature_name({}) and num_feature({}) don't match".format(len(feature_name), self.num_feature())) c_feature_name = [c_str(name) for name in feature_name] _safe_call(_LIB.LGBM_DatasetSetFeatureNames( self.handle, c_array(ctypes.c_char_p, c_feature_name), ctypes.c_int(len(feature_name)))) def set_label(self, label): """ Set label of Dataset Parameters ---------- label: numpy array or list or None The label information to be set into Dataset """ self.label = label if self.handle is not None: label = list_to_1d_numpy(label, name='label') self.set_field('label', label) def set_weight(self, weight): """ Set weight of each instance. Parameters ---------- weight : numpy array or list or None Weight for each data point """ self.weight = weight if self.handle is not None and weight is not None: weight = list_to_1d_numpy(weight, name='weight') self.set_field('weight', weight) def set_init_score(self, init_score): """ Set init score of booster to start from. Parameters ---------- init_score: numpy array or list or None Init score for booster """ self.init_score = init_score if self.handle is not None and init_score is not None: init_score = list_to_1d_numpy(init_score, np.float64, name='init_score') self.set_field('init_score', init_score) def set_group(self, group): """ Set group size of Dataset (used for ranking). Parameters ---------- group : numpy array or list or None Group size of each group """ self.group = group if self.handle is not None and group is not None: group = list_to_1d_numpy(group, np.int32, name='group') self.set_field('group', group) def get_label(self): """ Get the label of the Dataset. Returns ------- label : array """ if self.label is None and self.handle is not None: self.label = self.get_field('label') return self.label def get_weight(self): """ Get the weight of the Dataset. Returns ------- weight : array """ if self.weight is None and self.handle is not None: self.weight = self.get_field('weight') return self.weight def get_init_score(self): """ Get the initial score of the Dataset. Returns ------- init_score : array """ if self.init_score is None and self.handle is not None: self.init_score = self.get_field('init_score') return self.init_score def get_group(self): """ Get the group of the Dataset. 
Returns ------- init_score : array """ if self.group is None and self.handle is not None: self.group = self.get_field('group') if self.group is not None: # group data from LightGBM is boundaries data, need to convert to group size new_group = [] for i in range_(len(self.group) - 1): new_group.append(self.group[i + 1] - self.group[i]) self.group = new_group return self.group def num_data(self): """ Get the number of rows in the Dataset. Returns ------- number of rows : int """ if self.handle is not None: ret = ctypes.c_int() _safe_call(_LIB.LGBM_DatasetGetNumData(self.handle, ctypes.byref(ret))) return ret.value else: raise LightGBMError("Cannot get num_data before construct dataset") def num_feature(self): """ Get the number of columns (features) in the Dataset. Returns ------- number of columns : int """ if self.handle is not None: ret = ctypes.c_int() _safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle, ctypes.byref(ret))) return ret.value else: raise LightGBMError("Cannot get num_feature before construct dataset") class Booster(object): """"Booster in LightGBM.""" def __init__(self, params=None, train_set=None, model_file=None, silent=False): """ Initialize the Booster. Parameters ---------- params : dict Parameters for boosters. train_set : Dataset Training dataset model_file : string Path to the model file. silent : boolean, optional Whether print messages during construction """ self.handle = ctypes.c_void_p() self.__need_reload_eval_info = True self.__train_data_name = "training" self.__attr = {} self.best_iteration = -1 self.best_score = {} params = {} if params is None else params if silent: params["verbose"] = 0 elif "verbose" not in params: params["verbose"] = 1 if train_set is not None: """Training task""" if not isinstance(train_set, Dataset): raise TypeError('Training data should be Dataset instance, met {}'.format(type(train_set).__name__)) params_str = param_dict_to_str(params) """construct booster object""" _safe_call(_LIB.LGBM_BoosterCreate( train_set.construct().handle, c_str(params_str), ctypes.byref(self.handle))) """save reference to data""" self.train_set = train_set self.valid_sets = [] self.name_valid_sets = [] self.__num_dataset = 1 self.__init_predictor = train_set._predictor if self.__init_predictor is not None: _safe_call(_LIB.LGBM_BoosterMerge( self.handle, self.__init_predictor.handle)) out_num_class = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetNumClasses( self.handle, ctypes.byref(out_num_class))) self.__num_class = out_num_class.value """buffer for inner predict""" self.__inner_predict_buffer = [None] self.__is_predicted_cur_iter = [False] self.__get_eval_info() self.pandas_categorical = train_set.pandas_categorical elif model_file is not None: """Prediction task""" out_num_iterations = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterCreateFromModelfile( c_str(model_file), ctypes.byref(out_num_iterations), ctypes.byref(self.handle))) out_num_class = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetNumClasses( self.handle, ctypes.byref(out_num_class))) self.__num_class = out_num_class.value self.pandas_categorical = _load_pandas_categorical(model_file) elif 'model_str' in params: self.__load_model_from_string(params['model_str']) else: raise TypeError('Need at least one training dataset or model file to create booster instance') def __del__(self): if self.handle is not None: _safe_call(_LIB.LGBM_BoosterFree(self.handle)) def __copy__(self): return self.__deepcopy__(None) def __deepcopy__(self, _): model_str = self.__save_model_to_string() booster = 
Booster({'model_str': model_str}) booster.pandas_categorical = self.pandas_categorical return booster def __getstate__(self): this = self.__dict__.copy() handle = this['handle'] this.pop('train_set', None) this.pop('valid_sets', None) if handle is not None: this["handle"] = self.__save_model_to_string() return this def __setstate__(self, state): model_str = state.get('handle', None) if model_str is not None: handle = ctypes.c_void_p() out_num_iterations = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterLoadModelFromString( c_str(model_str), ctypes.byref(out_num_iterations), ctypes.byref(handle))) state['handle'] = handle self.__dict__.update(state) def free_dataset(self): self.__dict__.pop('train_set', None) self.__dict__.pop('valid_sets', None) def set_train_data_name(self, name): self.__train_data_name = name def add_valid(self, data, name): """ Add an validation data Parameters ---------- data : Dataset Validation data name : String Name of validation data """ if not isinstance(data, Dataset): raise TypeError('valid data should be Dataset instance, met {}'.format(type(data).__name__)) if data._predictor is not self.__init_predictor: raise LightGBMError("Add validation data failed, you should use same predictor for these data") _safe_call(_LIB.LGBM_BoosterAddValidData( self.handle, data.construct().handle)) self.valid_sets.append(data) self.name_valid_sets.append(name) self.__num_dataset += 1 self.__inner_predict_buffer.append(None) self.__is_predicted_cur_iter.append(False) def reset_parameter(self, params): """ Reset parameters for booster Parameters ---------- params : dict New parameters for boosters silent : boolean, optional Whether print messages during construction """ if 'metric' in params: self.__need_reload_eval_info = True params_str = param_dict_to_str(params) if params_str: _safe_call(_LIB.LGBM_BoosterResetParameter( self.handle, c_str(params_str))) def update(self, train_set=None, fobj=None): """ Update for one iteration Note: for multi-class task, the score is group by class_id first, then group by row_id if you want to get i-th row score in j-th class, the access way is score[j*num_data+i] and you should group grad and hess in this way as well Parameters ---------- train_set : Training data, None means use last training data fobj : function Customized objective function. Returns ------- is_finished, bool """ """need reset training data""" if train_set is not None and train_set is not self.train_set: if not isinstance(train_set, Dataset): raise TypeError('Training data should be Dataset instance, met {}'.format(type(train_set).__name__)) if train_set._predictor is not self.__init_predictor: raise LightGBMError("Replace training data failed, you should use same predictor for these data") self.train_set = train_set _safe_call(_LIB.LGBM_BoosterResetTrainingData( self.handle, self.train_set.construct().handle)) self.__inner_predict_buffer[0] = None is_finished = ctypes.c_int(0) if fobj is None: _safe_call(_LIB.LGBM_BoosterUpdateOneIter( self.handle, ctypes.byref(is_finished))) self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)] return is_finished.value == 1 else: grad, hess = fobj(self.__inner_predict(0), self.train_set) return self.__boost(grad, hess) def __boost(self, grad, hess): """ Boost the booster for one iteration, with customized gradient statistics. 
Note: for multi-class task, the score is group by class_id first, then group by row_id if you want to get i-th row score in j-th class, the access way is score[j*num_data+i] and you should group grad and hess in this way as well Parameters ---------- grad : 1d numpy or 1d list The first order of gradient. hess : 1d numpy or 1d list The second order of gradient. Returns ------- is_finished, bool """ grad = list_to_1d_numpy(grad, name='gradient') hess = list_to_1d_numpy(hess, name='hessian') if len(grad) != len(hess): raise ValueError("Lengths of gradient({}) and hessian({}) don't match".format(len(grad), len(hess))) is_finished = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom( self.handle, grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), ctypes.byref(is_finished))) self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)] return is_finished.value == 1 def rollback_one_iter(self): """ Rollback one iteration """ _safe_call(_LIB.LGBM_BoosterRollbackOneIter( self.handle)) self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)] def current_iteration(self): out_cur_iter = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetCurrentIteration( self.handle, ctypes.byref(out_cur_iter))) return out_cur_iter.value def eval(self, data, name, feval=None): """ Evaluate for data Parameters ---------- data : Dataset object name : Name of data feval : function Custom evaluation function. Returns ------- result: list Evaluation result list. """ if not isinstance(data, Dataset): raise TypeError("Can only eval for Dataset instance") data_idx = -1 if data is self.train_set: data_idx = 0 else: for i in range_(len(self.valid_sets)): if data is self.valid_sets[i]: data_idx = i + 1 break """need to push new valid data""" if data_idx == -1: self.add_valid(data, name) data_idx = self.__num_dataset - 1 return self.__inner_eval(name, data_idx, feval) def eval_train(self, feval=None): """ Evaluate for training data Parameters ---------- feval : function Custom evaluation function. Returns ------- result: str Evaluation result list. """ return self.__inner_eval(self.__train_data_name, 0, feval) def eval_valid(self, feval=None): """ Evaluate for validation data Parameters ---------- feval : function Custom evaluation function. Returns ------- result: str Evaluation result list. """ return [item for i in range_(1, self.__num_dataset) for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)] def save_model(self, filename, num_iteration=-1): """ Save model of booster to file Parameters ---------- filename : str Filename to save num_iteration: int Number of iteration that want to save. 
< 0 means save the best iteration(if have) """ if num_iteration <= 0: num_iteration = self.best_iteration _safe_call(_LIB.LGBM_BoosterSaveModel( self.handle, ctypes.c_int(num_iteration), c_str(filename))) _save_pandas_categorical(filename, self.pandas_categorical) def __load_model_from_string(self, model_str): """[Private] Load model from string""" out_num_iterations = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterLoadModelFromString( c_str(model_str), ctypes.byref(out_num_iterations), ctypes.byref(self.handle))) out_num_class = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetNumClasses( self.handle, ctypes.byref(out_num_class))) self.__num_class = out_num_class.value def __save_model_to_string(self, num_iteration=-1): """[Private] Save model to string""" if num_iteration <= 0: num_iteration = self.best_iteration buffer_len = 1 << 20 tmp_out_len = ctypes.c_int(0) string_buffer = ctypes.create_string_buffer(buffer_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterSaveModelToString( self.handle, ctypes.c_int(num_iteration), ctypes.c_int(buffer_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) actual_len = tmp_out_len.value '''if buffer length is not long enough, re-allocate a buffer''' if actual_len > buffer_len: string_buffer = ctypes.create_string_buffer(actual_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterSaveModelToString( self.handle, ctypes.c_int(num_iteration), ctypes.c_int(actual_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) return string_buffer.value.decode() def dump_model(self, num_iteration=-1): """ Dump model to json format Parameters ---------- num_iteration: int Number of iteration that want to dump. < 0 means dump to best iteration(if have) Returns ------- Json format of model """ if num_iteration <= 0: num_iteration = self.best_iteration buffer_len = 1 << 20 tmp_out_len = ctypes.c_int(0) string_buffer = ctypes.create_string_buffer(buffer_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterDumpModel( self.handle, ctypes.c_int(num_iteration), ctypes.c_int(buffer_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) actual_len = tmp_out_len.value '''if buffer length is not long enough, reallocate a buffer''' if actual_len > buffer_len: string_buffer = ctypes.create_string_buffer(actual_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterDumpModel( self.handle, ctypes.c_int(num_iteration), ctypes.c_int(actual_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) return json.loads(string_buffer.value.decode()) def predict(self, data, num_iteration=-1, raw_score=False, pred_leaf=False, data_has_header=False, is_reshape=True): """ Predict logic Parameters ---------- data : string/numpy array/scipy.sparse Data source for prediction When data type is string, it represents the path of txt file num_iteration : int Used iteration for prediction, < 0 means predict for best iteration(if have) raw_score : bool True for predict raw score pred_leaf : bool True for predict leaf index data_has_header : bool Used for txt data is_reshape : bool Reshape to (nrow, ncol) if true Returns ------- Prediction result """ predictor = self._to_predictor() if num_iteration <= 0: num_iteration = self.best_iteration return predictor.predict(data, num_iteration, raw_score, pred_leaf, data_has_header, is_reshape) def _to_predictor(self): """Convert to predictor""" predictor = 
_InnerPredictor(booster_handle=self.handle) predictor.pandas_categorical = self.pandas_categorical return predictor def feature_name(self): """ Get feature names. Returns ------- result : array Array of feature names. """ out_num_feature = ctypes.c_int(0) """Get num of features""" _safe_call(_LIB.LGBM_BoosterGetNumFeature( self.handle, ctypes.byref(out_num_feature))) num_feature = out_num_feature.value """Get name of features""" tmp_out_len = ctypes.c_int(0) string_buffers = [ctypes.create_string_buffer(255) for i in range_(num_feature)] ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_BoosterGetFeatureNames( self.handle, ctypes.byref(tmp_out_len), ptr_string_buffers)) if num_feature != tmp_out_len.value: raise ValueError("Length of feature names doesn't equal with num_feature") return [string_buffers[i].value.decode() for i in range_(num_feature)] def feature_importance(self, importance_type='split'): """ Get feature importances Parameters ---------- importance_type : str, default "split" How the importance is calculated: "split" or "gain" "split" is the number of times a feature is used in a model "gain" is the total gain of splits which use the feature Returns ------- result : array Array of feature importances. """ if importance_type not in ["split", "gain"]: raise KeyError("importance_type must be split or gain") dump_model = self.dump_model() ret = [0] * (dump_model["max_feature_idx"] + 1) def dfs(root): if "split_feature" in root: if root['split_gain'] > 0: if importance_type == 'split': ret[root["split_feature"]] += 1 elif importance_type == 'gain': ret[root["split_feature"]] += root["split_gain"] dfs(root["left_child"]) dfs(root["right_child"]) for tree in dump_model["tree_info"]: dfs(tree["tree_structure"]) return np.array(ret) def __inner_eval(self, data_name, data_idx, feval=None): """ Evaulate training or validation data """ if data_idx >= self.__num_dataset: raise ValueError("Data_idx should be smaller than number of dataset") self.__get_eval_info() ret = [] if self.__num_inner_eval > 0: result = np.array([0.0 for _ in range_(self.__num_inner_eval)], dtype=np.float64) tmp_out_len = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetEval( self.handle, ctypes.c_int(data_idx), ctypes.byref(tmp_out_len), result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if tmp_out_len.value != self.__num_inner_eval: raise ValueError("Wrong length of eval results") for i in range_(self.__num_inner_eval): ret.append((data_name, self.__name_inner_eval[i], result[i], self.__higher_better_inner_eval[i])) if feval is not None: if data_idx == 0: cur_data = self.train_set else: cur_data = self.valid_sets[data_idx - 1] feval_ret = feval(self.__inner_predict(data_idx), cur_data) if isinstance(feval_ret, list): for eval_name, val, is_higher_better in feval_ret: ret.append((data_name, eval_name, val, is_higher_better)) else: eval_name, val, is_higher_better = feval_ret ret.append((data_name, eval_name, val, is_higher_better)) return ret def __inner_predict(self, data_idx): """ Predict for training and validation dataset """ if data_idx >= self.__num_dataset: raise ValueError("Data_idx should be smaller than number of dataset") if self.__inner_predict_buffer[data_idx] is None: if data_idx == 0: n_preds = self.train_set.num_data() * self.__num_class else: n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class self.__inner_predict_buffer[data_idx] = \ np.array([0.0 for _ in range_(n_preds)], dtype=np.float64, copy=False) """avoid 
to predict many time in one iteration""" if not self.__is_predicted_cur_iter[data_idx]: tmp_out_len = ctypes.c_int64(0) data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double)) _safe_call(_LIB.LGBM_BoosterGetPredict( self.handle, ctypes.c_int(data_idx), ctypes.byref(tmp_out_len), data_ptr)) if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]): raise ValueError("Wrong length of predict results for data %d" % (data_idx)) self.__is_predicted_cur_iter[data_idx] = True return self.__inner_predict_buffer[data_idx] def __get_eval_info(self): """ Get inner evaluation count and names """ if self.__need_reload_eval_info: self.__need_reload_eval_info = False out_num_eval = ctypes.c_int(0) """Get num of inner evals""" _safe_call(_LIB.LGBM_BoosterGetEvalCounts( self.handle, ctypes.byref(out_num_eval))) self.__num_inner_eval = out_num_eval.value if self.__num_inner_eval > 0: """Get name of evals""" tmp_out_len = ctypes.c_int(0) string_buffers = [ctypes.create_string_buffer(255) for i in range_(self.__num_inner_eval)] ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_BoosterGetEvalNames( self.handle, ctypes.byref(tmp_out_len), ptr_string_buffers)) if self.__num_inner_eval != tmp_out_len.value: raise ValueError("Length of eval names doesn't equal with num_evals") self.__name_inner_eval = \ [string_buffers[i].value.decode() for i in range_(self.__num_inner_eval)] self.__higher_better_inner_eval = \ [name.startswith(('auc', 'ndcg')) for name in self.__name_inner_eval] def attr(self, key): """ Get attribute string from the Booster. Parameters ---------- key : str The key to get attribute from. Returns ------- value : str The attribute value of the key, returns None if attribute do not exist. """ return self.__attr.get(key, None) def set_attr(self, **kwargs): """ Set the attribute of the Booster. Parameters ---------- **kwargs The attributes to set. Setting a value to None deletes an attribute. """ for key, value in kwargs.items(): if value is not None: if not isinstance(value, string_type): raise ValueError("Set attr only accepts strings") self.__attr[key] = value else: self.__attr.pop(key, None)
1
16,659
I think we still need this exception if LIGHTGBM_BUILD_DOC is not defined
microsoft-LightGBM
cpp
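A note on the Python wrapper in the record above: the multi-class branch of `_lazy_init` regroups the predictor's init score from row-major (`score[i * num_class + j]`) to class-major (`score[j * num_data + i]`) with a double loop. A minimal sketch (shapes chosen only for illustration, not taken from the source) showing that the loop is equivalent to a NumPy reshape-and-transpose:

import numpy as np

num_data, num_class = 4, 3
init_score = np.arange(num_data * num_class, dtype=np.float32)  # row-major scores

# loop form, as in _lazy_init
new_init_score = np.zeros(init_score.size, dtype=np.float32)
for i in range(num_data):
    for j in range(num_class):
        new_init_score[j * num_data + i] = init_score[i * num_class + j]

# vectorized equivalent: reshape to (num_data, num_class), transpose, flatten
vectorized = init_score.reshape(num_data, num_class).T.reshape(-1)
assert np.array_equal(new_init_score, vectorized)

The vectorized form avoids the Python-level double loop, which matters when num_data is large.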
@@ -71,15 +71,9 @@ func (h *Helper) waitForCertificateCondition(client clientset.CertificateInterfa if pollErr != nil && certificate != nil { log.Logf("Failed waiting for certificate %v: %v\n", name, pollErr.Error()) - if len(certificate.Status.Conditions) > 0 { - log.Logf("Observed certificate conditions:\n") - for _, cond := range certificate.Status.Conditions { - log.Logf("- Last Status: '%s' Reason: '%s', Message: '%s'\n", cond.Status, cond.Reason, cond.Message) - } - } + log.Logf("Certificate:\n") + h.describeCMObject(certificate) - log.Logf("Certificate description:\n") - h.Kubectl(certificate.Namespace).DescribeResource("certificate", name) log.Logf("Order and challenge descriptions:\n") h.Kubectl(certificate.Namespace).Describe("order", "challenge")
1
/* Copyright 2020 The cert-manager Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package helper import ( "context" "crypto/x509" "fmt" "sort" "time" errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" apiutil "github.com/jetstack/cert-manager/pkg/api/util" cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1" v1 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1" cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" clientset "github.com/jetstack/cert-manager/pkg/client/clientset/versioned/typed/certmanager/v1" "github.com/jetstack/cert-manager/test/e2e/framework/log" ) // WaitForCertificateToExist waits for the named certificate to exist and returns the certificate func (h *Helper) WaitForCertificateToExist(namespace string, name string, timeout time.Duration) (*cmapi.Certificate, error) { client := h.CMClient.CertmanagerV1().Certificates(namespace) var certificate *v1.Certificate pollErr := wait.PollImmediate(500*time.Millisecond, timeout, func() (bool, error) { log.Logf("Waiting for Certificate %v to exist", name) var err error certificate, err = client.Get(context.TODO(), name, metav1.GetOptions{}) if errors.IsNotFound(err) { return false, nil } if err != nil { return false, fmt.Errorf("error getting Certificate %v: %v", name, err) } return true, nil }) return certificate, pollErr } func (h *Helper) waitForCertificateCondition(client clientset.CertificateInterface, name string, check func(*v1.Certificate) bool, timeout time.Duration) (*cmapi.Certificate, error) { var certificate *v1.Certificate pollErr := wait.PollImmediate(500*time.Millisecond, timeout, func() (bool, error) { var err error certificate, err = client.Get(context.TODO(), name, metav1.GetOptions{}) if nil != err { certificate = nil return false, fmt.Errorf("error getting Certificate %v: %v", name, err) } return check(certificate), nil }) if pollErr != nil && certificate != nil { log.Logf("Failed waiting for certificate %v: %v\n", name, pollErr.Error()) if len(certificate.Status.Conditions) > 0 { log.Logf("Observed certificate conditions:\n") for _, cond := range certificate.Status.Conditions { log.Logf("- Last Status: '%s' Reason: '%s', Message: '%s'\n", cond.Status, cond.Reason, cond.Message) } } log.Logf("Certificate description:\n") h.Kubectl(certificate.Namespace).DescribeResource("certificate", name) log.Logf("Order and challenge descriptions:\n") h.Kubectl(certificate.Namespace).Describe("order", "challenge") log.Logf("CertificateRequest description:\n") crName, err := apiutil.ComputeName(certificate.Name, certificate.Spec) if err != nil { log.Logf("Failed to compute CertificateRequest name from certificate: %s", err) } else { h.Kubectl(certificate.Namespace).DescribeResource("certificaterequest", crName) } } return certificate, pollErr } // WaitForCertificateReadyAndDoneIssuing waits for the certificate resource to be in a Ready=True state and not be in an Issuing state. 
// The Ready=True condition will be checked against the provided certificate to make sure that it is up-to-date (condition gen. >= cert gen.). func (h *Helper) WaitForCertificateReadyAndDoneIssuing(cert *cmapi.Certificate, timeout time.Duration) (*cmapi.Certificate, error) { ready_true_condition := cmapi.CertificateCondition{ Type: cmapi.CertificateConditionReady, Status: cmmeta.ConditionTrue, ObservedGeneration: cert.Generation, } issuing_true_condition := cmapi.CertificateCondition{ Type: cmapi.CertificateConditionIssuing, Status: cmmeta.ConditionTrue, } return h.waitForCertificateCondition(h.CMClient.CertmanagerV1().Certificates(cert.Namespace), cert.Name, func(certificate *v1.Certificate) bool { if !apiutil.CertificateHasConditionWithObservedGeneration(certificate, ready_true_condition) { log.Logf( "Expected Certificate %v condition %v=%v (generation >= %v) but it has: %v", certificate.Name, ready_true_condition.Type, ready_true_condition.Status, ready_true_condition.ObservedGeneration, certificate.Status.Conditions, ) return false } if apiutil.CertificateHasCondition(certificate, issuing_true_condition) { log.Logf("Expected Certificate %v condition %v to be missing but it has: %v", certificate.Name, issuing_true_condition.Type, certificate.Status.Conditions) return false } return true }, timeout) } // WaitForCertificateNotReadyAndDoneIssuing waits for the certificate resource to be in a Ready=False state and not be in an Issuing state. // The Ready=False condition will be checked against the provided certificate to make sure that it is up-to-date (condition gen. >= cert gen.). func (h *Helper) WaitForCertificateNotReadyAndDoneIssuing(cert *cmapi.Certificate, timeout time.Duration) (*cmapi.Certificate, error) { ready_false_condition := cmapi.CertificateCondition{ Type: cmapi.CertificateConditionReady, Status: cmmeta.ConditionFalse, ObservedGeneration: cert.Generation, } issuing_true_condition := cmapi.CertificateCondition{ Type: cmapi.CertificateConditionIssuing, Status: cmmeta.ConditionTrue, } return h.waitForCertificateCondition(h.CMClient.CertmanagerV1().Certificates(cert.Namespace), cert.Name, func(certificate *v1.Certificate) bool { if !apiutil.CertificateHasConditionWithObservedGeneration(certificate, ready_false_condition) { log.Logf( "Expected Certificate %v condition %v=%v (generation >= %v) but it has: %v", certificate.Name, ready_false_condition.Type, ready_false_condition.Status, ready_false_condition.ObservedGeneration, certificate.Status.Conditions, ) return false } if apiutil.CertificateHasCondition(certificate, issuing_true_condition) { log.Logf("Expected Certificate %v condition %v to be missing but it has: %v", certificate.Name, issuing_true_condition.Type, certificate.Status.Conditions) return false } return true }, timeout) } func (h *Helper) deduplicateExtKeyUsages(us []x509.ExtKeyUsage) []x509.ExtKeyUsage { extKeyUsagesMap := make(map[x509.ExtKeyUsage]bool) for _, e := range us { extKeyUsagesMap[e] = true } us = make([]x509.ExtKeyUsage, 0) for e, ok := range extKeyUsagesMap { if ok { us = append(us, e) } } return us } func (h *Helper) defaultKeyUsagesToAdd(ns string, issuerRef *cmmeta.ObjectReference) (x509.KeyUsage, []x509.ExtKeyUsage, error) { var issuerSpec *cmapi.IssuerSpec switch issuerRef.Kind { case "ClusterIssuer": issuerObj, err := h.CMClient.CertmanagerV1().ClusterIssuers().Get(context.TODO(), issuerRef.Name, metav1.GetOptions{}) if err != nil { return 0, nil, fmt.Errorf("failed to find referenced ClusterIssuer %v: %s", issuerRef, err) } issuerSpec = 
&issuerObj.Spec default: issuerObj, err := h.CMClient.CertmanagerV1().Issuers(ns).Get(context.TODO(), issuerRef.Name, metav1.GetOptions{}) if err != nil { return 0, nil, fmt.Errorf("failed to find referenced Issuer %v: %s", issuerRef, err) } issuerSpec = &issuerObj.Spec } var keyUsages x509.KeyUsage var extKeyUsages []x509.ExtKeyUsage // Vault and ACME issuers will add server auth and client auth extended key // usages by default so we need to add them to the list of expected usages if issuerSpec.ACME != nil || issuerSpec.Vault != nil { extKeyUsages = append(extKeyUsages, x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth) } // Vault issuers will add key agreement key usage if issuerSpec.Vault != nil { keyUsages |= x509.KeyUsageKeyAgreement } // Venafi issue adds server auth key usage if issuerSpec.Venafi != nil { extKeyUsages = append(extKeyUsages, x509.ExtKeyUsageServerAuth) } return keyUsages, extKeyUsages, nil } func (h *Helper) keyUsagesMatch(aKU x509.KeyUsage, aEKU []x509.ExtKeyUsage, bKU x509.KeyUsage, bEKU []x509.ExtKeyUsage) bool { if aKU != bKU { return false } if len(aEKU) != len(bEKU) { return false } sort.SliceStable(aEKU, func(i, j int) bool { return aEKU[i] < aEKU[j] }) sort.SliceStable(bEKU, func(i, j int) bool { return bEKU[i] < bEKU[j] }) for i := range aEKU { if aEKU[i] != bEKU[i] { return false } } return true }
1
28,866
Does this make it easier to read? Previously this would have run `kubectl describe certificate`, right? If so, it would have included events related to the cert. Will we miss those? Would the old `kubectl describe` output have gone to stderr?
jetstack-cert-manager
go
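The `deduplicateExtKeyUsages` and `keyUsagesMatch` helpers in the file above deduplicate usages through a map and compare usage slices order-insensitively after sorting. A rough Python stand-in for the same pattern (integer codes substitute for the `x509.ExtKeyUsage` constants; this is a sketch, not the project's code):

def dedupe(usages):
    """Drop duplicates, mirroring the map-based Go dedupe."""
    return list(set(usages))

def usages_match(a, b):
    """Order-insensitive comparison after deduplication."""
    return sorted(dedupe(a)) == sorted(dedupe(b))

assert usages_match([1, 2, 2], [2, 1])   # same usages despite order/duplicates
assert not usages_match([1, 2], [1, 3])  # different usages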
@@ -24,11 +24,14 @@ import com.google.gson.annotations.SerializedName; public class Internals { public static final String SERIALIZED_APPLICATION_SERVER = "application-server"; + public static final String SERIALIZED_CAMUNDA_INTEGRATION = "camunda-integration"; protected Database database; @SerializedName(value = SERIALIZED_APPLICATION_SERVER) protected ApplicationServer applicationServer; protected Map<String, Command> commands; + @SerializedName(value = SERIALIZED_CAMUNDA_INTEGRATION) + protected Map<String, Object> camundaIntegration; protected Map<String, Metric> metrics;
1
/* * Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH * under one or more contributor license agreements. See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. Camunda licenses this file to you under the Apache License, * Version 2.0; you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.engine.impl.telemetry.dto; import java.util.HashMap; import java.util.Map; import com.google.gson.annotations.SerializedName; public class Internals { public static final String SERIALIZED_APPLICATION_SERVER = "application-server"; protected Database database; @SerializedName(value = SERIALIZED_APPLICATION_SERVER) protected ApplicationServer applicationServer; protected Map<String, Command> commands; protected Map<String, Metric> metrics; protected Jdk jdk; public Internals() { this(null, null, null); } public Internals(Database database, ApplicationServer server, Jdk jdk) { this.database = database; this.applicationServer = server; this.commands = new HashMap<>(); this.jdk = jdk; } public Internals(Internals internals) { this(internals.database, internals.applicationServer, internals.jdk); this.commands = internals.getCommands(); this.metrics = internals.getMetrics(); } public Database getDatabase() { return database; } public void setDatabase(Database database) { this.database = database; } public ApplicationServer getApplicationServer() { return applicationServer; } public void setApplicationServer(ApplicationServer applicationServer) { this.applicationServer = applicationServer; } public Map<String, Command> getCommands() { return commands; } public void setCommands(Map<String, Command> commands) { this.commands = commands; } public Map<String, Metric> getMetrics() { return metrics; } public void setMetrics(Map<String, Metric> metrics) { this.metrics = metrics; } public void mergeDynamicData(Internals other) { this.commands = other.commands; this.metrics = other.metrics; } public Jdk getJdk() { return jdk; } public void setJdk(Jdk jdk) { this.jdk = jdk; } }
1
10,866
Just an understanding question: do we need a map here, or would a list/set of Strings be sufficient? We are not adding entries with `false` here, are we? Being in the list already means it's used, right?
camunda-camunda-bpm-platform
java
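On the reviewer's map-versus-set question above: one practical difference is the serialized shape, since a name-to-flag map becomes a JSON object while a list or set of names becomes a JSON array. A small illustration (the integration name is hypothetical):

import json

as_map = {"spring-boot-starter": True}     # hypothetical integration name
as_list = sorted({"spring-boot-starter"})  # json.dumps cannot take a set directly

print(json.dumps(as_map))   # {"spring-boot-starter": true}
print(json.dumps(as_list))  # ["spring-boot-starter"]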
@@ -567,7 +567,8 @@ class CentralPlannerScheduler(Scheduler): for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) - if task.id not in necessary_tasks and self._state.prune(task, self._config): + removed = self._state.prune(task, self._config) + if removed and task.id not in necessary_tasks: remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks)
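The reorder in this patch matters because `and` short-circuits: in the old order, the side-effecting `prune` call (which, per the scheduler code below, resets FAILED tasks to PENDING and re-enables expired DISABLED tasks) never ran for tasks in `necessary_tasks`. A minimal sketch of the behavioral difference, using a stand-in `prune` that only records calls:

calls_old, calls_new = [], []

def prune(task, calls):
    # stand-in for SimpleTaskState.prune: side-effecting, returns whether
    # the task should be removed
    calls.append(task)
    return False

necessary_tasks = {"a"}

for task in ["a", "b"]:
    if task not in necessary_tasks and prune(task, calls_old):  # old order
        pass

for task in ["a", "b"]:
    removed = prune(task, calls_new)  # new order: prune always runs
    if removed and task not in necessary_tasks:
        pass

assert calls_old == ["b"]       # "a" skipped by short-circuit
assert calls_new == ["a", "b"]  # side effects run for every task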
1
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The system for scheduling tasks and executing them in order. Deals with dependencies, priorities, resources, etc. The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them. See :doc:`/central_scheduler` for more info. """ import collections try: import cPickle as pickle except ImportError: import pickle import datetime import functools import itertools import logging import os import time from luigi import six from luigi import configuration from luigi import notifications from luigi import parameter from luigi import task_history as history from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN from luigi.task import Config logger = logging.getLogger("luigi.server") class Scheduler(object): """ Abstract base class. Note that the methods all take string arguments, not Task objects... """"" add_task = NotImplemented get_work = NotImplemented ping = NotImplemented UPSTREAM_RUNNING = 'UPSTREAM_RUNNING' UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT' UPSTREAM_FAILED = 'UPSTREAM_FAILED' UPSTREAM_DISABLED = 'UPSTREAM_DISABLED' UPSTREAM_SEVERITY_ORDER = ( '', UPSTREAM_RUNNING, UPSTREAM_MISSING_INPUT, UPSTREAM_FAILED, UPSTREAM_DISABLED, ) UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index STATUS_TO_UPSTREAM_MAP = { FAILED: UPSTREAM_FAILED, RUNNING: UPSTREAM_RUNNING, PENDING: UPSTREAM_MISSING_INPUT, DISABLED: UPSTREAM_DISABLED, } class scheduler(Config): # TODO(erikbern): the config_path is needed for backwards compatilibity. We should drop the compatibility # at some point (in particular this would force users to replace all dashes with underscores in the config) retry_delay = parameter.FloatParameter(default=900.0) remove_delay = parameter.FloatParameter(default=600.0) worker_disconnect_delay = parameter.FloatParameter(default=60.0) state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle') # Jobs are disabled if we see more than disable_failures failures in disable_window seconds. # These disables last for disable_persist seconds. disable_window = parameter.IntParameter(default=3600, config_path=dict(section='scheduler', name='disable-window-seconds')) disable_failures = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-num-failures')) disable_hard_timeout = parameter.IntParameter(default=None, config_path=dict(section='scheduler', name='disable-hard-timeout')) disable_persist = parameter.IntParameter(default=86400, config_path=dict(section='scheduler', name='disable-persist-seconds')) max_shown_tasks = parameter.IntParameter(default=100000) max_graph_nodes = parameter.IntParameter(default=100000) prune_done_tasks = parameter.BoolParameter(default=False) record_task_history = parameter.BoolParameter(default=False) prune_on_get_work = parameter.BoolParameter(default=False) def fix_time(x): # Backwards compatibility for a fix in Dec 2014. 
Prior to the fix, pickled state might store datetime objects # Let's remove this function soon if isinstance(x, datetime.datetime): return time.mktime(x.timetuple()) else: return x class Failures(object): """ This class tracks the number of failures in a given time window. Failures added are marked with the current timestamp, and this class counts the number of failures in a sliding time window ending at the present. """ def __init__(self, window): """ Initialize with the given window. :param window: how long to track failures for, as a float (number of seconds). """ self.window = window self.failures = collections.deque() self.first_failure_time = None def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time) def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and fix_time(self.failures[0]) < min_time: self.failures.popleft() return len(self.failures) def clear(self): """ Clear the failure queue. """ self.failures.clear() def _get_default(x, default): if x is not None: return x else: return default class Task(object): def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None, params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None, tracking_url=None): self.id = task_id self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active) self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active if deps is None: self.deps = set() else: self.deps = set(deps) self.status = status # PENDING, RUNNING, FAILED or DONE self.time = time.time() # Timestamp when task was first added self.retry = None self.remove = None self.worker_running = None # the worker id that is currently running the task or None self.time_running = None # Timestamp when picked up by worker self.expl = None self.priority = priority self.resources = _get_default(resources, {}) self.family = family self.module = module self.params = _get_default(params, {}) self.disable_failures = disable_failures self.disable_hard_timeout = disable_hard_timeout self.failures = Failures(disable_window) self.tracking_url = tracking_url self.scheduler_disable_time = None self.runnable = False def __repr__(self): return "Task(%r)" % vars(self) def add_failure(self): self.failures.add_failure() def has_excessive_failures(self): if (self.failures.first_failure_time is not None and self.disable_hard_timeout): if (time.time() >= self.failures.first_failure_time + self.disable_hard_timeout): return True if self.failures.num_failures() >= self.disable_failures: return True return False def can_disable(self): return (self.disable_failures is not None or self.disable_hard_timeout is not None) class Worker(object): """ Structure for tracking worker activity and keeping their references. """ def __init__(self, worker_id, last_active=None): self.id = worker_id self.reference = None # reference to the worker in the real world. 
(Currently a dict containing just the host) self.last_active = last_active or time.time() # seconds since epoch self.last_get_work = None self.started = time.time() # seconds since epoch self.tasks = set() # task objects self.info = {} def add_info(self, info): self.info.update(info) def update(self, worker_reference, get_work=False): if worker_reference: self.reference = worker_reference self.last_active = time.time() if get_work: self.last_get_work = time.time() def prune(self, config): # Delete workers that haven't said anything for a while (probably killed) if self.last_active + config.worker_disconnect_delay < time.time(): return True def get_pending_tasks(self, state): """ Get PENDING (and RUNNING) tasks for this worker. You have to pass in the state for optimization reasons. """ if len(self.tasks) < state.num_pending_tasks(): return six.moves.filter(lambda task: task.status in [PENDING, RUNNING], self.tasks) else: return state.get_pending_tasks() def is_trivial_worker(self, state): """ If it's not an assistant having only tasks that are without requirements. We have to pass the state parameter for optimization reasons. """ if self.assistant: return False return all(not task.resources for task in self.get_pending_tasks(state)) @property def assistant(self): return self.info.get('assistant', False) def __str__(self): return self.id class SimpleTaskState(object): """ Keep track of the current state and handle persistance. The point of this class is to enable other ways to keep state, eg. by using a database These will be implemented by creating an abstract base class that this and other classes inherit from. """ def __init__(self, state_path): self._state_path = state_path self._tasks = {} # map from id to a Task object self._status_tasks = collections.defaultdict(dict) self._active_workers = {} # map from id to a Worker object def get_state(self): return self._tasks, self._active_workers def set_state(self, state): self._tasks, self._active_workers = state def dump(self): try: with open(self._state_path, 'wb') as fobj: pickle.dump(self.get_state(), fobj) except IOError: logger.warning("Failed saving scheduler state", exc_info=1) else: logger.info("Saved state in %s", self._state_path) # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control? def load(self): if os.path.exists(self._state_path): logger.info("Attempting to load state from %s", self._state_path) try: with open(self._state_path, 'rb') as fobj: state = pickle.load(fobj) except BaseException: logger.exception("Error when loading state. Starting from clean slate.") return self.set_state(state) self._status_tasks = collections.defaultdict(dict) for task in six.itervalues(self._tasks): self._status_tasks[task.status][task.id] = task # Convert from old format # TODO: this is really ugly, we need something more future-proof # Every time we add an attribute to the Worker or Task class, this # code needs to be updated # Compatibility since 2014-06-02 for k, v in six.iteritems(self._active_workers): if isinstance(v, float): self._active_workers[k] = Worker(worker_id=k, last_active=v) # Compatibility since 2015-05-28 if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)): # If you load from an old format where Workers don't contain tasks. 
for k, worker in six.iteritems(self._active_workers): worker.tasks = set() for task in six.itervalues(self._tasks): for worker_id in task.workers: self._active_workers[worker_id].tasks.add(task) # Compatibility since 2015-04-28 if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)): for t in six.itervalues(self._tasks): t.disable_hard_timeout = None else: logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path) def get_active_tasks(self, status=None): if status: for task in six.itervalues(self._status_tasks[status]): yield task else: for task in six.itervalues(self._tasks): yield task def get_running_tasks(self): return six.itervalues(self._status_tasks[RUNNING]) def get_pending_tasks(self): return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status]) for status in [PENDING, RUNNING]) def num_pending_tasks(self): """ Return how many tasks are PENDING + RUNNING. O(1). """ return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) def get_task(self, task_id, default=None, setdefault=None): if setdefault: task = self._tasks.setdefault(task_id, setdefault) self._status_tasks[task.status][task.id] = task return task else: return self._tasks.get(task_id, default) def has_task(self, task_id): return task_id in self._tasks def re_enable(self, task, config=None): task.scheduler_disable_time = None task.failures.clear() if config: self.set_status(task, FAILED, config) task.failures.clear() def set_status(self, task, new_status, config=None): if new_status == FAILED: assert config is not None if new_status == DISABLED and task.status == RUNNING: return if task.status == DISABLED: if new_status == DONE: self.re_enable(task) # don't allow workers to override a scheduler disable elif task.scheduler_disable_time is not None and new_status != DISABLED: return if new_status == FAILED and task.can_disable() and task.status != DISABLED: task.add_failure() if task.has_excessive_failures(): task.scheduler_disable_time = time.time() new_status = DISABLED notifications.send_error_email( 'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id), '{task} failed {failures} times in the last {window} seconds, so it is being ' 'disabled for {persist} seconds'.format( failures=config.disable_failures, task=task.id, window=config.disable_window, persist=config.disable_persist, )) elif new_status == DISABLED: task.scheduler_disable_time = None self._status_tasks[task.status].pop(task.id) self._status_tasks[new_status][task.id] = task task.status = new_status def fail_dead_worker_task(self, task, config, assistants): # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants: logger.info("Task %r is marked as running by disconnected worker %r -> marking as " "FAILED with retry delay of %rs", task.id, task.worker_running, config.retry_delay) task.worker_running = None self.set_status(task, FAILED, config) task.retry = time.time() + config.retry_delay def prune(self, task, config): remove = False # Mark tasks with no remaining active stakeholders for deletion if not task.stakeholders: if task.remove is None: logger.info("Task %r has stakeholders %r but none remain connected -> will remove " "task in %s seconds", task.id, task.stakeholders, config.remove_delay) task.remove = time.time() + config.remove_delay # Re-enable task after the disable time 
expires if task.status == DISABLED and task.scheduler_disable_time is not None: if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist: self.re_enable(task, config) # Remove tasks that have no stakeholders if task.remove and time.time() > task.remove: logger.info("Removing task %r (no connected stakeholders)", task.id) remove = True # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0 if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time(): self.set_status(task, PENDING, config) return remove def inactivate_tasks(self, delete_tasks): # The terminology is a bit confusing: we used to "delete" tasks when they became inactive, # but with a pluggable state storage, you might very well want to keep some history of # older tasks as well. That's why we call it "inactivate" (as in the verb) for task in delete_tasks: task_obj = self._tasks.pop(task) self._status_tasks[task_obj.status].pop(task) def get_active_workers(self, last_active_lt=None, last_get_work_gt=None): for worker in six.itervalues(self._active_workers): if last_active_lt is not None and worker.last_active >= last_active_lt: continue last_get_work = getattr(worker, 'last_get_work', None) if last_get_work_gt is not None and ( last_get_work is None or last_get_work <= last_get_work_gt): continue yield worker def get_assistants(self, last_active_lt=None): return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt)) def get_worker_ids(self): return self._active_workers.keys() # only used for unit tests def get_worker(self, worker_id): return self._active_workers.setdefault(worker_id, Worker(worker_id)) def inactivate_workers(self, delete_workers): # Mark workers as inactive for worker in delete_workers: self._active_workers.pop(worker) # remove workers from tasks for task in self.get_active_tasks(): task.stakeholders.difference_update(delete_workers) task.workers.difference_update(delete_workers) def get_necessary_tasks(self): necessary_tasks = set() for task in self.get_active_tasks(): if task.status not in (DONE, DISABLED) or \ getattr(task, 'scheduler_disable_time', None) is not None: necessary_tasks.update(task.deps) necessary_tasks.add(task.id) return necessary_tasks class CentralPlannerScheduler(Scheduler): """ Async scheduler that can handle multiple workers, etc. Can be run locally or on a server (using RemoteScheduler + server.Server). """ def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs): """ Keyword Arguments: :param config: an object of class "scheduler" or None (in which the global instance will be used) :param resources: a dict of str->int constraints :param task_history_override: ignore config and use this object as the task history """ self._config = config or scheduler(**kwargs) self._state = SimpleTaskState(self._config.state_path) if task_history_impl: self._task_history = task_history_impl elif self._config.record_task_history: from luigi import db_task_history # Needs sqlalchemy, thus imported here self._task_history = db_task_history.DbTaskHistory() else: self._task_history = history.NopHistory() self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter? 
self._make_task = functools.partial( Task, disable_failures=self._config.disable_failures, disable_hard_timeout=self._config.disable_hard_timeout, disable_window=self._config.disable_window) self._worker_requests = {} def load(self): self._state.load() def dump(self): self._state.dump() def prune(self): logger.info("Starting pruning of task graph") remove_workers = [] for worker in self._state.get_active_workers(): if worker.prune(self._config): logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay) remove_workers.append(worker.id) self._state.inactivate_workers(remove_workers) assistant_ids = set(w.id for w in self._state.get_assistants()) remove_tasks = [] if assistant_ids: necessary_tasks = self._state.get_necessary_tasks() else: necessary_tasks = () for task in self._state.get_active_tasks(): self._state.fail_dead_worker_task(task, self._config, assistant_ids) if task.id not in necessary_tasks and self._state.prune(task, self._config): remove_tasks.append(task.id) self._state.inactivate_tasks(remove_tasks) logger.info("Done pruning task graph") def update(self, worker_id, worker_reference=None, get_work=False): """ Keep track of whenever the worker was last active. """ worker = self._state.get_worker(worker_id) worker.update(worker_reference, get_work=get_work) def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker) def add_task(self, task_id=None, status=PENDING, runnable=True, deps=None, new_deps=None, expl=None, resources=None, priority=0, family='', module=None, params=None, assistant=False, tracking_url=None, **kwargs): """ * add task identified by task_id if it doesn't exist * if deps is not None, update dependency list * update status of task * add additional workers/stakeholders * update priority when needed """ worker_id = kwargs['worker'] self.update(worker_id) task = self._state.get_task(task_id, setdefault=self._make_task( task_id=task_id, status=PENDING, deps=deps, resources=resources, priority=priority, family=family, module=module, params=params)) # for setting priority, we'll sometimes create tasks with unset family and params if not task.family: task.family = family if not getattr(task, 'module', None): task.module = module if not task.params: task.params = _get_default(params, {}) if tracking_url is not None or task.status != RUNNING: task.tracking_url = tracking_url if task.remove is not None: task.remove = None # unmark task for removal so it isn't removed after being added if expl is not None: task.expl = expl if not (task.status == RUNNING and status == PENDING) or new_deps: # don't allow re-scheduling of task while it is running, it must either fail or succeed first if status == PENDING or status != task.status: # Update the DB only if there was an actual change, to prevent noise.
# We also check for status == PENDING b/c that's the default value # (so checking for status != task.status would lie) self._update_task_history(task, status) self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config) if status == FAILED: task.retry = self._retry_time(task, self._config) if deps is not None: task.deps = set(deps) if new_deps is not None: task.deps.update(new_deps) if resources is not None: task.resources = resources if not assistant: task.stakeholders.add(worker_id) # Task dependencies might not exist yet. Let's create dummy tasks for them for now. # Otherwise the task dependencies might end up being pruned if scheduling takes a long time for dep in task.deps or []: t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority)) t.stakeholders.add(worker_id) self._update_priority(task, priority, worker_id) if runnable: task.workers.add(worker_id) self._state.get_worker(worker_id).tasks.add(task) task.runnable = runnable def add_worker(self, worker, info, **kwargs): self._state.get_worker(worker).add_info(info) def update_resources(self, **resources): if self._resources is None: self._resources = {} self._resources.update(resources) def _has_resources(self, needed_resources, used_resources): if needed_resources is None: return True available_resources = self._resources or {} for resource, amount in six.iteritems(needed_resources): if amount + used_resources[resource] > available_resources.get(resource, 1): return False return True def _used_resources(self): used_resources = collections.defaultdict(int) if self._resources is not None: for task in self._state.get_active_tasks(): if task.status == RUNNING and task.resources: for resource, amount in six.iteritems(task.resources): used_resources[resource] += amount return used_resources def _rank(self, task): """ Return worker's rank function for task scheduling. :return: """ return task.priority, -task.time def _schedulable(self, task): if task.status != PENDING: return False for dep in task.deps: dep_task = self._state.get_task(dep, default=None) if dep_task is None or dep_task.status != DONE: return False return True def _retry_time(self, task, config): return time.time() + config.retry_delay def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs): # TODO: remove any expired nodes # Algo: iterate over all nodes, find the highest priority node with no dependencies and available # resources. # Resource checking looks both at currently available resources and at which resources would # be available if all running tasks died and we rescheduled all workers greedily. We do both # checks in order to prevent a worker with many low-priority tasks from starving other # workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely # nothing it can wait for if self._config.prune_on_get_work: self.prune() worker_id = kwargs['worker'] # Return remaining tasks that have no FAILED descendants self.update(worker_id, {'host': host}, get_work=True) if assistant: self.add_worker(worker_id, [('assistant', assistant)]) best_task = None if current_tasks is not None: ct_set = set(current_tasks) for task in sorted(self._state.get_running_tasks(), key=self._rank): if task.worker_running == worker_id and task.id not in ct_set: best_task = task locally_pending_tasks = 0 running_tasks = [] upstream_table = {} greedy_resources = collections.defaultdict(int) n_unique_pending = 0 worker = self._state.get_worker(worker_id) if worker.is_trivial_worker(self._state): relevant_tasks = worker.get_pending_tasks(self._state) used_resources = collections.defaultdict(int) greedy_workers = dict() # If there's no resources, then they can grab any task else: relevant_tasks = self._state.get_pending_tasks() used_resources = self._used_resources() activity_limit = time.time() - self._config.worker_disconnect_delay active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit) greedy_workers = dict((worker.id, worker.info.get('workers', 1)) for worker in active_workers) tasks = list(relevant_tasks) tasks.sort(key=self._rank, reverse=True) for task in tasks: upstream_status = self._upstream_status(task.id, upstream_table) in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers if task.status == RUNNING and in_workers: # Return a list of currently running tasks to the client, # makes it easier to troubleshoot other_worker = self._state.get_worker(task.worker_running) more_info = {'task_id': task.id, 'worker': str(other_worker)} if other_worker is not None: more_info.update(other_worker.info) running_tasks.append(more_info) if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED: locally_pending_tasks += 1 if len(task.workers) == 1 and not assistant: n_unique_pending += 1 if best_task: continue if task.status == RUNNING and (task.worker_running in greedy_workers): greedy_workers[task.worker_running] -= 1 for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount if self._schedulable(task) and self._has_resources(task.resources, greedy_resources): if in_workers and self._has_resources(task.resources, used_resources): best_task = task else: workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers for task_worker in workers: if greedy_workers.get(task_worker, 0) > 0: # use up a worker greedy_workers[task_worker] -= 1 # keep track of the resources used in greedy scheduling for resource, amount in six.iteritems((task.resources or {})): greedy_resources[resource] += amount break reply = {'n_pending_tasks': locally_pending_tasks, 'running_tasks': running_tasks, 'task_id': None, 'n_unique_pending': n_unique_pending} if best_task: self._state.set_status(best_task, RUNNING, self._config) best_task.worker_running = worker_id best_task.time_running = time.time() self._update_task_history(best_task, RUNNING, host=host) reply['task_id'] = best_task.id reply['task_family'] = best_task.family reply['task_module'] = getattr(best_task, 'module', None) reply['task_params'] = best_task.params return reply def ping(self, **kwargs): worker_id = kwargs['worker'] self.update(worker_id) def _upstream_status(self, task_id, 
upstream_status_table): if task_id in upstream_status_table: return upstream_status_table[task_id] elif self._state.has_task(task_id): task_stack = [task_id] while task_stack: dep_id = task_stack.pop() if self._state.has_task(dep_id): dep = self._state.get_task(dep_id) if dep.status == DONE: continue if dep_id not in upstream_status_table: if dep.status == PENDING and dep.deps: task_stack = task_stack + [dep_id] + list(dep.deps) upstream_status_table[dep_id] = '' # will be updated postorder else: dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '') upstream_status_table[dep_id] = dep_status elif upstream_status_table[dep_id] == '' and dep.deps: # This is the postorder update step when we set the # status based on the previously calculated child elements upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps] upstream_status.append('') # to handle empty list status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY) upstream_status_table[dep_id] = status return upstream_status_table[dep_id] def _serialize_task(self, task_id, include_deps=True, deps=None): task = self._state.get_task(task_id) ret = { 'status': task.status, 'workers': list(task.workers), 'worker_running': task.worker_running, 'time_running': getattr(task, "time_running", None), 'start_time': task.time, 'params': task.params, 'name': task.family, 'priority': task.priority, 'resources': task.resources, 'tracking_url': getattr(task, "tracking_url", None), } if task.status == DISABLED: ret['re_enable_able'] = task.scheduler_disable_time is not None if include_deps: ret['deps'] = list(task.deps if deps is None else deps) return ret def graph(self, **kwargs): self.prune() serialized = {} seen = set() for task in self._state.get_active_tasks(): serialized.update(self._traverse_graph(task.id, seen)) return serialized def _traverse_graph(self, root_task_id, seen=None, dep_func=None): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: dep_func = lambda t: t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.warn('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. 
family, params = UNKNOWN, {} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'priority': 0, } else: deps = dep_func(task) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if len(serialized) >= self._config.max_graph_nodes: break return serialized def dep_graph(self, task_id, **kwargs): self.prune() if not self._state.has_task(task_id): return {} return self._traverse_graph(task_id) def inverse_dep_graph(self, task_id, **kwargs): self.prune() if not self._state.has_task(task_id): return {} inverse_graph = collections.defaultdict(set) for task in self._state.get_active_tasks(): for dep in task.deps: inverse_graph[dep].add(task.id) return self._traverse_graph(task_id, dep_func=lambda t: inverse_graph[t.id]) def task_list(self, status, upstream_status, limit=True, search=None, **kwargs): """ Query for a subset of tasks by status. """ self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: filter_func = lambda _: True else: terms = search.split() filter_func = lambda t: all(term in t.id for term in terms) for task in filter(filter_func, self._state.get_active_tasks(status)): if (task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table)): serialized = self._serialize_task(task.id, False) result[task.id] = serialized if limit and len(result) > self._config.max_shown_tasks: return {'num_tasks': len(result)} return result def worker_list(self, include_running=True, **kwargs): self.prune() workers = [ dict( name=worker.id, last_active=worker.last_active, started=getattr(worker, 'started', None), **worker.info ) for worker in self._state.get_active_workers()] workers.sort(key=lambda worker: worker['started'], reverse=True) if include_running: running = collections.defaultdict(dict) num_pending = collections.defaultdict(int) num_uniques = collections.defaultdict(int) for task in self._state.get_pending_tasks(): if task.status == RUNNING and task.worker_running: running[task.worker_running][task.id] = self._serialize_task(task.id, False) elif task.status == PENDING: for worker in task.workers: num_pending[worker] += 1 if len(task.workers) == 1: num_uniques[list(task.workers)[0]] += 1 for worker in workers: tasks = running[worker['name']] worker['num_running'] = len(tasks) worker['num_pending'] = num_pending[worker['name']] worker['num_uniques'] = num_uniques[worker['name']] worker['running'] = tasks return workers def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. 
:param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, False) result[task.status][task.id] = serialized return result def re_enable_task(self, task_id): serialized = {} task = self._state.get_task(task_id) if task and task.status == DISABLED and task.scheduler_disable_time: self._state.re_enable(task, self._config) serialized = self._serialize_task(task_id) return serialized def fetch_error(self, task_id, **kwargs): if self._state.has_task(task_id): return {"taskId": task_id, "error": self._state.get_task(task_id).expl} else: return {"taskId": task_id, "error": ""} def _update_task_history(self, task, status, host=None): try: if status == DONE or status == FAILED: successful = (status == DONE) self._task_history.task_finished(task, successful) elif status == PENDING: self._task_history.task_scheduled(task) elif status == RUNNING: self._task_history.task_started(task, host) except BaseException: logger.warning("Error saving Task history", exc_info=True) @property def task_history(self): # Used by server.py to expose the calls return self._task_history
1
14,010
Ah, so the change is that you want the `prune()` side effect before doing the `task.id not in necessary_tasks` membership check? Stupid non-Haskell programming languages :p (see the sketch after this record)
spotify-luigi
py
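The luigi review above is about evaluation order with short-circuiting boolean operators: `self._state.prune(task, ...)` has side effects (retry timers, re-enables, removal bookkeeping), so whether it sits before or after the `task.id not in necessary_tasks` test changes which tasks actually get those side effects applied. Below is a minimal Go sketch of that ordering difference; the `prune` helper and the task ids are hypothetical stand-ins, not luigi's actual code.

```go
package main

import "fmt"

// pruneCalled counts invocations, standing in for the side effects
// that luigi's prune() performs on each task it inspects.
var pruneCalled int

// prune is a hypothetical stand-in: it always runs its side effect and
// reports whether the task should be dropped.
func prune(id string) bool {
	pruneCalled++
	return id == "a"
}

func main() {
	necessary := map[string]bool{"b": true}
	tasks := []string{"a", "b"}

	// Membership test first: && short-circuits, so prune's side effects
	// never run for tasks that are in the necessary set.
	for _, id := range tasks {
		_ = !necessary[id] && prune(id)
	}
	fmt.Println("prune ran", pruneCalled, "times") // prune ran 1 times

	// Prune first (what the reviewer describes): the side effect runs
	// for every task before the membership check is consulted.
	pruneCalled = 0
	for _, id := range tasks {
		_ = prune(id) && !necessary[id]
	}
	fmt.Println("prune ran", pruneCalled, "times") // prune ran 2 times
}
```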
@@ -2,6 +2,13 @@ package cmd import ( "fmt" + "os" + osexec "os/exec" + "path/filepath" + "strings" + "testing" + "time" + "github.com/drud/ddev/pkg/ddevapp" "github.com/drud/ddev/pkg/exec" "github.com/drud/ddev/pkg/fileutil"
1
package cmd import ( "fmt" "github.com/drud/ddev/pkg/ddevapp" "github.com/drud/ddev/pkg/exec" "github.com/drud/ddev/pkg/fileutil" "github.com/drud/ddev/pkg/nodeps" "github.com/drud/ddev/pkg/testcommon" "github.com/drud/ddev/pkg/util" asrt "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "os" osexec "os/exec" "path/filepath" "strings" "testing" "time" ) // TestCustomCommands does basic checks to make sure custom commands work OK. func TestCustomCommands(t *testing.T) { assert := asrt.New(t) runTime := util.TimeTrack(time.Now(), "ddev list") tmpHome := testcommon.CreateTmpDir(t.Name() + "tempHome") origHome := os.Getenv("HOME") origDebug := os.Getenv("DDEV_DEBUG") // Change the homedir temporarily err := os.Setenv("HOME", tmpHome) require.NoError(t, err) _ = os.Setenv("DDEV_DEBUG", "") pwd, _ := os.Getwd() testCustomCommandsDir := filepath.Join(pwd, "testdata", t.Name()) site := TestSites[0] switchDir := TestSites[0].Chdir() app, _ := ddevapp.NewApp(TestSites[0].Dir, false, "") origType := app.Type t.Cleanup(func() { runTime() app.Type = origType _ = app.WriteConfig() _ = os.RemoveAll(tmpHome) _ = os.Setenv("HOME", origHome) _ = os.Setenv("DDEV_DEBUG", origDebug) _ = fileutil.PurgeDirectory(filepath.Join(site.Dir, ".ddev", "commands")) _ = fileutil.PurgeDirectory(filepath.Join(site.Dir, ".ddev", ".global_commands")) switchDir() }) err = app.Start() require.NoError(t, err) // We can't use the standard getGlobalDDevDir here because *our* global hasn't changed. // It's changed via $HOME for the ddev subprocess err = os.MkdirAll(filepath.Join(tmpHome, ".ddev"), 0755) assert.NoError(err) tmpHomeGlobalCommandsDir := filepath.Join(tmpHome, ".ddev", "commands") err = os.RemoveAll(tmpHomeGlobalCommandsDir) assert.NoError(err) projectCommandsDir := app.GetConfigPath("commands") globalCommandsDir := app.GetConfigPath(".global_commands") _ = os.RemoveAll(globalCommandsDir) err = fileutil.CopyDir(filepath.Join(testCustomCommandsDir, "global_commands"), tmpHomeGlobalCommandsDir) require.NoError(t, err) assert.FileExists(filepath.Join(projectCommandsDir, "db", "mysql")) assert.FileExists(filepath.Join(projectCommandsDir, "host", "mysqlworkbench.example")) out, err := exec.RunCommand(DdevBin, []string{}) assert.NoError(err) assert.Contains(out, "mysql client in db container") // Test the `ddev mysql` command with stdin inputFile := filepath.Join(testCustomCommandsDir, "select99.sql") f, err := os.Open(inputFile) require.NoError(t, err) // nolint: errcheck defer f.Close() command := osexec.Command(DdevBin, "mysql") command.Stdin = f byteOut, err := command.CombinedOutput() require.NoError(t, err, "Failed ddev mysql; output=%v", string(byteOut)) assert.Contains(string(byteOut), "99\n99\n") _ = os.RemoveAll(projectCommandsDir) _ = os.RemoveAll(globalCommandsDir) // Now copy a project commands and global commands and make sure they show up and execute properly err = fileutil.CopyDir(filepath.Join(testCustomCommandsDir, "project_commands"), projectCommandsDir) assert.NoError(err) out, err = exec.RunCommand(DdevBin, []string{}) assert.NoError(err) assert.Contains(out, "testhostcmd project (shell host container command)") assert.Contains(out, "testwebcmd project (shell web container command)") assert.Contains(out, "testhostglobal global (global shell host container command)") assert.Contains(out, "testwebglobal global (global shell web container command)") assert.NotContains(out, "testhostcmd global") //the global testhostcmd should have been overridden by the projct one 
assert.NotContains(out, "testwebcmd global") //the global testwebcmd should have been overridden by the projct one for _, c := range []string{"testhostcmd", "testhostglobal", "testwebcmd", "testwebglobal"} { args := []string{c, "hostarg1", "hostarg2", "--hostflag1"} out, err = exec.RunCommand(DdevBin, args) assert.NoError(err, "Failed to run ddev %s %v", c, args) expectedHost, _ := os.Hostname() if !strings.Contains(c, "host") { expectedHost = site.Name + "-web" } assert.Contains(out, fmt.Sprintf("%s was executed with args=hostarg1 hostarg2 --hostflag1 on host %s", c, expectedHost)) } app.Type = nodeps.AppTypePHP err = app.WriteConfig() assert.NoError(err) // Make sure that all the official ddev-provided custom commands are usable by just checking help for _, c := range []string{"launch", "live", "mysql", "xdebug"} { _, err = exec.RunCommand(DdevBin, []string{c, "-h"}) assert.NoError(err, "Failed to run ddev %s -h", c) } // The various CMS commands should not be available here for _, c := range []string{"artisan", "drush", "magento", "typo3", "typo3cms", "wp"} { _, err = exec.RunCommand(DdevBin, []string{c, "-h"}) assert.Error(err, "found command %s when it should not have been there (no error) app.Type=%s", c, app.Type) } // TYPO3 commands should only be available for type typo3 app.Type = nodeps.AppTypeTYPO3 _ = app.WriteConfig() _, _ = exec.RunCommand(DdevBin, nil) for _, c := range []string{"typo3", "typo3cms"} { _, err = exec.RunCommand(DdevBin, []string{c, "-h"}) assert.NoError(err) } // Drupal types should only be available for type drupal* app.Type = nodeps.AppTypeDrupal9 _ = app.WriteConfig() _, _ = exec.RunCommand(DdevBin, nil) for _, c := range []string{"drush"} { _, err = exec.RunCommand(DdevBin, []string{c, "-h"}) assert.NoError(err) } // Laravel types should only be available for type laravel app.Type = nodeps.AppTypeLaravel _ = app.WriteConfig() _, _ = exec.RunCommand(DdevBin, nil) for _, c := range []string{"artisan"} { _, err = exec.RunCommand(DdevBin, []string{c, "-h"}) assert.NoError(err) } // Wordpress types should only be available for type drupal* app.Type = nodeps.AppTypeWordPress _ = app.WriteConfig() _, _ = exec.RunCommand(DdevBin, nil) for _, c := range []string{"wp"} { _, err = exec.RunCommand(DdevBin, []string{c, "-h"}) assert.NoError(err, "expected to find command %s for app.Type=%s", c, app.Type) } // Make sure that the non-command stuff we installed is there for _, f := range []string{"db/mysqldump.example", "db/README.txt", "web/README.txt", "host/README.txt", "host/phpstorm.example"} { assert.FileExists(filepath.Join(projectCommandsDir, f)) assert.FileExists(filepath.Join(globalCommandsDir, f)) } // Make sure we haven't accidentally created anything inappropriate in ~/.ddev assert.False(fileutil.FileExists(filepath.Join(tmpHome, ".ddev", ".globalcommands"))) assert.False(fileutil.FileExists(filepath.Join(origHome, ".ddev", ".globalcommands"))) } // TestLaunchCommand tests that the launch command behaves all the ways it should behave func TestLaunchCommand(t *testing.T) { assert := asrt.New(t) pwd, _ := os.Getwd() // Create a temporary directory and switch to it. 
tmpdir := testcommon.CreateTmpDir(t.Name()) err := os.Chdir(tmpdir) assert.NoError(err) _ = os.Setenv("DDEV_DEBUG", "true") app, err := ddevapp.NewApp(tmpdir, false, "") require.NoError(t, err) err = app.WriteConfig() require.NoError(t, err) t.Cleanup(func() { err = app.Stop(true, false) assert.NoError(err) err = os.Chdir(pwd) assert.NoError(err) _ = os.RemoveAll(tmpdir) }) // This only tests the https port changes, but that might be enough app.RouterHTTPSPort = "8443" _ = app.WriteConfig() err = app.Start() require.NoError(t, err) desc, err := app.Describe(false) require.NoError(t, err) cases := map[string]string{ "": app.GetPrimaryURL(), "-p": desc["phpmyadmin_https_url"].(string), "-m": desc["mailhog_https_url"].(string), } for partialCommand, expect := range cases { // Try with the base URL, simplest case c := DdevBin + ` launch ` + partialCommand + ` | awk '/FULLURL/ {print $2}'` out, err := exec.RunCommand("bash", []string{"-c", c}) out = strings.Trim(out, "\n") assert.NoError(err, `couldn't run "%s", output=%s`, c, out) assert.Contains(out, expect, "output of %s is incorrect with app.RouterHTTPSPort=%s: %s", c, app.RouterHTTPSPort, out) } } // TestMysqlCommand tests `ddev mysql` func TestMysqlCommand(t *testing.T) { assert := asrt.New(t) // Create a temporary directory and switch to it. tmpdir := testcommon.CreateTmpDir(t.Name()) defer testcommon.CleanupDir(tmpdir) defer testcommon.Chdir(tmpdir)() app, err := ddevapp.NewApp(tmpdir, false, "") require.NoError(t, err) err = app.WriteConfig() require.NoError(t, err) err = app.Start() require.NoError(t, err) defer func() { _ = app.Stop(true, false) }() // Test ddev mysql -uroot -proot mysql command := osexec.Command("bash", "-c", "echo 'SHOW TABLES;' | "+DdevBin+" mysql --user=root --password=root --database=mysql") byteOut, err := command.CombinedOutput() assert.NoError(err, "byteOut=%v", string(byteOut)) assert.Contains(string(byteOut), `Tables_in_mysql column_stats columns_priv`) }
1
14,675
This change was not really intended; it was made by the VS Code linter. Looking at other packages, this seems to be a best practice: place internal packages at the top and GitHub imports afterwards (illustrated in the sketch after this record).
drud-ddev
go
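The ddev review above is about Go's import-grouping convention: standard-library packages form the first block and externally hosted modules the second, separated by a blank line. gofmt only sorts imports within each blank-line-separated block, so once the blank line is in place, tools such as goimports keep the grouping stable. A small sketch of the layout, assuming a test file in a module where testify is available:

```go
package cmd

import (
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestImportGrouping exists only to use the imports above; the point is
// the layout: standard library first, a blank line, then GitHub-hosted
// modules, which gofmt-compatible tools preserve as two sorted groups.
func TestImportGrouping(t *testing.T) {
	require.NoError(t, os.Setenv("EXAMPLE", fmt.Sprint(1)))
}
```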
@@ -494,6 +494,8 @@ function findAndModify(coll, query, sort, doc, options, callback) { queryObject.bypassDocumentValidation = finalOptions.bypassDocumentValidation; } + finalOptions.readPreference = ReadPreference.primary; + // Have we specified collation decorateWithCollation(queryObject, coll, finalOptions);
1
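The patch above pins `findAndModify`'s read preference to primary: `findAndModify` mutates data, so routing it by an inherited read preference (e.g. a collection-level `secondaryPreferred`) would send a write to a node that cannot serve it. A hedged Go sketch of the same guard follows; the `cmdOptions` type, `writeCommands` set, and `finalizeOptions` helper are hypothetical illustrations, not the driver's API.

```go
package main

import "fmt"

// cmdOptions is a hypothetical options bag; ReadPreference mirrors the
// driver concept the patch touches.
type cmdOptions struct {
	ReadPreference string
}

// writeCommands lists commands that mutate data and therefore must be
// routed to the primary regardless of any inherited read preference.
var writeCommands = map[string]bool{"findAndModify": true}

// finalizeOptions applies the same override the patch adds: for write
// commands, force the read preference to primary.
func finalizeOptions(command string, opts cmdOptions) cmdOptions {
	if writeCommands[command] {
		opts.ReadPreference = "primary"
	}
	return opts
}

func main() {
	opts := finalizeOptions("findAndModify", cmdOptions{ReadPreference: "secondaryPreferred"})
	fmt.Println(opts.ReadPreference) // primary
}
```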
'use strict'; const applyWriteConcern = require('../utils').applyWriteConcern; const checkCollectionName = require('../utils').checkCollectionName; const Code = require('mongodb-core').BSON.Code; const createIndexDb = require('./db_ops').createIndex; const decorateCommand = require('../utils').decorateCommand; const decorateWithCollation = require('../utils').decorateWithCollation; const decorateWithReadConcern = require('../utils').decorateWithReadConcern; const ensureIndexDb = require('./db_ops').ensureIndex; const evaluate = require('./db_ops').evaluate; const executeCommand = require('./db_ops').executeCommand; const executeDbAdminCommand = require('./db_ops').executeDbAdminCommand; const formattedOrderClause = require('../utils').formattedOrderClause; const resolveReadPreference = require('../utils').resolveReadPreference; const handleCallback = require('../utils').handleCallback; const indexInformationDb = require('./db_ops').indexInformation; const isObject = require('../utils').isObject; const Long = require('mongodb-core').BSON.Long; const MongoError = require('mongodb-core').MongoError; const ReadPreference = require('mongodb-core').ReadPreference; const toError = require('../utils').toError; /** * Group function helper * @ignore */ // var groupFunction = function () { // var c = db[ns].find(condition); // var map = new Map(); // var reduce_function = reduce; // // while (c.hasNext()) { // var obj = c.next(); // var key = {}; // // for (var i = 0, len = keys.length; i < len; ++i) { // var k = keys[i]; // key[k] = obj[k]; // } // // var aggObj = map.get(key); // // if (aggObj == null) { // var newObj = Object.extend({}, key); // aggObj = Object.extend(newObj, initial); // map.put(key, aggObj); // } // // reduce_function(obj, aggObj); // } // // return { "result": map.values() }; // }.toString(); const groupFunction = 'function () {\nvar c = db[ns].find(condition);\nvar map = new Map();\nvar reduce_function = reduce;\n\nwhile (c.hasNext()) {\nvar obj = c.next();\nvar key = {};\n\nfor (var i = 0, len = keys.length; i < len; ++i) {\nvar k = keys[i];\nkey[k] = obj[k];\n}\n\nvar aggObj = map.get(key);\n\nif (aggObj == null) {\nvar newObj = Object.extend({}, key);\naggObj = Object.extend(newObj, initial);\nmap.put(key, aggObj);\n}\n\nreduce_function(obj, aggObj);\n}\n\nreturn { "result": map.values() };\n}'; /** * Perform a bulkWrite operation. See Collection.prototype.bulkWrite for more information. * * @method * @param {Collection} a Collection instance. * @param {object[]} operations Bulk operations to perform. * @param {object} [options] Optional settings. See Collection.prototype.bulkWrite for a list of options. * @param {Collection~bulkWriteOpCallback} [callback] The command result callback */ function bulkWrite(coll, operations, options, callback) { // Add ignoreUndfined if (coll.s.options.ignoreUndefined) { options = Object.assign({}, options); options.ignoreUndefined = coll.s.options.ignoreUndefined; } // Create the bulk operation const bulk = options.ordered === true || options.ordered == null ? 
coll.initializeOrderedBulkOp(options) : coll.initializeUnorderedBulkOp(options); // Do we have a collation let collation = false; // for each op go through and add to the bulk try { for (let i = 0; i < operations.length; i++) { // Get the operation type const key = Object.keys(operations[i])[0]; // Check if we have a collation if (operations[i][key].collation) { collation = true; } // Pass to the raw bulk bulk.raw(operations[i]); } } catch (err) { return callback(err, null); } // Final options for write concern const finalOptions = applyWriteConcern( Object.assign({}, options), { db: coll.s.db, collection: coll }, options ); const writeCon = finalOptions.writeConcern ? finalOptions.writeConcern : {}; const capabilities = coll.s.topology.capabilities(); // Did the user pass in a collation, check if our write server supports it if (collation && capabilities && !capabilities.commandsTakeCollation) { return callback(new MongoError('server/primary/mongos does not support collation')); } // Execute the bulk bulk.execute(writeCon, finalOptions, (err, r) => { // We have connection level error if (!r && err) { return callback(err, null); } r.insertedCount = r.nInserted; r.matchedCount = r.nMatched; r.modifiedCount = r.nModified || 0; r.deletedCount = r.nRemoved; r.upsertedCount = r.getUpsertedIds().length; r.upsertedIds = {}; r.insertedIds = {}; // Update the n r.n = r.insertedCount; // Inserted documents const inserted = r.getInsertedIds(); // Map inserted ids for (let i = 0; i < inserted.length; i++) { r.insertedIds[inserted[i].index] = inserted[i]._id; } // Upserted documents const upserted = r.getUpsertedIds(); // Map upserted ids for (let i = 0; i < upserted.length; i++) { r.upsertedIds[upserted[i].index] = upserted[i]._id; } // Return the results callback(null, r); }); } // Check the update operation to ensure it has atomic operators. function checkForAtomicOperators(update) { const keys = Object.keys(update); // same errors as the server would give for update doc lacking atomic operators if (keys.length === 0) { return toError('The update operation document must contain at least one atomic operator.'); } if (keys[0][0] !== '$') { return toError('the update operation document must contain atomic operators.'); } } /** * Count the number of documents in the collection that match the query. * * @method * @param {Collection} a Collection instance. * @param {object} query The query for the count. * @param {object} [options] Optional settings. See Collection.prototype.count for a list of options. 
* @param {Collection~countCallback} [callback] The command result callback */ function count(coll, query, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = Object.assign({}, options); const skip = options.skip; const limit = options.limit; const hint = options.hint; const maxTimeMS = options.maxTimeMS; query = query || {}; // Final query const cmd = { count: coll.s.name, query: query }; // Add limit, skip and maxTimeMS if defined if (typeof skip === 'number') cmd.skip = skip; if (typeof limit === 'number') cmd.limit = limit; if (typeof maxTimeMS === 'number') cmd.maxTimeMS = maxTimeMS; if (hint) cmd.hint = hint; // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // Do we have a readConcern specified decorateWithReadConcern(cmd, coll, options); // Have we specified collation decorateWithCollation(cmd, coll, options); executeCommand(coll.s.db, cmd, options, (err, result) => { if (err) return handleCallback(callback, err); handleCallback(callback, null, result.n); }); } function countDocuments(coll, query, options, callback) { const skip = options.skip; const limit = options.limit; options = Object.assign({}, options); const pipeline = [{ $match: query }]; // Add skip and limit if defined if (typeof skip === 'number') { pipeline.push({ $skip: skip }); } if (typeof limit === 'number') { pipeline.push({ $limit: limit }); } pipeline.push({ $group: { _id: null, n: { $sum: 1 } } }); delete options.limit; delete options.skip; coll.aggregate(pipeline, options, (err, result) => { if (err) return handleCallback(callback, err); result.toArray((err, docs) => { if (err) return handleCallback(err); handleCallback(callback, null, docs.length ? docs[0].n : 0); }); }); } /** * Create an index on the db and collection. * * @method * @param {Collection} a Collection instance. * @param {(string|object)} fieldOrSpec Defines the index. * @param {object} [options] Optional settings. See Collection.prototype.createIndex for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function createIndex(coll, fieldOrSpec, options, callback) { createIndexDb(coll.s.db, coll.s.name, fieldOrSpec, options, callback); } /** * Create multiple indexes in the collection. This method is only supported for * MongoDB 2.6 or higher. Earlier version of MongoDB will throw a command not supported * error. Index specifications are defined at http://docs.mongodb.org/manual/reference/command/createIndexes/. * * @method * @param {Collection} a Collection instance. * @param {array} indexSpecs An array of index specifications to be created * @param {Object} [options] Optional settings. See Collection.prototype.createIndexes for a list of options. 
* @param {Collection~resultCallback} [callback] The command result callback */ function createIndexes(coll, indexSpecs, options, callback) { const capabilities = coll.s.topology.capabilities(); // Ensure we generate the correct name if the parameter is not set for (let i = 0; i < indexSpecs.length; i++) { if (indexSpecs[i].name == null) { const keys = []; // Did the user pass in a collation, check if our write server supports it if (indexSpecs[i].collation && capabilities && !capabilities.commandsTakeCollation) { return callback(new MongoError('server/primary/mongos does not support collation')); } for (let name in indexSpecs[i].key) { keys.push(`${name}_${indexSpecs[i].key[name]}`); } // Set the name indexSpecs[i].name = keys.join('_'); } } options = Object.assign({}, options, { readPreference: ReadPreference.PRIMARY }); // Execute the index executeCommand( coll.s.db, { createIndexes: coll.s.name, indexes: indexSpecs }, options, callback ); } function deleteCallback(err, r, callback) { if (callback == null) return; if (err && callback) return callback(err); if (r == null) return callback(null, { result: { ok: 1 } }); r.deletedCount = r.result.n; if (callback) callback(null, r); } /** * Delete multiple documents from the collection. * * @method * @param {Collection} a Collection instance. * @param {object} filter The Filter used to select the documents to remove * @param {object} [options] Optional settings. See Collection.prototype.deleteMany for a list of options. * @param {Collection~deleteWriteOpCallback} [callback] The command result callback */ function deleteMany(coll, filter, options, callback) { options.single = false; removeDocuments(coll, filter, options, (err, r) => deleteCallback(err, r, callback)); } /** * Delete a single document from the collection. * * @method * @param {Collection} a Collection instance. * @param {object} filter The Filter used to select the document to remove * @param {object} [options] Optional settings. See Collection.prototype.deleteOne for a list of options. * @param {Collection~deleteWriteOpCallback} [callback] The command result callback */ function deleteOne(coll, filter, options, callback) { options.single = true; removeDocuments(coll, filter, options, (err, r) => deleteCallback(err, r, callback)); } /** * Return a list of distinct values for the given key across a collection. * * @method * @param {Collection} a Collection instance. * @param {string} key Field of the document to find distinct values for. * @param {object} query The query for filtering the set of documents to which we apply the distinct filter. * @param {object} [options] Optional settings. See Collection.prototype.distinct for a list of options. 
* @param {Collection~resultCallback} [callback] The command result callback */ function distinct(coll, key, query, options, callback) { // maxTimeMS option const maxTimeMS = options.maxTimeMS; // Distinct command const cmd = { distinct: coll.s.name, key: key, query: query }; options = Object.assign({}, options); // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // Add maxTimeMS if defined if (typeof maxTimeMS === 'number') cmd.maxTimeMS = maxTimeMS; // Do we have a readConcern specified decorateWithReadConcern(cmd, coll, options); // Have we specified collation decorateWithCollation(cmd, coll, options); // Execute the command executeCommand(coll.s.db, cmd, options, (err, result) => { if (err) return handleCallback(callback, err); handleCallback(callback, null, result.values); }); } /** * Drop an index from this collection. * * @method * @param {Collection} a Collection instance. * @param {string} indexName Name of the index to drop. * @param {object} [options] Optional settings. See Collection.prototype.dropIndex for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function dropIndex(coll, indexName, options, callback) { // Delete index command const cmd = { dropIndexes: coll.s.name, index: indexName }; // Decorate command with writeConcern if supported applyWriteConcern(cmd, { db: coll.s.db, collection: coll }, options); // Execute command executeCommand(coll.s.db, cmd, options, (err, result) => { if (typeof callback !== 'function') return; if (err) return handleCallback(callback, err, null); handleCallback(callback, null, result); }); } /** * Drop all indexes from this collection. * * @method * @param {Collection} a Collection instance. * @param {Object} [options] Optional settings. See Collection.prototype.dropIndexes for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function dropIndexes(coll, options, callback) { dropIndex(coll, '*', options, err => { if (err) return handleCallback(callback, err, false); handleCallback(callback, null, true); }); } /** * Ensure that an index exists. If the index does not exist, this function creates it. * * @method * @param {Collection} a Collection instance. * @param {(string|object)} fieldOrSpec Defines the index. * @param {object} [options] Optional settings. See Collection.prototype.ensureIndex for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function ensureIndex(coll, fieldOrSpec, options, callback) { ensureIndexDb(coll.s.db, coll.s.name, fieldOrSpec, options, callback); } /** * Find and update a document. * * @method * @param {Collection} a Collection instance. * @param {object} query Query object to locate the object to modify. * @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate. * @param {object} doc The fields/vals to be updated. * @param {object} [options] Optional settings. See Collection.prototype.findAndModify for a list of options. 
* @param {Collection~findAndModifyCallback} [callback] The command result callback * @deprecated use findOneAndUpdate, findOneAndReplace or findOneAndDelete instead */ function findAndModify(coll, query, sort, doc, options, callback) { // Create findAndModify command object const queryObject = { findAndModify: coll.s.name, query: query }; sort = formattedOrderClause(sort); if (sort) { queryObject.sort = sort; } queryObject.new = options.new ? true : false; queryObject.remove = options.remove ? true : false; queryObject.upsert = options.upsert ? true : false; const projection = options.projection || options.fields; if (projection) { queryObject.fields = projection; } if (options.arrayFilters) { queryObject.arrayFilters = options.arrayFilters; delete options.arrayFilters; } if (doc && !options.remove) { queryObject.update = doc; } if (options.maxTimeMS) queryObject.maxTimeMS = options.maxTimeMS; // Either use override on the function, or go back to default on either the collection // level or db options.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions; // No check on the documents options.checkKeys = false; // Get the write concern settings const finalOptions = applyWriteConcern(options, { db: coll.s.db, collection: coll }, options); // Decorate the findAndModify command with the write Concern if (finalOptions.writeConcern) { queryObject.writeConcern = finalOptions.writeConcern; } // Have we specified bypassDocumentValidation if (finalOptions.bypassDocumentValidation === true) { queryObject.bypassDocumentValidation = finalOptions.bypassDocumentValidation; } // Have we specified collation decorateWithCollation(queryObject, coll, finalOptions); // Execute the command executeCommand(coll.s.db, queryObject, finalOptions, (err, result) => { if (err) return handleCallback(callback, err, null); return handleCallback(callback, null, result); }); } /** * Find and remove a document. * * @method * @param {Collection} a Collection instance. * @param {object} query Query object to locate the object to modify. * @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate. * @param {object} [options] Optional settings. See Collection.prototype.findAndRemove for a list of options. * @param {Collection~resultCallback} [callback] The command result callback * @deprecated use findOneAndDelete instead */ function findAndRemove(coll, query, sort, options, callback) { // Add the remove option options.remove = true; // Execute the callback findAndModify(coll, query, sort, null, options, callback); } /** * Fetch the first document that matches the query. * * @method * @param {Collection} a Collection instance. * @param {object} query Query for find Operation * @param {object} [options] Optional settings. See Collection.prototype.findOne for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function findOne(coll, query, options, callback) { const cursor = coll .find(query, options) .limit(-1) .batchSize(1); // Return the item cursor.next((err, item) => { if (err != null) return handleCallback(callback, toError(err), null); handleCallback(callback, null, item); }); } /** * Find a document and delete it in one atomic operation. This requires a write lock for the duration of the operation. * * @method * @param {Collection} a Collection instance. * @param {object} filter Document selection filter. * @param {object} [options] Optional settings. 
See Collection.prototype.findOneAndDelete for a list of options. * @param {Collection~findAndModifyCallback} [callback] The collection result callback */ function findOneAndDelete(coll, filter, options, callback) { // Final options const finalOptions = Object.assign({}, options); finalOptions.fields = options.projection; finalOptions.remove = true; // Execute find and Modify findAndModify(coll, filter, options.sort, null, finalOptions, callback); } /** * Find a document and replace it in one atomic operation. This requires a write lock for the duration of the operation. * * @method * @param {Collection} a Collection instance. * @param {object} filter Document selection filter. * @param {object} replacement Document replacing the matching document. * @param {object} [options] Optional settings. See Collection.prototype.findOneAndReplace for a list of options. * @param {Collection~findAndModifyCallback} [callback] The collection result callback */ function findOneAndReplace(coll, filter, replacement, options, callback) { // Final options const finalOptions = Object.assign({}, options); finalOptions.fields = options.projection; finalOptions.update = true; finalOptions.new = options.returnOriginal !== void 0 ? !options.returnOriginal : false; finalOptions.upsert = options.upsert !== void 0 ? !!options.upsert : false; // Execute findAndModify findAndModify(coll, filter, options.sort, replacement, finalOptions, callback); } /** * Find a document and update it in one atomic operation. This requires a write lock for the duration of the operation. * * @method * @param {Collection} a Collection instance. * @param {object} filter Document selection filter. * @param {object} update Update operations to be performed on the document * @param {object} [options] Optional settings. See Collection.prototype.findOneAndUpdate for a list of options. * @param {Collection~findAndModifyCallback} [callback] The collection result callback */ function findOneAndUpdate(coll, filter, update, options, callback) { // Final options const finalOptions = Object.assign({}, options); finalOptions.fields = options.projection; finalOptions.update = true; finalOptions.new = typeof options.returnOriginal === 'boolean' ? !options.returnOriginal : false; finalOptions.upsert = typeof options.upsert === 'boolean' ? options.upsert : false; // Execute findAndModify findAndModify(coll, filter, options.sort, update, finalOptions, callback); } /** * Execute a geo search using a geo haystack index on a collection. * * @method * @param {Collection} a Collection instance. * @param {number} x Point to search on the x axis, ensure the indexes are ordered in the same order. * @param {number} y Point to search on the y axis, ensure the indexes are ordered in the same order. * @param {object} [options] Optional settings. See Collection.prototype.geoHaystackSearch for a list of options. 
* @param {Collection~resultCallback} [callback] The command result callback */ function geoHaystackSearch(coll, x, y, options, callback) { // Build command object let commandObject = { geoSearch: coll.s.name, near: [x, y] }; // Remove read preference from hash if it exists commandObject = decorateCommand(commandObject, options, { readPreference: true, session: true }); options = Object.assign({}, options); // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // Do we have a readConcern specified decorateWithReadConcern(commandObject, coll, options); // Execute the command executeCommand(coll.s.db, commandObject, options, (err, res) => { if (err) return handleCallback(callback, err); if (res.err || res.errmsg) handleCallback(callback, toError(res)); // should we only be returning res.results here? Not sure if the user // should see the other return information handleCallback(callback, null, res); }); } /** * Run a group command across a collection. * * @method * @param {Collection} a Collection instance. * @param {(object|array|function|code)} keys An object, array or function expressing the keys to group by. * @param {object} condition An optional condition that must be true for a row to be considered. * @param {object} initial Initial value of the aggregation counter object. * @param {(function|Code)} reduce The reduce function aggregates (reduces) the objects iterated * @param {(function|Code)} finalize An optional function to be run on each item in the result set just before the item is returned. * @param {boolean} command Specify if you wish to run using the internal group command or using eval, default is true. * @param {object} [options] Optional settings. See Collection.prototype.group for a list of options. * @param {Collection~resultCallback} [callback] The command result callback * @deprecated MongoDB 3.6 or higher will no longer support the group command. We recommend rewriting using the aggregation framework. */ function group(coll, keys, condition, initial, reduce, finalize, command, options, callback) { // Execute using the command if (command) { const reduceFunction = reduce && reduce._bsontype === 'Code' ? reduce : new Code(reduce); const selector = { group: { ns: coll.s.name, $reduce: reduceFunction, cond: condition, initial: initial, out: 'inline' } }; // if finalize is defined if (finalize != null) selector.group['finalize'] = finalize; // Set up group selector if ('function' === typeof keys || (keys && keys._bsontype === 'Code')) { selector.group.$keyf = keys && keys._bsontype === 'Code' ? keys : new Code(keys); } else { const hash = {}; keys.forEach(key => { hash[key] = 1; }); selector.group.key = hash; } options = Object.assign({}, options); // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // Do we have a readConcern specified decorateWithReadConcern(selector, coll, options); // Have we specified collation decorateWithCollation(selector, coll, options); // Execute command executeCommand(coll.s.db, selector, options, (err, result) => { if (err) return handleCallback(callback, err, null); handleCallback(callback, null, result.retval); }); } else { // Create execution scope const scope = reduce != null && reduce._bsontype === 'Code' ? 
reduce.scope : {}; scope.ns = coll.s.name; scope.keys = keys; scope.condition = condition; scope.initial = initial; // Pass in the function text to execute within mongodb. const groupfn = groupFunction.replace(/ reduce;/, reduce.toString() + ';'); evaluate(coll.s.db, new Code(groupfn, scope), null, options, (err, results) => { if (err) return handleCallback(callback, err, null); handleCallback(callback, null, results.result || results); }); } } /** * Retrieve all the indexes on the collection. * * @method * @param {Collection} a Collection instance. * @param {Object} [options] Optional settings. See Collection.prototype.indexes for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function indexes(coll, options, callback) { options = Object.assign({}, { full: true }, options); indexInformationDb(coll.s.db, coll.s.name, options, callback); } /** * Check if one or more indexes exist on the collection. This fails on the first index that doesn't exist. * * @method * @param {Collection} a Collection instance. * @param {(string|array)} indexes One or more index names to check. * @param {Object} [options] Optional settings. See Collection.prototype.indexExists for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function indexExists(coll, indexes, options, callback) { indexInformation(coll, options, (err, indexInformation) => { // If we have an error return if (err != null) return handleCallback(callback, err, null); // Let's check for the index names if (!Array.isArray(indexes)) return handleCallback(callback, null, indexInformation[indexes] != null); // Check in list of indexes for (let i = 0; i < indexes.length; i++) { if (indexInformation[indexes[i]] == null) { return handleCallback(callback, null, false); } } // All keys found return true return handleCallback(callback, null, true); }); } /** * Retrieve this collection's index info. * * @method * @param {Collection} a Collection instance. * @param {object} [options] Optional settings. See Collection.prototype.indexInformation for a list of options. * @param {Collection~resultCallback} [callback] The command result callback */ function indexInformation(coll, options, callback) { indexInformationDb(coll.s.db, coll.s.name, options, callback); } function insertDocuments(coll, docs, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = options || {}; // Ensure we are operating on an array op docs docs = Array.isArray(docs) ? docs : [docs]; // Get the write concern options const finalOptions = applyWriteConcern( Object.assign({}, options), { db: coll.s.db, collection: coll }, options ); // If keep going set unordered if (finalOptions.keepGoing === true) finalOptions.ordered = false; finalOptions.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions; docs = prepareDocs(coll, docs, options); // File inserts coll.s.topology.insert(coll.s.namespace, docs, finalOptions, (err, result) => { if (callback == null) return; if (err) return handleCallback(callback, err); if (result == null) return handleCallback(callback, null, null); if (result.result.code) return handleCallback(callback, toError(result.result)); if (result.result.writeErrors) return handleCallback(callback, toError(result.result.writeErrors[0])); // Add docs to the list result.ops = docs; // Return the results handleCallback(callback, null, result); }); } /** * Insert a single document into the collection. 
See Collection.prototype.insertOne for more information. * * @method * @param {Collection} a Collection instance. * @param {object} doc Document to insert. * @param {object} [options] Optional settings. See Collection.prototype.insertOne for a list of options. * @param {Collection~insertOneWriteOpCallback} [callback] The command result callback */ function insertOne(coll, doc, options, callback) { if (Array.isArray(doc)) { return callback( MongoError.create({ message: 'doc parameter must be an object', driver: true }) ); } insertDocuments(coll, [doc], options, (err, r) => { if (callback == null) return; if (err && callback) return callback(err); // Workaround for pre 2.6 servers if (r == null) return callback(null, { result: { ok: 1 } }); // Add values to top level to ensure crud spec compatibility r.insertedCount = r.result.n; r.insertedId = doc._id; if (callback) callback(null, r); }); } /** * Determine whether the collection is a capped collection. * * @method * @param {Collection} a Collection instance. * @param {Object} [options] Optional settings. See Collection.prototype.isCapped for a list of options. * @param {Collection~resultCallback} [callback] The results callback */ function isCapped(coll, options, callback) { optionsOp(coll, options, (err, document) => { if (err) return handleCallback(callback, err); handleCallback(callback, null, document && document.capped); }); } /** * Run Map Reduce across a collection. Be aware that the inline option for out will return an array of results not a collection. * * @method * @param {Collection} a Collection instance. * @param {(function|string)} map The mapping function. * @param {(function|string)} reduce The reduce function. * @param {object} [options] Optional settings. See Collection.prototype.mapReduce for a list of options. 
* @param {Collection~resultCallback} [callback] The command result callback */ function mapReduce(coll, map, reduce, options, callback) { const mapCommandHash = { mapreduce: coll.s.name, map: map, reduce: reduce }; // Exclusion list const exclusionList = ['readPreference', 'session', 'bypassDocumentValidation']; // Add any other options passed in for (let n in options) { if ('scope' === n) { mapCommandHash[n] = processScope(options[n]); } else { // Only include if not in exclusion list if (exclusionList.indexOf(n) === -1) { mapCommandHash[n] = options[n]; } } } options = Object.assign({}, options); // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // If we have a read preference and inline is not set as output fail hard if ( options.readPreference !== false && options.readPreference !== 'primary' && options['out'] && (options['out'].inline !== 1 && options['out'] !== 'inline') ) { // Force readPreference to primary options.readPreference = 'primary'; // Decorate command with writeConcern if supported applyWriteConcern(mapCommandHash, { db: coll.s.db, collection: coll }, options); } else { decorateWithReadConcern(mapCommandHash, coll, options); } // Is bypassDocumentValidation specified if (options.bypassDocumentValidation === true) { mapCommandHash.bypassDocumentValidation = options.bypassDocumentValidation; } // Have we specified collation decorateWithCollation(mapCommandHash, coll, options); // Execute command executeCommand(coll.s.db, mapCommandHash, options, (err, result) => { if (err) return handleCallback(callback, err); // Check if we have an error if (1 !== result.ok || result.err || result.errmsg) { return handleCallback(callback, toError(result)); } // Create statistics value const stats = {}; if (result.timeMillis) stats['processtime'] = result.timeMillis; if (result.counts) stats['counts'] = result.counts; if (result.timing) stats['timing'] = result.timing; // invoked with inline? if (result.results) { // If we wish for no verbosity if (options['verbose'] == null || !options['verbose']) { return handleCallback(callback, null, result.results); } return handleCallback(callback, null, { results: result.results, stats: stats }); } // The returned collection let collection = null; // If we have an object it's a different db if (result.result != null && typeof result.result === 'object') { const doc = result.result; // Return a collection from another db const Db = require('../db'); collection = new Db(doc.db, coll.s.db.s.topology, coll.s.db.s.options).collection( doc.collection ); } else { // Create a collection object that wraps the result collection collection = coll.s.db.collection(result.result); } // If we wish for no verbosity if (options['verbose'] == null || !options['verbose']) { return handleCallback(callback, err, collection); } // Return stats as third set of values handleCallback(callback, err, { collection: collection, stats: stats }); }); } /** * Return the options of the collection. * * @method * @param {Collection} a Collection instance. * @param {Object} [options] Optional settings. See Collection.prototype.options for a list of options. 
* @param {Collection~resultCallback} [callback] The results callback */ function optionsOp(coll, opts, callback) { coll.s.db.listCollections({ name: coll.s.name }, opts).toArray((err, collections) => { if (err) return handleCallback(callback, err); if (collections.length === 0) { return handleCallback( callback, MongoError.create({ message: `collection ${coll.s.namespace} not found`, driver: true }) ); } handleCallback(callback, err, collections[0].options || null); }); } /** * Return N parallel cursors for a collection to allow parallel reading of the entire collection. There are * no ordering guarantees for returned results. * * @method * @param {Collection} a Collection instance. * @param {object} [options] Optional settings. See Collection.prototype.parallelCollectionScan for a list of options. * @param {Collection~parallelCollectionScanCallback} [callback] The command result callback */ function parallelCollectionScan(coll, options, callback) { // Create command object const commandObject = { parallelCollectionScan: coll.s.name, numCursors: options.numCursors }; // Do we have a readConcern specified decorateWithReadConcern(commandObject, coll, options); // Store the raw value const raw = options.raw; delete options['raw']; // Execute the command executeCommand(coll.s.db, commandObject, options, (err, result) => { if (err) return handleCallback(callback, err, null); if (result == null) return handleCallback( callback, new Error('no result returned for parallelCollectionScan'), null ); options = Object.assign({ explicitlyIgnoreSession: true }, options); const cursors = []; // Add the raw back to the option if (raw) options.raw = raw; // Create command cursors for each item for (let i = 0; i < result.cursors.length; i++) { const rawId = result.cursors[i].cursor.id; // Convert cursorId to Long if needed const cursorId = typeof rawId === 'number' ? Long.fromNumber(rawId) : rawId; // Add a command cursor cursors.push(coll.s.topology.cursor(coll.s.namespace, cursorId, options)); } handleCallback(callback, null, cursors); }); } // modifies documents before being inserted or updated function prepareDocs(coll, docs, options) { const forceServerObjectId = typeof options.forceServerObjectId === 'boolean' ? options.forceServerObjectId : coll.s.db.options.forceServerObjectId; // no need to modify the docs if server sets the ObjectId if (forceServerObjectId === true) { return docs; } return docs.map(doc => { if (forceServerObjectId !== true && doc._id == null) { doc._id = coll.s.pkFactory.createPk(); } return doc; }); } /** * Functions that are passed as scope args must * be converted to Code instances. * @ignore */ function processScope(scope) { if (!isObject(scope) || scope._bsontype === 'ObjectID') { return scope; } const keys = Object.keys(scope); let key; const new_scope = {}; for (let i = keys.length - 1; i >= 0; i--) { key = keys[i]; if ('function' === typeof scope[key]) { new_scope[key] = new Code(String(scope[key])); } else { new_scope[key] = processScope(scope[key]); } } return new_scope; } /** * Reindex all indexes on the collection. * * @method * @param {Collection} a Collection instance. * @param {Object} [options] Optional settings. See Collection.prototype.reIndex for a list of options. 
* @param {Collection~resultCallback} [callback] The command result callback */ function reIndex(coll, options, callback) { // Reindex const cmd = { reIndex: coll.s.name }; // Execute the command executeCommand(coll.s.db, cmd, options, (err, result) => { if (callback == null) return; if (err) return handleCallback(callback, err, null); handleCallback(callback, null, result.ok ? true : false); }); } function removeDocuments(coll, selector, options, callback) { if (typeof options === 'function') { (callback = options), (options = {}); } else if (typeof selector === 'function') { callback = selector; options = {}; selector = {}; } // Create an empty options object if the provided one is null options = options || {}; // Get the write concern options const finalOptions = applyWriteConcern( Object.assign({}, options), { db: coll.s.db, collection: coll }, options ); // If selector is null set empty if (selector == null) selector = {}; // Build the op const op = { q: selector, limit: 0 }; if (options.single) { op.limit = 1; } else if (finalOptions.retryWrites) { finalOptions.retryWrites = false; } // Have we specified collation decorateWithCollation(finalOptions, coll, options); // Execute the remove coll.s.topology.remove(coll.s.namespace, [op], finalOptions, (err, result) => { if (callback == null) return; if (err) return handleCallback(callback, err, null); if (result == null) return handleCallback(callback, null, null); if (result.result.code) return handleCallback(callback, toError(result.result)); if (result.result.writeErrors) return handleCallback(callback, toError(result.result.writeErrors[0])); // Return the results handleCallback(callback, null, result); }); } /** * Rename the collection. * * @method * @param {Collection} a Collection instance. * @param {string} newName New name of the collection. * @param {object} [options] Optional settings. See Collection.prototype.rename for a list of options. * @param {Collection~collectionResultCallback} [callback] The results callback */ function rename(coll, newName, options, callback) { const Collection = require('../collection'); // Check the collection name checkCollectionName(newName); // Build the command const renameCollection = `${coll.s.dbName}.${coll.s.name}`; const toCollection = `${coll.s.dbName}.${newName}`; const dropTarget = typeof options.dropTarget === 'boolean' ? options.dropTarget : false; const cmd = { renameCollection: renameCollection, to: toCollection, dropTarget: dropTarget }; // Decorate command with writeConcern if supported applyWriteConcern(cmd, { db: coll.s.db, collection: coll }, options); // Execute against admin executeDbAdminCommand(coll.s.db.admin().s.db, cmd, options, (err, doc) => { if (err) return handleCallback(callback, err, null); // We have an error if (doc.errmsg) return handleCallback(callback, toError(doc), null); try { return handleCallback( callback, null, new Collection( coll.s.db, coll.s.topology, coll.s.dbName, newName, coll.s.pkFactory, coll.s.options ) ); } catch (err) { return handleCallback(callback, toError(err), null); } }); } /** * Replace a document in the collection. * * @method * @param {Collection} a Collection instance. * @param {object} filter The Filter used to select the document to update * @param {object} doc The Document that replaces the matching document * @param {object} [options] Optional settings. See Collection.prototype.replaceOne for a list of options. 
* @param {Collection~updateWriteOpCallback} [callback] The command result callback */ function replaceOne(coll, filter, doc, options, callback) { // Set single document update options.multi = false; // Execute update updateDocuments(coll, filter, doc, options, (err, r) => { if (callback == null) return; if (err && callback) return callback(err); if (r == null) return callback(null, { result: { ok: 1 } }); r.modifiedCount = r.result.nModified != null ? r.result.nModified : r.result.n; r.upsertedId = Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? r.result.upserted[0] // FIXME(major): should be `r.result.upserted[0]._id` : null; r.upsertedCount = Array.isArray(r.result.upserted) && r.result.upserted.length ? r.result.upserted.length : 0; r.matchedCount = Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? 0 : r.result.n; r.ops = [doc]; if (callback) callback(null, r); }); } /** * Save a document. * * @method * @param {Collection} a Collection instance. * @param {object} doc Document to save * @param {object} [options] Optional settings. See Collection.prototype.save for a list of options. * @param {Collection~writeOpCallback} [callback] The command result callback * @deprecated use insertOne, insertMany, updateOne or updateMany */ function save(coll, doc, options, callback) { // Get the write concern options const finalOptions = applyWriteConcern( Object.assign({}, options), { db: coll.s.db, collection: coll }, options ); // Establish if we need to perform an insert or update if (doc._id != null) { finalOptions.upsert = true; return updateDocuments(coll, { _id: doc._id }, doc, finalOptions, callback); } // Insert the document insertDocuments(coll, [doc], finalOptions, (err, result) => { if (callback == null) return; if (doc == null) return handleCallback(callback, null, null); if (err) return handleCallback(callback, err, null); handleCallback(callback, null, result); }); } /** * Get all the collection statistics. * * @method * @param {Collection} a Collection instance. * @param {object} [options] Optional settings. See Collection.prototype.stats for a list of options. * @param {Collection~resultCallback} [callback] The collection result callback */ function stats(coll, options, callback) { // Build command object const commandObject = { collStats: coll.s.name }; // Check if we have the scale value if (options['scale'] != null) commandObject['scale'] = options['scale']; options = Object.assign({}, options); // Ensure we have the right read preference inheritance options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll }); // Execute the command executeCommand(coll.s.db, commandObject, options, callback); } function updateCallback(err, r, callback) { if (callback == null) return; if (err) return callback(err); if (r == null) return callback(null, { result: { ok: 1 } }); r.modifiedCount = r.result.nModified != null ? r.result.nModified : r.result.n; r.upsertedId = Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? r.result.upserted[0] // FIXME(major): should be `r.result.upserted[0]._id` : null; r.upsertedCount = Array.isArray(r.result.upserted) && r.result.upserted.length ? r.result.upserted.length : 0; r.matchedCount = Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? 
0 : r.result.n; callback(null, r); } function updateDocuments(coll, selector, document, options, callback) { if ('function' === typeof options) (callback = options), (options = null); if (options == null) options = {}; if (!('function' === typeof callback)) callback = null; // If we are not providing a selector or document throw if (selector == null || typeof selector !== 'object') return callback(toError('selector must be a valid JavaScript object')); if (document == null || typeof document !== 'object') return callback(toError('document must be a valid JavaScript object')); // Get the write concern options const finalOptions = applyWriteConcern( Object.assign({}, options), { db: coll.s.db, collection: coll }, options ); // Do we return the actual result document // Either use override on the function, or go back to default on either the collection // level or db finalOptions.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions; // Execute the operation const op = { q: selector, u: document }; op.upsert = options.upsert !== void 0 ? !!options.upsert : false; op.multi = options.multi !== void 0 ? !!options.multi : false; if (finalOptions.arrayFilters) { op.arrayFilters = finalOptions.arrayFilters; delete finalOptions.arrayFilters; } if (finalOptions.retryWrites && op.multi) { finalOptions.retryWrites = false; } // Have we specified collation decorateWithCollation(finalOptions, coll, options); // Update options coll.s.topology.update(coll.s.namespace, [op], finalOptions, (err, result) => { if (callback == null) return; if (err) return handleCallback(callback, err, null); if (result == null) return handleCallback(callback, null, null); if (result.result.code) return handleCallback(callback, toError(result.result)); if (result.result.writeErrors) return handleCallback(callback, toError(result.result.writeErrors[0])); // Return the results handleCallback(callback, null, result); }); } /** * Update multiple documents in the collection. * * @method * @param {Collection} a Collection instance. * @param {object} filter The Filter used to select the documents to update * @param {object} update The update operations to be applied to the document * @param {object} [options] Optional settings. See Collection.prototype.updateMany for a list of options. * @param {Collection~updateWriteOpCallback} [callback] The command result callback */ function updateMany(coll, filter, update, options, callback) { // Set single document update options.multi = true; // Execute update updateDocuments(coll, filter, update, options, (err, r) => updateCallback(err, r, callback)); } /** * Update a single document in the collection. * * @method * @param {Collection} a Collection instance. * @param {object} filter The Filter used to select the document to update * @param {object} update The update operations to be applied to the document * @param {object} [options] Optional settings. See Collection.prototype.updateOne for a list of options. 
* @param {Collection~updateWriteOpCallback} [callback] The command result callback */ function updateOne(coll, filter, update, options, callback) { // Set single document update options.multi = false; // Execute update updateDocuments(coll, filter, update, options, (err, r) => updateCallback(err, r, callback)); } module.exports = { bulkWrite, checkForAtomicOperators, count, countDocuments, createIndex, createIndexes, deleteMany, deleteOne, distinct, dropIndex, dropIndexes, ensureIndex, findAndModify, findAndRemove, findOne, findOneAndDelete, findOneAndReplace, findOneAndUpdate, geoHaystackSearch, group, indexes, indexExists, indexInformation, insertOne, isCapped, mapReduce, optionsOp, parallelCollectionScan, prepareDocs, reIndex, removeDocuments, rename, replaceOne, save, stats, updateDocuments, updateMany, updateOne };
1
14,586
Do we normally force ReadPreference primary on other write operations? Or is it just that we normally ignore it?
mongodb-node-mongodb-native
js
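A minimal sketch (not the driver's code) of the rule the question above is probing. The helper name mustRunOnPrimary is invented; the out-check mirrors the condition in mapReduce() in the file above. A mapReduce whose out is not inline writes its results to a collection, so a secondary read preference cannot be honored and is rewritten, which is distinct from a write op that simply never consults the setting.

// Hypothetical helper (name invented): mirrors the out-check in mapReduce() above.
function mustRunOnPrimary(options) {
  const out = options.out;
  // only a non-inline `out` makes the command write to a collection
  return out != null && out !== 'inline' && out.inline !== 1;
}

const opts = { readPreference: 'secondary', out: { replace: 'results' } };
if (mustRunOnPrimary(opts) && opts.readPreference !== 'primary') {
  opts.readPreference = 'primary'; // forced, as mapReduce() does above
}
console.log(opts.readPreference); // 'primary'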
@@ -292,6 +292,12 @@ public class Spark3Util { return Expressions.hours(sourceName); } + @Override + public Transform alwaysNull(int fieldId, String sourceName, int sourceId) { + // do nothing for alwaysNull, it doesn't need to be converted to a transform + return null; + } + @Override public Transform unknown(int fieldId, String sourceName, int sourceId, String transform) { return Expressions.apply(transform, Expressions.column(sourceName));
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import org.apache.hadoop.fs.Path; import org.apache.iceberg.DistributionMode; import org.apache.iceberg.FileFormat; import org.apache.iceberg.MetadataTableType; import org.apache.iceberg.NullOrder; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.TableProperties; import org.apache.iceberg.UpdateProperties; import org.apache.iceberg.UpdateSchema; import org.apache.iceberg.catalog.Namespace; import org.apache.iceberg.catalog.TableIdentifier; import org.apache.iceberg.expressions.BoundPredicate; import org.apache.iceberg.expressions.ExpressionVisitors; import org.apache.iceberg.expressions.Term; import org.apache.iceberg.expressions.UnboundPredicate; import org.apache.iceberg.hadoop.HadoopInputFile; import org.apache.iceberg.io.FileIO; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.relocated.com.google.common.base.Joiner; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; import org.apache.iceberg.spark.SparkTableUtil.SparkPartition; import org.apache.iceberg.spark.source.SparkTable; import org.apache.iceberg.transforms.PartitionSpecVisitor; import org.apache.iceberg.transforms.SortOrderVisitor; import org.apache.iceberg.types.Type; import org.apache.iceberg.types.TypeUtil; import org.apache.iceberg.types.Types; import org.apache.iceberg.util.ArrayUtil; import org.apache.iceberg.util.Pair; import org.apache.iceberg.util.PropertyUtil; import org.apache.iceberg.util.SortOrderUtil; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.RuntimeConfig; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.catalyst.CatalystTypeConverters; import org.apache.spark.sql.catalyst.analysis.NoSuchTableException; import org.apache.spark.sql.catalyst.parser.ParseException; import org.apache.spark.sql.catalyst.parser.ParserInterface; import org.apache.spark.sql.connector.catalog.CatalogManager; import org.apache.spark.sql.connector.catalog.CatalogPlugin; import org.apache.spark.sql.connector.catalog.Identifier; import org.apache.spark.sql.connector.catalog.Table; import org.apache.spark.sql.connector.catalog.TableCatalog; import org.apache.spark.sql.connector.catalog.TableChange; import 
org.apache.spark.sql.connector.expressions.Expression; import org.apache.spark.sql.connector.expressions.Expressions; import org.apache.spark.sql.connector.expressions.Literal; import org.apache.spark.sql.connector.expressions.Transform; import org.apache.spark.sql.connector.iceberg.distributions.Distribution; import org.apache.spark.sql.connector.iceberg.distributions.Distributions; import org.apache.spark.sql.connector.iceberg.distributions.OrderedDistribution; import org.apache.spark.sql.connector.iceberg.expressions.SortOrder; import org.apache.spark.sql.execution.datasources.FileStatusCache; import org.apache.spark.sql.execution.datasources.InMemoryFileIndex; import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation; import org.apache.spark.sql.types.IntegerType; import org.apache.spark.sql.types.LongType; import org.apache.spark.sql.types.StructType; import org.apache.spark.sql.util.CaseInsensitiveStringMap; import scala.Option; import scala.Predef; import scala.Some; import scala.collection.JavaConverters; import scala.collection.Seq; import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE; import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_DEFAULT; import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_RANGE; public class Spark3Util { private static final Set<String> LOCALITY_WHITELIST_FS = ImmutableSet.of("hdfs"); private static final Set<String> RESERVED_PROPERTIES = ImmutableSet.of( TableCatalog.PROP_LOCATION, TableCatalog.PROP_PROVIDER); private static final Joiner DOT = Joiner.on("."); private Spark3Util() { } public static Map<String, String> rebuildCreateProperties(Map<String, String> createProperties) { ImmutableMap.Builder<String, String> tableProperties = ImmutableMap.builder(); createProperties.entrySet().stream() .filter(entry -> !RESERVED_PROPERTIES.contains(entry.getKey())) .forEach(tableProperties::put); String provider = createProperties.get(TableCatalog.PROP_PROVIDER); if ("parquet".equalsIgnoreCase(provider)) { tableProperties.put(TableProperties.DEFAULT_FILE_FORMAT, "parquet"); } else if ("avro".equalsIgnoreCase(provider)) { tableProperties.put(TableProperties.DEFAULT_FILE_FORMAT, "avro"); } else if ("orc".equalsIgnoreCase(provider)) { tableProperties.put(TableProperties.DEFAULT_FILE_FORMAT, "orc"); } else if (provider != null && !"iceberg".equalsIgnoreCase(provider)) { throw new IllegalArgumentException("Unsupported format in USING: " + provider); } return tableProperties.build(); } /** * Applies a list of Spark table changes to an {@link UpdateProperties} operation. * * @param pendingUpdate an uncommitted UpdateProperties operation to configure * @param changes a list of Spark table changes * @return the UpdateProperties operation configured with the changes */ public static UpdateProperties applyPropertyChanges(UpdateProperties pendingUpdate, List<TableChange> changes) { for (TableChange change : changes) { if (change instanceof TableChange.SetProperty) { TableChange.SetProperty set = (TableChange.SetProperty) change; pendingUpdate.set(set.property(), set.value()); } else if (change instanceof TableChange.RemoveProperty) { TableChange.RemoveProperty remove = (TableChange.RemoveProperty) change; pendingUpdate.remove(remove.property()); } else { throw new UnsupportedOperationException("Cannot apply unknown table change: " + change); } } return pendingUpdate; } /** * Applies a list of Spark table changes to an {@link UpdateSchema} operation. 
* * @param pendingUpdate an uncommitted UpdateSchema operation to configure * @param changes a list of Spark table changes * @return the UpdateSchema operation configured with the changes */ public static UpdateSchema applySchemaChanges(UpdateSchema pendingUpdate, List<TableChange> changes) { for (TableChange change : changes) { if (change instanceof TableChange.AddColumn) { apply(pendingUpdate, (TableChange.AddColumn) change); } else if (change instanceof TableChange.UpdateColumnType) { TableChange.UpdateColumnType update = (TableChange.UpdateColumnType) change; Type newType = SparkSchemaUtil.convert(update.newDataType()); Preconditions.checkArgument(newType.isPrimitiveType(), "Cannot update '%s', not a primitive type: %s", DOT.join(update.fieldNames()), update.newDataType()); pendingUpdate.updateColumn(DOT.join(update.fieldNames()), newType.asPrimitiveType()); } else if (change instanceof TableChange.UpdateColumnComment) { TableChange.UpdateColumnComment update = (TableChange.UpdateColumnComment) change; pendingUpdate.updateColumnDoc(DOT.join(update.fieldNames()), update.newComment()); } else if (change instanceof TableChange.RenameColumn) { TableChange.RenameColumn rename = (TableChange.RenameColumn) change; pendingUpdate.renameColumn(DOT.join(rename.fieldNames()), rename.newName()); } else if (change instanceof TableChange.DeleteColumn) { TableChange.DeleteColumn delete = (TableChange.DeleteColumn) change; pendingUpdate.deleteColumn(DOT.join(delete.fieldNames())); } else if (change instanceof TableChange.UpdateColumnNullability) { TableChange.UpdateColumnNullability update = (TableChange.UpdateColumnNullability) change; if (update.nullable()) { pendingUpdate.makeColumnOptional(DOT.join(update.fieldNames())); } else { pendingUpdate.requireColumn(DOT.join(update.fieldNames())); } } else if (change instanceof TableChange.UpdateColumnPosition) { apply(pendingUpdate, (TableChange.UpdateColumnPosition) change); } else { throw new UnsupportedOperationException("Cannot apply unknown table change: " + change); } } return pendingUpdate; } private static void apply(UpdateSchema pendingUpdate, TableChange.UpdateColumnPosition update) { Preconditions.checkArgument(update.position() != null, "Invalid position: null"); if (update.position() instanceof TableChange.After) { TableChange.After after = (TableChange.After) update.position(); String referenceField = peerName(update.fieldNames(), after.column()); pendingUpdate.moveAfter(DOT.join(update.fieldNames()), referenceField); } else if (update.position() instanceof TableChange.First) { pendingUpdate.moveFirst(DOT.join(update.fieldNames())); } else { throw new IllegalArgumentException("Unknown position for reorder: " + update.position()); } } private static void apply(UpdateSchema pendingUpdate, TableChange.AddColumn add) { Preconditions.checkArgument(add.isNullable(), "Incompatible change: cannot add required column: %s", leafName(add.fieldNames())); Type type = SparkSchemaUtil.convert(add.dataType()); pendingUpdate.addColumn(parentName(add.fieldNames()), leafName(add.fieldNames()), type, add.comment()); if (add.position() instanceof TableChange.After) { TableChange.After after = (TableChange.After) add.position(); String referenceField = peerName(add.fieldNames(), after.column()); pendingUpdate.moveAfter(DOT.join(add.fieldNames()), referenceField); } else if (add.position() instanceof TableChange.First) { pendingUpdate.moveFirst(DOT.join(add.fieldNames())); } else { Preconditions.checkArgument(add.position() == null, "Cannot add '%s' at unknown 
position: %s", DOT.join(add.fieldNames()), add.position()); } } public static org.apache.iceberg.Table toIcebergTable(Table table) { Preconditions.checkArgument(table instanceof SparkTable, "Table %s is not an Iceberg table", table); SparkTable sparkTable = (SparkTable) table; return sparkTable.table(); } /** * Converts a PartitionSpec to Spark transforms. * * @param spec a PartitionSpec * @return an array of Transforms */ public static Transform[] toTransforms(PartitionSpec spec) { List<Transform> transforms = PartitionSpecVisitor.visit(spec, new PartitionSpecVisitor<Transform>() { @Override public Transform identity(String sourceName, int sourceId) { return Expressions.identity(sourceName); } @Override public Transform bucket(String sourceName, int sourceId, int numBuckets) { return Expressions.bucket(numBuckets, sourceName); } @Override public Transform truncate(String sourceName, int sourceId, int width) { return Expressions.apply("truncate", Expressions.column(sourceName), Expressions.literal(width)); } @Override public Transform year(String sourceName, int sourceId) { return Expressions.years(sourceName); } @Override public Transform month(String sourceName, int sourceId) { return Expressions.months(sourceName); } @Override public Transform day(String sourceName, int sourceId) { return Expressions.days(sourceName); } @Override public Transform hour(String sourceName, int sourceId) { return Expressions.hours(sourceName); } @Override public Transform unknown(int fieldId, String sourceName, int sourceId, String transform) { return Expressions.apply(transform, Expressions.column(sourceName)); } }); return transforms.toArray(new Transform[0]); } public static Distribution buildRequiredDistribution(org.apache.iceberg.Table table) { DistributionMode distributionMode = distributionModeFor(table); switch (distributionMode) { case NONE: return Distributions.unspecified(); case HASH: if (table.spec().isUnpartitioned()) { return Distributions.unspecified(); } else { return Distributions.clustered(toTransforms(table.spec())); } case RANGE: if (table.spec().isUnpartitioned() && table.sortOrder().isUnsorted()) { return Distributions.unspecified(); } else { org.apache.iceberg.SortOrder requiredSortOrder = SortOrderUtil.buildSortOrder(table); return Distributions.ordered(convert(requiredSortOrder)); } default: throw new IllegalArgumentException("Unsupported distribution mode: " + distributionMode); } } public static SortOrder[] buildRequiredOrdering(Distribution distribution, org.apache.iceberg.Table table) { if (distribution instanceof OrderedDistribution) { OrderedDistribution orderedDistribution = (OrderedDistribution) distribution; return orderedDistribution.ordering(); } else { org.apache.iceberg.SortOrder requiredSortOrder = SortOrderUtil.buildSortOrder(table); return convert(requiredSortOrder); } } public static DistributionMode distributionModeFor(org.apache.iceberg.Table table) { boolean isSortedTable = !table.sortOrder().isUnsorted(); String defaultModeName = isSortedTable ? 
WRITE_DISTRIBUTION_MODE_RANGE : WRITE_DISTRIBUTION_MODE_DEFAULT; String modeName = table.properties().getOrDefault(WRITE_DISTRIBUTION_MODE, defaultModeName); return DistributionMode.fromName(modeName); } public static SortOrder[] convert(org.apache.iceberg.SortOrder sortOrder) { List<OrderField> converted = SortOrderVisitor.visit(sortOrder, new SortOrderToSpark()); return converted.toArray(new OrderField[0]); } public static Term toIcebergTerm(Transform transform) { Preconditions.checkArgument(transform.references().length == 1, "Cannot convert transform with more than one column reference: %s", transform); String colName = DOT.join(transform.references()[0].fieldNames()); switch (transform.name()) { case "identity": return org.apache.iceberg.expressions.Expressions.ref(colName); case "bucket": return org.apache.iceberg.expressions.Expressions.bucket(colName, findWidth(transform)); case "years": return org.apache.iceberg.expressions.Expressions.year(colName); case "months": return org.apache.iceberg.expressions.Expressions.month(colName); case "date": case "days": return org.apache.iceberg.expressions.Expressions.day(colName); case "date_hour": case "hours": return org.apache.iceberg.expressions.Expressions.hour(colName); case "truncate": return org.apache.iceberg.expressions.Expressions.truncate(colName, findWidth(transform)); default: throw new UnsupportedOperationException("Transform is not supported: " + transform); } } /** * Converts Spark transforms into a {@link PartitionSpec}. * * @param schema the table schema * @param partitioning Spark Transforms * @return a PartitionSpec */ public static PartitionSpec toPartitionSpec(Schema schema, Transform[] partitioning) { if (partitioning == null || partitioning.length == 0) { return PartitionSpec.unpartitioned(); } PartitionSpec.Builder builder = PartitionSpec.builderFor(schema); for (Transform transform : partitioning) { Preconditions.checkArgument(transform.references().length == 1, "Cannot convert transform with more than one column reference: %s", transform); String colName = DOT.join(transform.references()[0].fieldNames()); switch (transform.name()) { case "identity": builder.identity(colName); break; case "bucket": builder.bucket(colName, findWidth(transform)); break; case "years": builder.year(colName); break; case "months": builder.month(colName); break; case "date": case "days": builder.day(colName); break; case "date_hour": case "hours": builder.hour(colName); break; case "truncate": builder.truncate(colName, findWidth(transform)); break; default: throw new UnsupportedOperationException("Transform is not supported: " + transform); } } return builder.build(); } @SuppressWarnings("unchecked") private static int findWidth(Transform transform) { for (Expression expr : transform.arguments()) { if (expr instanceof Literal) { if (((Literal) expr).dataType() instanceof IntegerType) { Literal<Integer> lit = (Literal<Integer>) expr; Preconditions.checkArgument(lit.value() > 0, "Unsupported width for transform: %s", transform.describe()); return lit.value(); } else if (((Literal) expr).dataType() instanceof LongType) { Literal<Long> lit = (Literal<Long>) expr; Preconditions.checkArgument(lit.value() > 0 && lit.value() < Integer.MAX_VALUE, "Unsupported width for transform: %s", transform.describe()); if (lit.value() > Integer.MAX_VALUE) { throw new IllegalArgumentException(); } return lit.value().intValue(); } } } throw new IllegalArgumentException("Cannot find width for transform: " + transform.describe()); } private static String 
leafName(String[] fieldNames) { Preconditions.checkArgument(fieldNames.length > 0, "Invalid field name: at least one name is required"); return fieldNames[fieldNames.length - 1]; } private static String peerName(String[] fieldNames, String fieldName) { if (fieldNames.length > 1) { String[] peerNames = Arrays.copyOf(fieldNames, fieldNames.length); peerNames[fieldNames.length - 1] = fieldName; return DOT.join(peerNames); } return fieldName; } private static String parentName(String[] fieldNames) { if (fieldNames.length > 1) { return DOT.join(Arrays.copyOfRange(fieldNames, 0, fieldNames.length - 1)); } return null; } public static String describe(org.apache.iceberg.expressions.Expression expr) { return ExpressionVisitors.visit(expr, DescribeExpressionVisitor.INSTANCE); } public static String describe(Schema schema) { return TypeUtil.visit(schema, DescribeSchemaVisitor.INSTANCE); } public static String describe(Type type) { return TypeUtil.visit(type, DescribeSchemaVisitor.INSTANCE); } public static String describe(org.apache.iceberg.SortOrder order) { return Joiner.on(", ").join(SortOrderVisitor.visit(order, DescribeSortOrderVisitor.INSTANCE)); } public static boolean isLocalityEnabled(FileIO io, String location, CaseInsensitiveStringMap readOptions) { InputFile in = io.newInputFile(location); if (in instanceof HadoopInputFile) { String scheme = ((HadoopInputFile) in).getFileSystem().getScheme(); return readOptions.getBoolean("locality", LOCALITY_WHITELIST_FS.contains(scheme)); } return false; } public static boolean isVectorizationEnabled(FileFormat fileFormat, Map<String, String> properties, RuntimeConfig sessionConf, CaseInsensitiveStringMap readOptions) { String readOptionValue = readOptions.get(SparkReadOptions.VECTORIZATION_ENABLED); if (readOptionValue != null) { return Boolean.parseBoolean(readOptionValue); } String sessionConfValue = sessionConf.get("spark.sql.iceberg.vectorization.enabled", null); if (sessionConfValue != null) { return Boolean.parseBoolean(sessionConfValue); } switch (fileFormat) { case PARQUET: return PropertyUtil.propertyAsBoolean( properties, TableProperties.PARQUET_VECTORIZATION_ENABLED, TableProperties.PARQUET_VECTORIZATION_ENABLED_DEFAULT); case ORC: return PropertyUtil.propertyAsBoolean( properties, TableProperties.ORC_VECTORIZATION_ENABLED, TableProperties.ORC_VECTORIZATION_ENABLED_DEFAULT); default: return false; } } public static int batchSize(Map<String, String> properties, CaseInsensitiveStringMap readOptions) { return readOptions.getInt(SparkReadOptions.VECTORIZATION_BATCH_SIZE, PropertyUtil.propertyAsInt(properties, TableProperties.PARQUET_BATCH_SIZE, TableProperties.PARQUET_BATCH_SIZE_DEFAULT)); } public static Long propertyAsLong(CaseInsensitiveStringMap options, String property, Long defaultValue) { if (defaultValue != null) { return options.getLong(property, defaultValue); } String value = options.get(property); if (value != null) { return Long.parseLong(value); } return null; } public static Integer propertyAsInt(CaseInsensitiveStringMap options, String property, Integer defaultValue) { if (defaultValue != null) { return options.getInt(property, defaultValue); } String value = options.get(property); if (value != null) { return Integer.parseInt(value); } return null; } public static class DescribeSchemaVisitor extends TypeUtil.SchemaVisitor<String> { private static final Joiner COMMA = Joiner.on(','); private static final DescribeSchemaVisitor INSTANCE = new DescribeSchemaVisitor(); private DescribeSchemaVisitor() { } @Override public String 
schema(Schema schema, String structResult) { return structResult; } @Override public String struct(Types.StructType struct, List<String> fieldResults) { return "struct<" + COMMA.join(fieldResults) + ">"; } @Override public String field(Types.NestedField field, String fieldResult) { return field.name() + ": " + fieldResult + (field.isRequired() ? " not null" : ""); } @Override public String list(Types.ListType list, String elementResult) { return "list<" + elementResult + ">"; } @Override public String map(Types.MapType map, String keyResult, String valueResult) { return "map<" + keyResult + ", " + valueResult + ">"; } @Override public String primitive(Type.PrimitiveType primitive) { switch (primitive.typeId()) { case BOOLEAN: return "boolean"; case INTEGER: return "int"; case LONG: return "bigint"; case FLOAT: return "float"; case DOUBLE: return "double"; case DATE: return "date"; case TIME: return "time"; case TIMESTAMP: return "timestamp"; case STRING: case UUID: return "string"; case FIXED: case BINARY: return "binary"; case DECIMAL: Types.DecimalType decimal = (Types.DecimalType) primitive; return "decimal(" + decimal.precision() + "," + decimal.scale() + ")"; } throw new UnsupportedOperationException("Cannot convert type to SQL: " + primitive); } } private static class DescribeExpressionVisitor extends ExpressionVisitors.ExpressionVisitor<String> { private static final DescribeExpressionVisitor INSTANCE = new DescribeExpressionVisitor(); private DescribeExpressionVisitor() { } @Override public String alwaysTrue() { return "true"; } @Override public String alwaysFalse() { return "false"; } @Override public String not(String result) { return "NOT (" + result + ")"; } @Override public String and(String leftResult, String rightResult) { return "(" + leftResult + " AND " + rightResult + ")"; } @Override public String or(String leftResult, String rightResult) { return "(" + leftResult + " OR " + rightResult + ")"; } @Override public <T> String predicate(BoundPredicate<T> pred) { throw new UnsupportedOperationException("Cannot convert bound predicates to SQL"); } @Override public <T> String predicate(UnboundPredicate<T> pred) { switch (pred.op()) { case IS_NULL: return pred.ref().name() + " IS NULL"; case NOT_NULL: return pred.ref().name() + " IS NOT NULL"; case IS_NAN: return "is_nan(" + pred.ref().name() + ")"; case NOT_NAN: return "not_nan(" + pred.ref().name() + ")"; case LT: return pred.ref().name() + " < " + sqlString(pred.literal()); case LT_EQ: return pred.ref().name() + " <= " + sqlString(pred.literal()); case GT: return pred.ref().name() + " > " + sqlString(pred.literal()); case GT_EQ: return pred.ref().name() + " >= " + sqlString(pred.literal()); case EQ: return pred.ref().name() + " = " + sqlString(pred.literal()); case NOT_EQ: return pred.ref().name() + " != " + sqlString(pred.literal()); case STARTS_WITH: return pred.ref().name() + " LIKE '" + pred.literal() + "%'"; case IN: return pred.ref().name() + " IN (" + sqlString(pred.literals()) + ")"; case NOT_IN: return pred.ref().name() + " NOT IN (" + sqlString(pred.literals()) + ")"; default: throw new UnsupportedOperationException("Cannot convert predicate to SQL: " + pred); } } private static <T> String sqlString(List<org.apache.iceberg.expressions.Literal<T>> literals) { return literals.stream().map(DescribeExpressionVisitor::sqlString).collect(Collectors.joining(", ")); } private static String sqlString(org.apache.iceberg.expressions.Literal<?> lit) { if (lit.value() instanceof String) { return "'" + lit.value() + "'"; } else 
if (lit.value() instanceof ByteBuffer) { throw new IllegalArgumentException("Cannot convert bytes to SQL literal: " + lit); } else { return lit.value().toString(); } } } /** * Returns a Metadata Table Dataset if it can be loaded from a Spark V2 Catalog * * Because Spark does not allow more than 1 piece in the namespace for a Session Catalog table, we circumvent * the entire resolution path for tables and instead look up the table directly ourselves. This lets us correctly * get metadata tables for the SessionCatalog, if we didn't have to work around this we could just use spark.table. * * @param spark SparkSession used for looking up catalog references and tables * @param name The multipart identifier of the base Iceberg table * @param type The type of metadata table to load * @return null if we cannot find the Metadata Table, a Dataset of rows otherwise */ private static Dataset<Row> loadCatalogMetadataTable(SparkSession spark, String name, MetadataTableType type) { try { CatalogAndIdentifier catalogAndIdentifier = catalogAndIdentifier(spark, name); if (catalogAndIdentifier.catalog instanceof BaseCatalog) { BaseCatalog catalog = (BaseCatalog) catalogAndIdentifier.catalog; Identifier baseId = catalogAndIdentifier.identifier; Identifier metaId = Identifier.of(ArrayUtil.add(baseId.namespace(), baseId.name()), type.name()); Table metaTable = catalog.loadTable(metaId); return Dataset.ofRows(spark, DataSourceV2Relation.create(metaTable, Some.apply(catalog), Some.apply(metaId))); } } catch (NoSuchTableException | ParseException e) { // Could not find table return null; } // Could not find table return null; } public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, String name) throws ParseException { return catalogAndIdentifier(spark, name, spark.sessionState().catalogManager().currentCatalog()); } public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, String name, CatalogPlugin defaultCatalog) throws ParseException { ParserInterface parser = spark.sessionState().sqlParser(); Seq<String> multiPartIdentifier = parser.parseMultipartIdentifier(name); List<String> javaMultiPartIdentifier = JavaConverters.seqAsJavaList(multiPartIdentifier); return catalogAndIdentifier(spark, javaMultiPartIdentifier, defaultCatalog); } public static CatalogAndIdentifier catalogAndIdentifier(String description, SparkSession spark, String name) { return catalogAndIdentifier(description, spark, name, spark.sessionState().catalogManager().currentCatalog()); } public static CatalogAndIdentifier catalogAndIdentifier(String description, SparkSession spark, String name, CatalogPlugin defaultCatalog) { try { return catalogAndIdentifier(spark, name, defaultCatalog); } catch (ParseException e) { throw new IllegalArgumentException("Cannot parse " + description + ": " + name, e); } } public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, List<String> nameParts) { return catalogAndIdentifier(spark, nameParts, spark.sessionState().catalogManager().currentCatalog()); } /** * A modified version of Spark's LookupCatalog.CatalogAndIdentifier.unapply * Attempts to find the catalog and identifier a multipart identifier represents * @param spark Spark session to use for resolution * @param nameParts Multipart identifier representing a table * @param defaultCatalog Catalog to use if none is specified * @return The CatalogPlugin and Identifier for the table */ public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, List<String> nameParts, CatalogPlugin 
defaultCatalog) { CatalogManager catalogManager = spark.sessionState().catalogManager(); String[] currentNamespace; if (defaultCatalog.equals(catalogManager.currentCatalog())) { currentNamespace = catalogManager.currentNamespace(); } else { currentNamespace = defaultCatalog.defaultNamespace(); } Pair<CatalogPlugin, Identifier> catalogIdentifier = SparkUtil.catalogAndIdentifier(nameParts, catalogName -> { try { return catalogManager.catalog(catalogName); } catch (Exception e) { return null; } }, Identifier::of, defaultCatalog, currentNamespace ); return new CatalogAndIdentifier(catalogIdentifier); } /** * This mimics a class inside of Spark which is private inside of LookupCatalog. */ public static class CatalogAndIdentifier { private final CatalogPlugin catalog; private final Identifier identifier; public CatalogAndIdentifier(CatalogPlugin catalog, Identifier identifier) { this.catalog = catalog; this.identifier = identifier; } public CatalogAndIdentifier(Pair<CatalogPlugin, Identifier> identifier) { this.catalog = identifier.first(); this.identifier = identifier.second(); } public CatalogPlugin catalog() { return catalog; } public Identifier identifier() { return identifier; } } public static TableIdentifier identifierToTableIdentifier(Identifier identifier) { return TableIdentifier.of(Namespace.of(identifier.namespace()), identifier.name()); } /** * Use Spark to list all partitions in the table. * * @param spark a Spark session * @param rootPath a table identifier * @param format format of the file * @return all table's partitions */ public static List<SparkPartition> getPartitions(SparkSession spark, Path rootPath, String format) { FileStatusCache fileStatusCache = FileStatusCache.getOrCreate(spark); Map<String, String> emptyMap = Collections.emptyMap(); InMemoryFileIndex fileIndex = new InMemoryFileIndex( spark, JavaConverters .collectionAsScalaIterableConverter(ImmutableList.of(rootPath)) .asScala() .toSeq(), JavaConverters .mapAsScalaMapConverter(emptyMap) .asScala() .toMap(Predef.conforms()), Option.empty(), fileStatusCache, Option.empty(), Option.empty()); org.apache.spark.sql.execution.datasources.PartitionSpec spec = fileIndex.partitionSpec(); StructType schema = spec.partitionColumns(); return JavaConverters .seqAsJavaListConverter(spec.partitions()) .asJava() .stream() .map(partition -> { Map<String, String> values = new HashMap<>(); JavaConverters.asJavaIterableConverter(schema).asJava().forEach(field -> { int fieldIndex = schema.fieldIndex(field.name()); Object catalystValue = partition.values().get(fieldIndex, field.dataType()); Object value = CatalystTypeConverters.convertToScala(catalystValue, field.dataType()); values.put(field.name(), value.toString()); }); return new SparkPartition(values, partition.path().toString(), format); }).collect(Collectors.toList()); } public static org.apache.spark.sql.catalyst.TableIdentifier toV1TableIdentifier(Identifier identifier) { String[] namespace = identifier.namespace(); Preconditions.checkArgument(namespace.length <= 1, "Cannot convert %s to a Spark v1 identifier, namespace contains more than 1 part", identifier); String table = identifier.name(); Option<String> database = namespace.length == 1 ? 
Option.apply(namespace[0]) : Option.empty(); return org.apache.spark.sql.catalyst.TableIdentifier.apply(table, database); } private static class DescribeSortOrderVisitor implements SortOrderVisitor<String> { private static final DescribeSortOrderVisitor INSTANCE = new DescribeSortOrderVisitor(); private DescribeSortOrderVisitor() { } @Override public String field(String sourceName, int sourceId, org.apache.iceberg.SortDirection direction, NullOrder nullOrder) { return String.format("%s %s %s", sourceName, direction, nullOrder); } @Override public String bucket(String sourceName, int sourceId, int numBuckets, org.apache.iceberg.SortDirection direction, NullOrder nullOrder) { return String.format("bucket(%s, %s) %s %s", numBuckets, sourceName, direction, nullOrder); } @Override public String truncate(String sourceName, int sourceId, int width, org.apache.iceberg.SortDirection direction, NullOrder nullOrder) { return String.format("truncate(%s, %s) %s %s", sourceName, width, direction, nullOrder); } @Override public String year(String sourceName, int sourceId, org.apache.iceberg.SortDirection direction, NullOrder nullOrder) { return String.format("years(%s) %s %s", sourceName, direction, nullOrder); } @Override public String month(String sourceName, int sourceId, org.apache.iceberg.SortDirection direction, NullOrder nullOrder) { return String.format("months(%s) %s %s", sourceName, direction, nullOrder); } @Override public String day(String sourceName, int sourceId, org.apache.iceberg.SortDirection direction, NullOrder nullOrder) { return String.format("days(%s) %s %s", sourceName, direction, nullOrder); } @Override public String hour(String sourceName, int sourceId, org.apache.iceberg.SortDirection direction, NullOrder nullOrder) { return String.format("hours(%s) %s %s", sourceName, direction, nullOrder); } @Override public String unknown(String sourceName, int sourceId, String transform, org.apache.iceberg.SortDirection direction, NullOrder nullOrder) { return String.format("%s(%s) %s %s", transform, sourceName, direction, nullOrder); } } }
1
36,025
Instead of returning null here and discarding nulls automatically, I think this should return null (or some other signaling value) and fix the part that consumes the output of the visitor.
apache-iceberg
java
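A sketch of what the comment above seems to ask for: keep null (from the patch's new alwaysNull case) as an explicit signaling value and drop it in the code that consumes the visitor's output, instead of relying on nulls being discarded implicitly. toTransformsExplicit is an invented name and this is not the merged fix; PartitionSpecVisitor.visit and the types are the ones used in the file above.

import java.util.List;
import java.util.Objects;

import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.transforms.PartitionSpecVisitor;
import org.apache.spark.sql.connector.expressions.Transform;

// Sketch (would live alongside toTransforms in Spark3Util): the consumer drops
// alwaysNull results deliberately instead of silently.
static Transform[] toTransformsExplicit(PartitionSpec spec, PartitionSpecVisitor<Transform> visitor) {
  List<Transform> results = PartitionSpecVisitor.visit(spec, visitor);
  return results.stream()
      .filter(Objects::nonNull) // alwaysNull fields have no Spark transform equivalent
      .toArray(Transform[]::new);
}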
@@ -95,8 +95,13 @@ public class ForkingValidatorProvider implements ValidatorProvider { final long prevBlockNumber = blockNumber - 1L; final Optional<BlockHeader> prevBlockHeader = blockchain.getBlockHeader(prevBlockNumber); if (prevBlockHeader.isPresent()) { - return resolveValidatorProvider(prevBlockNumber) - .getValidatorsForBlock(prevBlockHeader.get()); + final Collection<Address> validatorsForPreviousBlock = + resolveValidatorProvider(prevBlockNumber) + .getValidatorsForBlock(prevBlockHeader.get()); + // Update VoteTallyCache + blockValidatorProvider.setValidatorsForBlock( + prevBlockHeader.get(), validatorsForPreviousBlock); + return validatorsForPreviousBlock; } } return getValidators.apply(validatorProvider);
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.consensus.qbft.validator; import org.hyperledger.besu.config.QbftConfigOptions; import org.hyperledger.besu.consensus.common.bft.BftForkSpec; import org.hyperledger.besu.consensus.common.bft.BftForksSchedule; import org.hyperledger.besu.consensus.common.validator.ValidatorProvider; import org.hyperledger.besu.consensus.common.validator.VoteProvider; import org.hyperledger.besu.consensus.common.validator.blockbased.BlockValidatorProvider; import org.hyperledger.besu.datatypes.Address; import org.hyperledger.besu.ethereum.chain.Blockchain; import org.hyperledger.besu.ethereum.core.BlockHeader; import java.util.Collection; import java.util.Optional; import java.util.function.Function; public class ForkingValidatorProvider implements ValidatorProvider { private final Blockchain blockchain; private final BftForksSchedule<QbftConfigOptions> forksSchedule; private final BlockValidatorProvider blockValidatorProvider; private final TransactionValidatorProvider transactionValidatorProvider; public ForkingValidatorProvider( final Blockchain blockchain, final BftForksSchedule<QbftConfigOptions> forksSchedule, final BlockValidatorProvider blockValidatorProvider, final TransactionValidatorProvider transactionValidatorProvider) { this.blockchain = blockchain; this.forksSchedule = forksSchedule; this.blockValidatorProvider = blockValidatorProvider; this.transactionValidatorProvider = transactionValidatorProvider; } @Override public Collection<Address> getValidatorsAtHead() { final BlockHeader chainHead = blockchain.getChainHeadHeader(); return getValidatorsAfterBlock(chainHead); } @Override public Collection<Address> getValidatorsAfterBlock(final BlockHeader parentHeader) { final long nextBlock = parentHeader.getNumber() + 1; final ValidatorProvider validatorProvider = resolveValidatorProvider(nextBlock); return getValidators( validatorProvider, nextBlock, p -> p.getValidatorsAfterBlock(parentHeader)); } @Override public Collection<Address> getValidatorsForBlock(final BlockHeader header) { final ValidatorProvider validatorProvider = resolveValidatorProvider(header.getNumber()); return getValidators( validatorProvider, header.getNumber(), p -> p.getValidatorsForBlock(header)); } @Override public Optional<VoteProvider> getVoteProviderAtHead() { return resolveValidatorProvider(blockchain.getChainHeadHeader().getNumber()) .getVoteProviderAtHead(); } @Override public Optional<VoteProvider> getVoteProviderAfterBlock(final BlockHeader header) { return resolveValidatorProvider(header.getNumber() + 1).getVoteProviderAtHead(); } private Collection<Address> getValidators( final ValidatorProvider validatorProvider, final long blockNumber, final Function<ValidatorProvider, Collection<Address>> getValidators) { final BftForkSpec<QbftConfigOptions> forkSpec = forksSchedule.getFork(blockNumber); // when moving to a block validator the first block needs to be initialised or created with // the 
previous block state otherwise we would have no validators // unless the validators are being explicitly overridden if (forkSpec.getConfigOptions().isValidatorBlockHeaderMode() && !blockValidatorProvider.hasValidatorOverridesForBlockNumber(blockNumber)) { if (forkSpec.getBlock() > 0 && blockNumber == forkSpec.getBlock()) { final long prevBlockNumber = blockNumber - 1L; final Optional<BlockHeader> prevBlockHeader = blockchain.getBlockHeader(prevBlockNumber); if (prevBlockHeader.isPresent()) { return resolveValidatorProvider(prevBlockNumber) .getValidatorsForBlock(prevBlockHeader.get()); } } return getValidators.apply(validatorProvider); } return getValidators.apply(validatorProvider); } private ValidatorProvider resolveValidatorProvider(final long block) { final BftForkSpec<QbftConfigOptions> fork = forksSchedule.getFork(block); return fork.getConfigOptions().isValidatorContractMode() ? transactionValidatorProvider : blockValidatorProvider; } }
1
26,606
Really, we want to be setting the validatorsForPreviousBlock against the yet to be created block, so it might make sense to update the cache once the block is created/imported instead.
hyperledger-besu
java
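A sketch of the alternative the comment above proposes: rather than back-filling the cache from inside a validator query, update it once the block is actually created or imported. onBlockImported is a hypothetical hook on ForkingValidatorProvider (the wiring into Besu's import pipeline is not shown); setValidatorsForBlock is the method the patch introduces.

// Hypothetical hook, as a fragment of ForkingValidatorProvider: once the block
// exists, record its validators so later lookups hit the cache directly.
void onBlockImported(final BlockHeader importedHeader) {
  final Collection<Address> validators =
      resolveValidatorProvider(importedHeader.getNumber())
          .getValidatorsForBlock(importedHeader);
  blockValidatorProvider.setValidatorsForBlock(importedHeader, validators);
}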
@@ -60,11 +60,11 @@ int set_properties_from_args(fpga_properties filter, fpga_result *result, {"segment", required_argument, NULL, 0xe}, {0, 0, 0, 0}, }; - int supported_options = sizeof(longopts) / sizeof(longopts[0]) - 1; int getopt_ret = -1; int option_index = 0; char *endptr = NULL; int found_opts[] = {0, 0, 0, 0}; + int supported_options = sizeof(found_opts) / sizeof(int); int next_found = 0; int old_opterr = opterr; opterr = 0;
1
// Copyright(c) 2018, Intel Corporation // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Intel Corporation nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include "argsfilter.h" #include <getopt.h> #include <stdlib.h> #include <string.h> #ifdef _WIN32 #define EX_OK 0 #define EX_USAGE (-1) #define EX_SOFTWARE (-2) #else #include <sysexits.h> #endif #define RETURN_ON_ERR(res, desc) \ do { \ if ((res) != FPGA_OK) { \ optind = 1; \ opterr = old_opterr; \ fprintf(stderr, "Error %s: %s\n", (desc), \ fpgaErrStr(res)); \ return EX_SOFTWARE; \ } \ } while (0) int set_properties_from_args(fpga_properties filter, fpga_result *result, int *argc, char *argv[]) { // prefix the short options with '-' so that unrecognized options are // ignored const char *short_opts = "-:B:D:F:S:"; struct option longopts[] = { {"bus", required_argument, NULL, 'B'}, {"device", required_argument, NULL, 'D'}, {"function", required_argument, NULL, 'F'}, {"socket-id", required_argument, NULL, 'S'}, {"segment", required_argument, NULL, 0xe}, {0, 0, 0, 0}, }; int supported_options = sizeof(longopts) / sizeof(longopts[0]) - 1; int getopt_ret = -1; int option_index = 0; char *endptr = NULL; int found_opts[] = {0, 0, 0, 0}; int next_found = 0; int old_opterr = opterr; opterr = 0; struct _args_filter_config { int bus; int device; int function; int socket_id; int segment; } args_filter_config = { .bus = -1, .device = -1, .function = -1, .socket_id = -1, .segment = -1 }; while (-1 != (getopt_ret = getopt_long(*argc, argv, short_opts, longopts, &option_index))) { const char *tmp_optarg = optarg; if ((optarg) && ('=' == *tmp_optarg)) ++tmp_optarg; switch (getopt_ret) { case 'B': /* bus */ if (NULL == tmp_optarg) break; endptr = NULL; args_filter_config.bus = (int)strtoul(tmp_optarg, &endptr, 0); if (endptr != tmp_optarg + strlen(tmp_optarg)) { fprintf(stderr, "invalid bus: %s\n", tmp_optarg); return EX_USAGE; } found_opts[next_found++] = optind - 2; break; case 'D': /* device */ if (NULL == tmp_optarg) break; endptr = NULL; args_filter_config.device = (int)strtoul(tmp_optarg, &endptr, 0); if (endptr != tmp_optarg + strlen(tmp_optarg)) { fprintf(stderr, "invalid device: %s\n", 
tmp_optarg); return EX_USAGE; } found_opts[next_found++] = optind - 2; break; case 'F': /* function */ if (NULL == tmp_optarg) break; endptr = NULL; args_filter_config.function = (int)strtoul(tmp_optarg, &endptr, 0); if (endptr != tmp_optarg + strlen(tmp_optarg)) { fprintf(stderr, "invalid function: %s\n", tmp_optarg); return EX_USAGE; } found_opts[next_found++] = optind - 2; break; case 'S': /* socket */ if (NULL == tmp_optarg) break; endptr = NULL; args_filter_config.socket_id = (int)strtoul(tmp_optarg, &endptr, 0); if (endptr != tmp_optarg + strlen(tmp_optarg)) { fprintf(stderr, "invalid socket: %s\n", tmp_optarg); return EX_USAGE; } found_opts[next_found++] = optind - 2; break; case 0xe: /* segment */ if (NULL == tmp_optarg) break; endptr = NULL; args_filter_config.segment = (int)strtoul(tmp_optarg, &endptr, 0); if (endptr != tmp_optarg + strlen(tmp_optarg)) { fprintf(stderr, "invalid segment: %s\n", tmp_optarg); return EX_USAGE; } found_opts[next_found++] = optind - 2; break; case ':': /* missing option argument */ fprintf(stderr, "Missing option argument\n"); return EX_USAGE; case '?': break; case 1: break; default: /* invalid option */ fprintf(stderr, "Invalid cmdline options\n"); return EX_USAGE; } } if (-1 != args_filter_config.bus) { *result = fpgaPropertiesSetBus(filter, args_filter_config.bus); RETURN_ON_ERR(*result, "setting bus"); } if (-1 != args_filter_config.device) { *result = fpgaPropertiesSetDevice(filter, args_filter_config.device); RETURN_ON_ERR(*result, "setting device"); } if (-1 != args_filter_config.function) { *result = fpgaPropertiesSetFunction( filter, args_filter_config.function); RETURN_ON_ERR(*result, "setting function"); } if (-1 != args_filter_config.socket_id) { *result = fpgaPropertiesSetSocketID( filter, args_filter_config.socket_id); RETURN_ON_ERR(*result, "setting socket id"); } if (-1 != args_filter_config.segment) { *result = fpgaPropertiesSetSegment( filter, args_filter_config.segment); RETURN_ON_ERR(*result, "setting segment"); } // using the list of optind values // shorten the argv vector starting with a decrease // of 2 and incrementing that amount by two for each option found int removed = 0; int i, j; for (i = 0; i < supported_options; ++i) { if (found_opts[i]) { for (j = found_opts[i] - removed; j < *argc - 2; j++) { argv[j] = argv[j + 2]; } removed += 2; } else { break; } } *argc -= removed; // restore getopt variables // setting optind to zero will cause getopt to reinitialize for future // calls within the program optind = 0; opterr = old_opterr; return EX_OK; }
1
17,319
It seems to me that the original implementation was correct. Can you share what the scanner was pointing out?
OPAE-opae-sdk
c
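As for what the scanner was likely pointing out (the question above): longopts holds five real options plus its terminator, so the old bound sizeof(longopts)/sizeof(longopts[0]) - 1 evaluates to 5, while found_opts has only four slots, and the cleanup loop bounded by 5 reads found_opts[4] one past the end. A standalone sketch of just that arithmetic, under those assumptions:

#include <stdio.h>
#include <getopt.h>

/* Reproduces only the size computation, not the full argument filter. */
int main(void)
{
	struct option longopts[] = {
		{"bus",       required_argument, NULL, 'B'},
		{"device",    required_argument, NULL, 'D'},
		{"function",  required_argument, NULL, 'F'},
		{"socket-id", required_argument, NULL, 'S'},
		{"segment",   required_argument, NULL, 0xe},
		{0, 0, 0, 0},
	};
	int found_opts[] = {0, 0, 0, 0};

	size_t old_bound = sizeof(longopts) / sizeof(longopts[0]) - 1; /* 5 */
	size_t slots = sizeof(found_opts) / sizeof(found_opts[0]);     /* 4 */
	printf("old bound = %zu, tracking slots = %zu\n", old_bound, slots);
	/* iterating i < old_bound would index found_opts[4], past the end */
	return 0;
}

Note that the patched bound only silences the read: with all five options on the command line, found_opts[next_found++] can still write past the end, so sizing found_opts to match longopts may be the more complete fix.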
@@ -178,6 +178,8 @@ function* webpackConfig( env, argv ) { './assets/js/googlesitekit-modules-search-console.js', 'googlesitekit-modules-subscribe-with-google': './assets/js/googlesitekit-modules-subscribe-with-google.js', + 'googlesitekit-subscribe-with-google-bulk-edit': + './assets/js/modules/subscribe-with-google/bulk-edit.js', 'googlesitekit-modules-tagmanager': './assets/js/googlesitekit-modules-tagmanager.js', 'googlesitekit-user-input':
1
/** * Webpack config. * * Site Kit by Google, Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Node dependencies */ const fs = require( 'fs' ); const path = require( 'path' ); /** * External dependencies */ const CircularDependencyPlugin = require( 'circular-dependency-plugin' ); const ESLintPlugin = require( 'eslint-webpack-plugin' ); const MiniCssExtractPlugin = require( 'mini-css-extract-plugin' ); const TerserPlugin = require( 'terser-webpack-plugin' ); const WebpackBar = require( 'webpackbar' ); const { DefinePlugin, ProvidePlugin } = require( 'webpack' ); const { BundleAnalyzerPlugin } = require( 'webpack-bundle-analyzer' ); const CreateFileWebpack = require( 'create-file-webpack' ); const ManifestPlugin = require( 'webpack-manifest-plugin' ); const features = require( './feature-flags.json' ); const projectPath = ( relativePath ) => { return path.resolve( fs.realpathSync( process.cwd() ), relativePath ); }; const manifestTemplate = `<?php /** * Class Google\\Site_Kit\\Core\\Assets\\Manifest * * @package Google\Site_Kit * @copyright ${ new Date().getFullYear() } Google LLC * @license https://www.apache.org/licenses/LICENSE-2.0 Apache License 2.0 * @link https://sitekit.withgoogle.com */ namespace Google\\Site_Kit\\Core\\Assets; /** * Assets manifest. * * @since 1.15.0 * @access private * @ignore */ class Manifest { public static $assets = array( {{assets}} ); } `; const noAMDParserRule = { parser: { amd: false } }; const siteKitExternals = { 'googlesitekit-api': [ 'googlesitekit', 'api' ], 'googlesitekit-data': [ 'googlesitekit', 'data' ], 'googlesitekit-modules': [ 'googlesitekit', 'modules' ], 'googlesitekit-widgets': [ 'googlesitekit', 'widgets' ], '@wordpress/i18n': [ 'googlesitekit', 'i18n' ], }; const externals = { ...siteKitExternals }; const svgRule = { test: /\.svg$/, use: [ { loader: '@svgr/webpack', options: { // strip width & height to allow manual override using props dimensions: false, }, }, ], }; const createRules = ( mode ) => [ noAMDParserRule, svgRule, { test: /\.js$/, exclude: /node_modules/, use: [ { loader: 'babel-loader', options: { sourceMap: mode !== 'production', babelrc: false, configFile: false, cacheDirectory: true, presets: [ '@wordpress/default', '@babel/preset-react' ], }, }, ], ...noAMDParserRule, }, ]; const resolve = { alias: { '@wordpress/api-fetch__non-shim': require.resolve( '@wordpress/api-fetch' ), '@wordpress/api-fetch$': path.resolve( 'assets/js/api-fetch-shim.js' ), '@wordpress/i18n__non-shim': require.resolve( '@wordpress/i18n' ), }, modules: [ projectPath( '.' ), 'node_modules' ], }; // Get the app version from the google-site-kit.php file - optional chaining operator not supported here const googleSiteKitFile = fs.readFileSync( path.resolve( __dirname, 'google-site-kit.php' ), 'utf8' ); const googleSiteKitVersion = googleSiteKitFile.match( /(?<='GOOGLESITEKIT_VERSION',\s+')\d+.\d+.\d+(?=')/gi ); const GOOGLESITEKIT_VERSION = googleSiteKitVersion ? 
googleSiteKitVersion[ 0 ] : ''; function* webpackConfig( env, argv ) { const { mode, flagMode = mode } = argv; const { ANALYZE } = env || {}; const rules = createRules( mode ); // Build the settings js.. yield { entry: { // New Modules (Post-JSR). 'googlesitekit-api': './assets/js/googlesitekit-api.js', 'googlesitekit-data': './assets/js/googlesitekit-data.js', 'googlesitekit-datastore-site': './assets/js/googlesitekit-datastore-site.js', 'googlesitekit-datastore-user': './assets/js/googlesitekit-datastore-user.js', 'googlesitekit-datastore-forms': './assets/js/googlesitekit-datastore-forms.js', 'googlesitekit-datastore-location': './assets/js/googlesitekit-datastore-location.js', 'googlesitekit-datastore-ui': './assets/js/googlesitekit-datastore-ui.js', 'googlesitekit-modules': './assets/js/googlesitekit-modules.js', 'googlesitekit-widgets': './assets/js/googlesitekit-widgets.js', 'googlesitekit-modules-adsense': './assets/js/googlesitekit-modules-adsense.js', 'googlesitekit-modules-analytics': './assets/js/googlesitekit-modules-analytics.js', 'googlesitekit-modules-analytics-4': './assets/js/googlesitekit-modules-analytics-4.js', 'googlesitekit-modules-idea-hub': './assets/js/googlesitekit-modules-idea-hub.js', 'googlesitekit-modules-optimize': './assets/js/googlesitekit-modules-optimize.js', 'googlesitekit-modules-pagespeed-insights': 'assets/js/googlesitekit-modules-pagespeed-insights.js', 'googlesitekit-modules-search-console': './assets/js/googlesitekit-modules-search-console.js', 'googlesitekit-modules-subscribe-with-google': './assets/js/googlesitekit-modules-subscribe-with-google.js', 'googlesitekit-modules-tagmanager': './assets/js/googlesitekit-modules-tagmanager.js', 'googlesitekit-user-input': './assets/js/googlesitekit-user-input.js', 'googlesitekit-idea-hub-post-list': './assets/js/googlesitekit-idea-hub-post-list.js', 'googlesitekit-idea-hub-notice': './assets/js/googlesitekit-idea-hub-notice.js', 'googlesitekit-polyfills': './assets/js/googlesitekit-polyfills.js', // Old Modules 'googlesitekit-activation': './assets/js/googlesitekit-activation.js', 'googlesitekit-adminbar': './assets/js/googlesitekit-adminbar.js', 'googlesitekit-settings': './assets/js/googlesitekit-settings.js', 'googlesitekit-dashboard': './assets/js/googlesitekit-dashboard.js', 'googlesitekit-dashboard-details': './assets/js/googlesitekit-dashboard-details.js', 'googlesitekit-dashboard-splash': './assets/js/googlesitekit-dashboard-splash.js', 'googlesitekit-wp-dashboard': './assets/js/googlesitekit-wp-dashboard.js', 'googlesitekit-base': './assets/js/googlesitekit-base.js', 'googlesitekit-module': './assets/js/googlesitekit-module.js', }, externals, output: { filename: mode === 'production' ? '[name].[contenthash].js' : '[name].js', path: path.join( __dirname, 'dist/assets/js' ), chunkFilename: mode === 'production' ? '[name].[chunkhash].js' : '[name].js', publicPath: '', /* If multiple webpack runtimes (from different compilations) are used on the same webpage, there is a risk of conflicts of on-demand chunks in the global namespace. See: https://webpack.js.org/configuration/output/#outputjsonpfunction. 
*/ jsonpFunction: '__googlesitekit_webpackJsonp', }, performance: { maxEntrypointSize: 175000, }, module: { rules: [ ...rules ], }, plugins: [ new ProvidePlugin( { React: 'react', } ), new WebpackBar( { name: 'Module Entry Points', color: '#fbbc05', } ), new CircularDependencyPlugin( { exclude: /node_modules/, failOnError: true, allowAsyncCycles: false, cwd: process.cwd(), } ), new CreateFileWebpack( { path: './dist', fileName: 'config.json', content: JSON.stringify( { buildMode: flagMode, features, } ), } ), new ManifestPlugin( { fileName: path.resolve( __dirname, 'includes/Core/Assets/Manifest.php' ), filter( file ) { return ( file.name || '' ).match( /\.js$/ ); }, serialize( manifest ) { const maxLen = Math.max( ...Object.keys( manifest ).map( ( key ) => key.length ) ); const content = manifestTemplate.replace( '{{assets}}', Object.keys( manifest ) .map( ( key ) => `"${ key.replace( '.js', '' ) }"${ ''.padEnd( maxLen - key.length, ' ' ) } => "${ manifest[ key ] }",` ) .join( '\n\t\t' ) ); return content; }, } ), new DefinePlugin( { 'global.GOOGLESITEKIT_VERSION': JSON.stringify( GOOGLESITEKIT_VERSION ), } ), new ESLintPlugin( { emitError: true, emitWarning: true, failOnError: true, } ), ...( ANALYZE ? [ new BundleAnalyzerPlugin() ] : [] ), ], optimization: { minimizer: [ new TerserPlugin( { parallel: true, sourceMap: mode !== 'production', cache: true, terserOptions: { // We preserve function names that start with capital letters as // they're _likely_ component names, and these are useful to have // in tracebacks and error messages. keep_fnames: /__|_x|_n|_nx|sprintf|^[A-Z].+$/, output: { comments: /translators:/i, }, }, extractComments: false, } ), ], /* The runtimeChunk value 'single' creates a runtime file to be shared for all generated chunks. Without this, imported modules are initialized for each runtime chunk separately which results in duplicate module initialization when a shared module is imported by separate entries on the same page. See: https://v4.webpack.js.org/configuration/optimization/#optimizationruntimechunk */ runtimeChunk: 'single', splitChunks: { cacheGroups: { vendor: { chunks: 'initial', name: 'googlesitekit-vendor', filename: mode === 'production' ? 'googlesitekit-vendor.[contenthash].js' : 'googlesitekit-vendor.js', enforce: true, test: /[\\/]node_modules[\\/]/, }, }, }, }, resolve, }; if ( ANALYZE ) { return; } // Build basic modules that don't require advanced optimizations, splitting chunks, and so on... yield { entry: { 'googlesitekit-i18n': './assets/js/googlesitekit-i18n.js', // Analytics advanced tracking script to be injected in the frontend. 'analytics-advanced-tracking': './assets/js/analytics-advanced-tracking.js', }, externals, output: { filename: '[name].js', path: __dirname + '/dist/assets/js', publicPath: '', }, module: { rules, }, plugins: [ new WebpackBar( { name: 'Basic Modules', color: '#fb1105', } ), ], optimization: { concatenateModules: true, }, resolve, }; // Build the main plugin admin css. 
yield { entry: { admin: './assets/sass/admin.scss', adminbar: './assets/sass/adminbar.scss', wpdashboard: './assets/sass/wpdashboard.scss', }, module: { rules: [ { test: /\.scss$/, use: [ MiniCssExtractPlugin.loader, 'css-loader', 'postcss-loader', { loader: 'sass-loader', options: { implementation: require( 'sass' ), sassOptions: { includePaths: [ 'node_modules' ], }, }, }, ], }, { test: /\.(png|woff|woff2|eot|ttf|gif)$/, use: { loader: 'url-loader?limit=100000' }, }, ], }, plugins: [ new MiniCssExtractPlugin( { filename: '/assets/css/[name].css', } ), new WebpackBar( { name: 'Plugin CSS', color: '#4285f4', } ), ], }; } function testBundle( mode ) { return { entry: { 'e2e-api-fetch': './tests/e2e/assets/e2e-api-fetch.js', 'e2e-redux-logger': './tests/e2e/assets/e2e-redux-logger.js', }, output: { filename: '[name].js', path: __dirname + '/dist/assets/js', chunkFilename: '[name].js', publicPath: '', }, module: { rules: createRules( mode ), }, plugins: [ new WebpackBar( { name: 'Test files', color: '#34a853', } ), ], externals, resolve, }; } module.exports = { externals, noAMDParserRule, projectPath, resolve, siteKitExternals, svgRule, }; module.exports.default = ( env, argv ) => { const configs = []; const configGenerator = webpackConfig( env, argv ); for ( const config of configGenerator ) { configs.push( { ...config, stats: 'errors-warnings', } ); } const { includeTests, mode } = argv; if ( mode !== 'production' || includeTests ) { // Build the test files if we aren't doing a production build. configs.push( { ...testBundle(), stats: 'errors-warnings', } ); } return configs; };
1
41,299
Instead of doing that, please create an `assets/js/googlesitekit-subscribe-with-google-bulk-edit.js` file as the entry point; all our entry points should be located in `assets/js`.
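A minimal sketch of that layout (the thin re-export below is an assumption about the file's contents, not the actual implementation):

```js
// assets/js/googlesitekit-subscribe-with-google-bulk-edit.js
// Thin entry point living in assets/js; the real code stays in the module dir.
import './modules/subscribe-with-google/bulk-edit';
```

with the webpack entry then pointing at it:

```js
'googlesitekit-subscribe-with-google-bulk-edit':
	'./assets/js/googlesitekit-subscribe-with-google-bulk-edit.js',
```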
google-site-kit-wp
js
@@ -39,7 +39,11 @@ storiesOf( 'WordPress', module ) <div id="dashboard-widgets"> <div className="metabox-holder"> <div id="google_dashboard_widget" className="postbox"> - <h2 className="hndle ui-sortable-handle"><span><SvgIcon id="logo-g" height="19" width="19" /><SvgIcon id="logo-sitekit" height="17" width="78" /></span> + <h2 className="hndle ui-sortable-handle"> + <span><div className="googlesitekit-logo googlesitekit-logo-mini"> + <SvgIcon id="logo-g" height="19" width="19" /> + <SvgIcon id="logo-sitekit" height="17" width="78" /> + </div></span> </h2> <div className="inside"> <div id="js-googlesitekit-wp-dashboard">
1
import { storiesOf } from '@storybook/react'; import SvgIcon from 'GoogleUtil/svg-icon'; import WPDashboardMain from 'GoogleComponents/wp-dashboard/wp-dashboard-main'; import { googlesitekit as wpDashboardData } from '../.storybook/data/wp-admin-index.php--googlesitekit'; import WPSearchConsoleDashboardWidget from 'GoogleModules/search-console/wp-dashboard/wp-dashboard-widget'; import { createAddToFilter } from 'GoogleUtil/helpers'; import WPAnalyticsDashboardWidgetOverview from 'GoogleModules/analytics/wp-dashboard/wp-dashboard-widget-overview'; import WPAnalyticsDashboardWidgetTopPagesTable from 'GoogleModules/analytics/wp-dashboard/wp-dashboard-widget-top-pages-table'; storiesOf( 'WordPress', module ) .add( 'WordPress Dashboard', () => { window.googlesitekit = wpDashboardData; window.googlesitekit.admin.assetsRoot = '/assets/'; const addWPSearchConsoleDashboardWidget = createAddToFilter( <WPSearchConsoleDashboardWidget /> ); const addWPAnalyticsDashboardWidgetOverview = createAddToFilter( <WPAnalyticsDashboardWidgetOverview /> ); const addWPAnalyticsDashboardWidgetTopPagesTable = createAddToFilter( <WPAnalyticsDashboardWidgetTopPagesTable /> ); wp.hooks.removeAllFilters( 'googlesitekit.WPDashboardHeader' ); wp.hooks.addFilter( 'googlesitekit.WPDashboardHeader', 'googlesitekit.SearchConsole', addWPSearchConsoleDashboardWidget, 11 ); wp.hooks.addFilter( 'googlesitekit.WPDashboardHeader', 'googlesitekit.Analytics', addWPAnalyticsDashboardWidgetOverview ); wp.hooks.addFilter( 'googlesitekit.WPDashboardModule', 'googlesitekit.Analytics', addWPAnalyticsDashboardWidgetTopPagesTable ); setTimeout( () => { wp.hooks.doAction( 'googlesitekit.moduleLoaded', 'WPDashboard' ); }, 250 ); return ( <div id="dashboard-widgets"> <div className="metabox-holder"> <div id="google_dashboard_widget" className="postbox"> <h2 className="hndle ui-sortable-handle"><span><SvgIcon id="logo-g" height="19" width="19" /><SvgIcon id="logo-sitekit" height="17" width="78" /></span> </h2> <div className="inside"> <div id="js-googlesitekit-wp-dashboard"> <WPDashboardMain/> </div> </div> </div> </div> </div> ); }, { options: { readySelector: '.googlesitekit-data-block', delay: 2000, // Wait for table overlay to animate. } } );
1
24,677
This is not valid HTML: a `div` shouldn't be nested inside a `span`. You could apply the `className` attribute to the `span` tag instead.
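Something along these lines, keeping the classes from this change but putting them on the `span` itself:

```jsx
<h2 className="hndle ui-sortable-handle">
	<span className="googlesitekit-logo googlesitekit-logo-mini">
		<SvgIcon id="logo-g" height="19" width="19" />
		<SvgIcon id="logo-sitekit" height="17" width="78" />
	</span>
</h2>
```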
google-site-kit-wp
js
@@ -100,6 +100,8 @@ func AddFlags(flagSet *pflag.FlagSet) { flags.IntVarP(flagSet, &fs.Config.MultiThreadStreams, "multi-thread-streams", "", fs.Config.MultiThreadStreams, "Max number of streams to use for multi-thread downloads.") flags.DurationVarP(flagSet, &fs.Config.RcJobExpireDuration, "rc-job-expire-duration", "", fs.Config.RcJobExpireDuration, "expire finished async jobs older than this value") flags.DurationVarP(flagSet, &fs.Config.RcJobExpireInterval, "rc-job-expire-interval", "", fs.Config.RcJobExpireInterval, "interval to check for expired async jobs") + flags.DurationVarP(flagSet, &fs.Config.TransferredExpireDuration, "accounting-transferred-expire-duration", "", fs.Config.TransferredExpireDuration, "remove transferred accounting stats older than this value") + flags.DurationVarP(flagSet, &fs.Config.TransferredExpireInterval, "accounting-transferred-expire-interval", "", fs.Config.TransferredExpireInterval, "interval to check for expired transferred accounting stats") } // SetFlags converts any flags into config which weren't straight forward
1
// Package configflags defines the flags used by rclone. It is // decoupled into a separate package so it can be replaced. package configflags // Options set by command line flags import ( "log" "net" "path/filepath" "strings" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" "github.com/ncw/rclone/fs/config/flags" "github.com/ncw/rclone/fs/rc" "github.com/spf13/pflag" ) var ( // these will get interpreted into fs.Config via SetFlags() below verbose int quiet bool dumpHeaders bool dumpBodies bool deleteBefore bool deleteDuring bool deleteAfter bool bindAddr string disableFeatures string ) // AddFlags adds the non filing system specific flags to the command func AddFlags(flagSet *pflag.FlagSet) { rc.AddOption("main", fs.Config) // NB defaults which aren't the zero for the type should be set in fs/config.go NewConfig flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)") flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible") flags.DurationVarP(flagSet, &fs.Config.ModifyWindow, "modify-window", "", fs.Config.ModifyWindow, "Max time diff to be considered the same") flags.IntVarP(flagSet, &fs.Config.Checkers, "checkers", "", fs.Config.Checkers, "Number of checkers to run in parallel.") flags.IntVarP(flagSet, &fs.Config.Transfers, "transfers", "", fs.Config.Transfers, "Number of file transfers to run in parallel.") flags.StringVarP(flagSet, &config.ConfigPath, "config", "", config.ConfigPath, "Config file.") flags.StringVarP(flagSet, &config.CacheDir, "cache-dir", "", config.CacheDir, "Directory rclone will use for caching.") flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size") flags.BoolVarP(flagSet, &fs.Config.SizeOnly, "size-only", "", fs.Config.SizeOnly, "Skip based on size only, not mod-time or checksum") flags.BoolVarP(flagSet, &fs.Config.IgnoreTimes, "ignore-times", "I", fs.Config.IgnoreTimes, "Don't skip files that match size and time - transfer all files") flags.BoolVarP(flagSet, &fs.Config.IgnoreExisting, "ignore-existing", "", fs.Config.IgnoreExisting, "Skip all files that exist on destination") flags.BoolVarP(flagSet, &fs.Config.IgnoreErrors, "ignore-errors", "", fs.Config.IgnoreErrors, "delete even if there are I/O errors") flags.BoolVarP(flagSet, &fs.Config.DryRun, "dry-run", "n", fs.Config.DryRun, "Do a trial run with no permanent changes") flags.DurationVarP(flagSet, &fs.Config.ConnectTimeout, "contimeout", "", fs.Config.ConnectTimeout, "Connect timeout") flags.DurationVarP(flagSet, &fs.Config.Timeout, "timeout", "", fs.Config.Timeout, "IO idle timeout") flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP headers - may contain sensitive info") flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info") flags.BoolVarP(flagSet, &fs.Config.InsecureSkipVerify, "no-check-certificate", "", fs.Config.InsecureSkipVerify, "Do not verify the server SSL certificate. 
Insecure.") flags.BoolVarP(flagSet, &fs.Config.AskPassword, "ask-password", "", fs.Config.AskPassword, "Allow prompt for password for encrypted configuration.") flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transferring") flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer") flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)") flags.IntVar64P(flagSet, &fs.Config.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes") flags.BoolVarP(flagSet, &fs.Config.TrackRenames, "track-renames", "", fs.Config.TrackRenames, "When synchronizing, track file renames and do a server side move if possible") flags.IntVarP(flagSet, &fs.Config.LowLevelRetries, "low-level-retries", "", fs.Config.LowLevelRetries, "Number of low level retries to do.") flags.BoolVarP(flagSet, &fs.Config.UpdateOlder, "update", "u", fs.Config.UpdateOlder, "Skip files that are newer on the destination.") flags.BoolVarP(flagSet, &fs.Config.UseServerModTime, "use-server-modtime", "", fs.Config.UseServerModTime, "Use server modified time instead of object metadata") flags.BoolVarP(flagSet, &fs.Config.NoGzip, "no-gzip-encoding", "", fs.Config.NoGzip, "Don't set Accept-Encoding: gzip.") flags.IntVarP(flagSet, &fs.Config.MaxDepth, "max-depth", "", fs.Config.MaxDepth, "If set limits the recursion depth to this.") flags.BoolVarP(flagSet, &fs.Config.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.") flags.BoolVarP(flagSet, &fs.Config.IgnoreChecksum, "ignore-checksum", "", fs.Config.IgnoreChecksum, "Skip post copy check of checksums.") flags.BoolVarP(flagSet, &fs.Config.IgnoreCaseSync, "ignore-case-sync", "", fs.Config.IgnoreCaseSync, "Ignore case when synchronizing") flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.") flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.") flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.") flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix for use with --backup-dir.") flags.BoolVarP(flagSet, &fs.Config.SuffixKeepExtension, "suffix-keep-extension", "", fs.Config.SuffixKeepExtension, "Preserve the extension when using --suffix.") flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.") flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.") flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.") flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.") flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features. Use help to see a list.") flags.StringVarP(flagSet, &fs.Config.UserAgent, "user-agent", "", fs.Config.UserAgent, "Set the user-agent to a specified string. 
The default is rclone/ version") flags.BoolVarP(flagSet, &fs.Config.Immutable, "immutable", "", fs.Config.Immutable, "Do not modify files. Fail if existing files have been modified.") flags.BoolVarP(flagSet, &fs.Config.AutoConfirm, "auto-confirm", "", fs.Config.AutoConfirm, "If enabled, do not request console confirmation.") flags.IntVarP(flagSet, &fs.Config.StatsFileNameLength, "stats-file-name-length", "", fs.Config.StatsFileNameLength, "Max file name length in stats. 0 for no limit") flags.FVarP(flagSet, &fs.Config.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR") flags.FVarP(flagSet, &fs.Config.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR") flags.FVarP(flagSet, &fs.Config.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.") flags.FVarP(flagSet, &fs.Config.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.") flags.FVarP(flagSet, &fs.Config.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.") flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList) flags.FVarP(flagSet, &fs.Config.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.") flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.") flags.BoolVarP(flagSet, &fs.Config.StatsOneLine, "stats-one-line", "", fs.Config.StatsOneLine, "Make the stats fit on one line.") flags.BoolVarP(flagSet, &fs.Config.StatsOneLineDate, "stats-one-line-date", "", fs.Config.StatsOneLineDate, "Enables --stats-one-line and add current date/time prefix.") flags.StringVarP(flagSet, &fs.Config.StatsOneLineDateFormat, "stats-one-line-date-format", "", fs.Config.StatsOneLineDateFormat, "Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes (\"). 
See https://golang.org/pkg/time/#Time.Format") flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.") flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.") flags.BoolVarP(flagSet, &fs.Config.UseMmap, "use-mmap", "", fs.Config.UseMmap, "Use mmap allocator (see docs).") flags.StringVarP(flagSet, &fs.Config.CaCert, "ca-cert", "", fs.Config.CaCert, "CA certificate used to verify servers") flags.StringVarP(flagSet, &fs.Config.ClientCert, "client-cert", "", fs.Config.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth") flags.StringVarP(flagSet, &fs.Config.ClientKey, "client-key", "", fs.Config.ClientKey, "Client SSL private key (PEM) for mutual TLS auth") flags.FVarP(flagSet, &fs.Config.MultiThreadCutoff, "multi-thread-cutoff", "", "Use multi-thread downloads for files above this size.") flags.IntVarP(flagSet, &fs.Config.MultiThreadStreams, "multi-thread-streams", "", fs.Config.MultiThreadStreams, "Max number of streams to use for multi-thread downloads.") flags.DurationVarP(flagSet, &fs.Config.RcJobExpireDuration, "rc-job-expire-duration", "", fs.Config.RcJobExpireDuration, "expire finished async jobs older than this value") flags.DurationVarP(flagSet, &fs.Config.RcJobExpireInterval, "rc-job-expire-interval", "", fs.Config.RcJobExpireInterval, "interval to check for expired async jobs") } // SetFlags converts any flags into config which weren't straight forward func SetFlags() { if verbose >= 2 { fs.Config.LogLevel = fs.LogLevelDebug } else if verbose >= 1 { fs.Config.LogLevel = fs.LogLevelInfo } if quiet { if verbose > 0 { log.Fatalf("Can't set -v and -q") } fs.Config.LogLevel = fs.LogLevelError } logLevelFlag := pflag.Lookup("log-level") if logLevelFlag != nil && logLevelFlag.Changed { if verbose > 0 { log.Fatalf("Can't set -v and --log-level") } if quiet { log.Fatalf("Can't set -q and --log-level") } } if dumpHeaders { fs.Config.Dump |= fs.DumpHeaders fs.Logf(nil, "--dump-headers is obsolete - please use --dump headers instead") } if dumpBodies { fs.Config.Dump |= fs.DumpBodies fs.Logf(nil, "--dump-bodies is obsolete - please use --dump bodies instead") } switch { case deleteBefore && (deleteDuring || deleteAfter), deleteDuring && deleteAfter: log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`) case deleteBefore: fs.Config.DeleteMode = fs.DeleteModeBefore case deleteDuring: fs.Config.DeleteMode = fs.DeleteModeDuring case deleteAfter: fs.Config.DeleteMode = fs.DeleteModeAfter default: fs.Config.DeleteMode = fs.DeleteModeDefault } if fs.Config.IgnoreSize && fs.Config.SizeOnly { log.Fatalf(`Can't use --size-only and --ignore-size together.`) } if fs.Config.Suffix != "" && fs.Config.BackupDir == "" { log.Fatalf(`Can only use --suffix with --backup-dir.`) } switch { case len(fs.Config.StatsOneLineDateFormat) > 0: fs.Config.StatsOneLineDate = true fs.Config.StatsOneLine = true case fs.Config.StatsOneLineDate: fs.Config.StatsOneLineDateFormat = "2006/01/02 15:04:05 - " fs.Config.StatsOneLine = true } if bindAddr != "" { addrs, err := net.LookupIP(bindAddr) if err != nil { log.Fatalf("--bind: Failed to parse %q as IP address: %v", bindAddr, err) } if len(addrs) != 1 { log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", bindAddr, len(addrs)) } fs.Config.BindAddr = addrs[0] } if disableFeatures != "" { if disableFeatures == "help" { log.Fatalf("Possible backend features are: %s\n", strings.Join(new(fs.Features).List(), ", ")) } 
fs.Config.DisableFeatures = strings.Split(disableFeatures, ",") } // Make the config file absolute configPath, err := filepath.Abs(config.ConfigPath) if err == nil { config.ConfigPath = configPath } }
1
8,773
Would it be possible to reuse RcJobExpireDuration and RcJobExpireInterval and auto-delete the transferred stats when the job is deleted?
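Roughly what I have in mind, as a sketch only — `transferredPruner` and its fields are assumed names, not existing rclone types — reusing the rc job expiry settings so no new flags are needed:

```go
package accounting

import (
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
)

// transferredPruner drops finished transfer stats once they outlive the
// rc job expiry window, driven by the same two existing options.
type transferredPruner struct {
	mu          sync.Mutex
	transferred map[string]time.Time // transfer name -> completion time (assumed shape)
}

func (p *transferredPruner) run() {
	ticker := time.NewTicker(fs.Config.RcJobExpireInterval)
	defer ticker.Stop()
	for range ticker.C {
		cutoff := time.Now().Add(-fs.Config.RcJobExpireDuration)
		p.mu.Lock()
		for name, done := range p.transferred {
			if done.Before(cutoff) {
				delete(p.transferred, name)
			}
		}
		p.mu.Unlock()
	}
}
```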
rclone-rclone
go
@@ -18,7 +18,13 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. +from __future__ import print_function + import copy +import sys + +from jinja2 import Environment +from jinja2 import PackageLoader def merge_dicts(a, b, raise_conflicts=False, path=None):
1
# Copyright (c) 2015 Cisco Systems # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import copy def merge_dicts(a, b, raise_conflicts=False, path=None): """ Merges the values of B into A. If the raise_conflicts flag is set to True, a LookupError will be raised if the keys are conflicting. :param a: the target dictionary :param b: the dictionary to import :param raise_conflicts: flag to raise an exception if two keys are colliding :param path: the dictionary path. Used to show where the keys are conflicting when an exception is raised. :return: The dictionary A with the values of the dictionary B merged into it. """ # Set path. if path is None: path = [] # Go through the keys of the 2 dictionaries. for key in b: # If the key exist in both dictionary, check whether we must update or not. if key in a: # Dig deeper for keys that have dictionary values. if isinstance(a[key], dict) and isinstance(b[key], dict): merge_dicts(a[key], b[key], raise_conflicts=raise_conflicts, path=(path + [str(key)])) # Skip the identical values. elif a[key] == b[key]: pass else: # Otherwise raise an error if the same keys have different values. if raise_conflicts: raise LookupError("Conflict at '{path}'".format(path='.'.join(path + [str(key)]))) # Or replace the value of A with the value of B. a[key] = b[key] else: # If the key does not exist in A, import it. a[key] = copy.deepcopy(b[key]) if isinstance(b[key], dict) else b[key] return a
1
5,676
Python wants `__future__` imports to come first.
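For clarity, the ordering PEP 236 requires — the docstring and license comments may still come first, but `__future__` must precede every other import (this is exactly what the patch does):

```python
from __future__ import print_function

import copy
import sys

from jinja2 import Environment
from jinja2 import PackageLoader
```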
ansible-community-molecule
py
@@ -2267,4 +2267,5 @@ detach_on_permanent_stack(bool internal, bool do_cleanup, dr_stats_t *drstats) SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); dynamo_detaching_flag = LOCK_FREE_STATE; EXITING_DR(); + options_detach(); }
1
/* ********************************************************** * Copyright (c) 2012-2018 Google, Inc. All rights reserved. * Copyright (c) 2008-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* * thread.c - thread synchronization */ #include "globals.h" #include "synch.h" #include "instrument.h" /* is_in_client_lib() */ #include "hotpatch.h" /* hotp_only_in_tramp() */ #include "fragment.h" /* get_at_syscall() */ #include "fcache.h" /* in_fcache() */ #include "translate.h" #include "native_exec.h" #include <string.h> /* for memcpy */ extern vm_area_vector_t *fcache_unit_areas; /* from fcache.c */ static bool started_detach = false; /* set before synchall */ bool doing_detach = false; /* set after synchall */ static void synch_thread_yield(void); /* Thread-local data */ typedef struct _thread_synch_data_t { /* the following three fields are used to synchronize for detach, suspend * thread, terminate thread, terminate process */ /* synch_lock and pending_synch_count act as a semaphore */ /* for check_wait_at_safe_spot() must use a spin_mutex_t */ spin_mutex_t *synch_lock; /* we allow pending_synch_count to be read without holding the synch_lock * so all updates should be ATOMIC as well as holding the lock */ int pending_synch_count; /* To guarantee that the thread really has this permission you need to hold the * synch_lock when you read this value. If the target thread is suspended, use a * trylock, as it could have been suspended while holding synch_lock (i#2805). */ thread_synch_permission_t synch_perm; /* Only valid while holding all_threads_synch_lock and thread_initexit_lock. Set * to whether synch_with_all_threads was successful in synching this thread. */ bool synch_with_success; /* Case 10101: allows threads waiting_at_safe_spot() to set their own * contexts. This use sometimes requires a full os-specific context, which * we hide behind a generic pointer and a size. 
*/ priv_mcontext_t *set_mcontext; void *set_context; size_t set_context_size; #ifdef X64 /* PR 263338: we have to pad for alignment */ byte *set_context_alloc; #endif } thread_synch_data_t; /* This lock prevents more than one thread from being in the synch_with_all_ * threads method body at the same time (which would lead to deadlock as they * tried to synchronize with each other) */ DECLARE_CXTSWPROT_VAR(mutex_t all_threads_synch_lock, INIT_LOCK_FREE(all_threads_synch_lock)); /* pass either mc or both cxt and cxt_size */ static void free_setcontext(priv_mcontext_t *mc, void *cxt, size_t cxt_size _IF_X64(byte *cxt_alloc)) { if (mc != NULL) { ASSERT(cxt == NULL); global_heap_free(mc, sizeof(*mc) HEAPACCT(ACCT_OTHER)); } else if (cxt != NULL) { ASSERT(cxt_size > 0); global_heap_free(IF_X64_ELSE(cxt_alloc, cxt), cxt_size HEAPACCT(ACCT_OTHER)); } } static void synch_thread_free_setcontext(thread_synch_data_t *tsd) { free_setcontext(tsd->set_mcontext, tsd->set_context, tsd->set_context_size _IF_X64(tsd->set_context_alloc)); tsd->set_mcontext = NULL; tsd->set_context = NULL; } void synch_init(void) { } void synch_exit(void) { ASSERT(uninit_thread_count == 0); DELETE_LOCK(all_threads_synch_lock); } void synch_thread_init(dcontext_t *dcontext) { thread_synch_data_t *tsd = (thread_synch_data_t *)heap_alloc( dcontext, sizeof(thread_synch_data_t) HEAPACCT(ACCT_OTHER)); dcontext->synch_field = (void *)tsd; tsd->pending_synch_count = 0; tsd->synch_perm = THREAD_SYNCH_NONE; tsd->synch_with_success = false; tsd->set_mcontext = NULL; tsd->set_context = NULL; /* the synch_lock is in unprotected memory so that check_wait_at_safe_spot * can call the EXITING_DR hook before releasing it */ tsd->synch_lock = HEAP_TYPE_ALLOC(dcontext, spin_mutex_t, ACCT_OTHER, UNPROTECTED); ASSIGN_INIT_SPINMUTEX_FREE(*tsd->synch_lock, synch_lock); } void synch_thread_exit(dcontext_t *dcontext) { thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field; /* Could be waiting at safe spot when we detach or exit */ synch_thread_free_setcontext(tsd); DELETE_SPINMUTEX(*tsd->synch_lock); /* Note that we do need to free this in non-debug builds since, despite * appearances, UNPROTECTED_LOCAL is acutally allocated on a global * heap. */ HEAP_TYPE_FREE(dcontext, tsd->synch_lock, spin_mutex_t, ACCT_OTHER, UNPROTECTED); #ifdef DEBUG /* for non-debug we do fast exit path and don't free local heap */ /* clean up tsd fields here */ heap_free(dcontext, tsd, sizeof(thread_synch_data_t) HEAPACCT(ACCT_OTHER)); #endif } /* Check for a no-xfer permission. Currently used only for case 6821, * where we need to distinguish three groups: unsafe (wait for safe * point), safe and translatable, and safe but not translatable. */ bool thread_synch_state_no_xfer(dcontext_t *dcontext) { thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field; /* We use a trylock in case the thread is suspended holding synch_lock (i#2805). */ if (spinmutex_trylock(tsd->synch_lock)) { bool res = (tsd->synch_perm == THREAD_SYNCH_NO_LOCKS_NO_XFER || tsd->synch_perm == THREAD_SYNCH_VALID_MCONTEXT_NO_XFER); spinmutex_unlock(tsd->synch_lock); return res; } return false; } bool thread_synch_check_state(dcontext_t *dcontext, thread_synch_permission_t desired_perm) { thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field; /* We support calling this routine from our signal handler when it has interrupted * DR and might be holding tsd->synch_lock or other locks. 
* We first check synch_perm w/o a lock and if it's not at least * THREAD_SYNCH_NO_LOCKS we do not attempt to grab synch_lock (we'd hit rank order * violations). If that check passes, the only problematic lock is if we already * hold synch_lock, so we use test and trylocks there. */ if (desired_perm < THREAD_SYNCH_NO_LOCKS) { ASSERT(desired_perm == THREAD_SYNCH_NONE); return true; } if (!THREAD_SYNCH_SAFE(tsd->synch_perm, desired_perm)) return false; /* barrier to keep the 1st check above on this side of the lock below */ #ifdef WINDOWS MemoryBarrier(); #else __asm__ __volatile__("" : : : "memory"); #endif /* We use a trylock in case the thread is suspended holding synch_lock (i#2805). * We start with testlock to avoid recursive lock assertions. */ if (!spinmutex_testlock(tsd->synch_lock) && spinmutex_trylock(tsd->synch_lock)) { bool res = THREAD_SYNCH_SAFE(tsd->synch_perm, desired_perm); spinmutex_unlock(tsd->synch_lock); return res; } return false; } /* Only valid while holding all_threads_synch_lock and thread_initexit_lock. Set to * whether synch_with_all_threads was successful in synching this thread. * Cannot be called when THREAD_SYNCH_*_AND_CLEANED was requested as the * thread-local memory will be freed on success! */ bool thread_synch_successful(thread_record_t *tr) { thread_synch_data_t *tsd; ASSERT(tr != NULL && tr->dcontext != NULL); ASSERT_OWN_MUTEX(true, &all_threads_synch_lock); ASSERT_OWN_MUTEX(true, &thread_initexit_lock); tsd = (thread_synch_data_t *)tr->dcontext->synch_field; return tsd->synch_with_success; } #ifdef UNIX /* i#2659: the kernel is now doing auto-restart so we have to check for the * pc being at the syscall. */ static bool is_after_or_restarted_do_syscall(dcontext_t *dcontext, app_pc pc, bool check_vsyscall) { if (is_after_do_syscall_addr(dcontext, pc)) return true; if (check_vsyscall && pc == vsyscall_sysenter_return_pc) return true; if (!get_at_syscall(dcontext)) /* rule out having just reached the syscall */ return false; int syslen = syscall_instr_length(dr_get_isa_mode(dcontext)); if (is_after_do_syscall_addr(dcontext, pc + syslen)) return true; if (check_vsyscall && pc + syslen == vsyscall_sysenter_return_pc) return true; return false; } #endif bool is_at_do_syscall(dcontext_t *dcontext, app_pc pc, byte *esp) { app_pc buf[2]; bool res = safe_read(esp, sizeof(buf), buf); if (!res) { ASSERT(res); /* we expect the stack to always be readable */ return false; } if (does_syscall_ret_to_callsite()) { #ifdef WINDOWS if (get_syscall_method() == SYSCALL_METHOD_INT && DYNAMO_OPTION(sygate_int)) { return (pc == after_do_syscall_addr(dcontext) && buf[0] == after_do_syscall_code(dcontext)); } else { return pc == after_do_syscall_code(dcontext); } #else return is_after_or_restarted_do_syscall(dcontext, pc, false /*!vsys*/); #endif } else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) { #ifdef WINDOWS if (pc == vsyscall_after_syscall) { if (DYNAMO_OPTION(sygate_sysenter)) return buf[1] == after_do_syscall_code(dcontext); else return buf[0] == after_do_syscall_code(dcontext); } else { /* not at a system call, could still have tos match after_do_syscall * either by chance or because we leak that value on the apps stack * (a non transparency) */ ASSERT_CURIOSITY(buf[0] != after_do_syscall_code(dcontext)); return false; } #else /* Even when the main syscall method is sysenter, we also have a * do_int_syscall and do_clone_syscall that use int, so check both. 
* Note that we don't modify the stack, so once we do sysenter syscalls * inlined in the cache (PR 288101) we'll need some mechanism to * distinguish those: but for now if a sysenter instruction is used it * has to be do_syscall since DR's own syscalls are ints. */ return is_after_or_restarted_do_syscall(dcontext, pc, true /*vsys*/); #endif } /* we can reach here w/ a fault prior to 1st syscall on Linux */ IF_WINDOWS(ASSERT_NOT_REACHED()); return false; } /* Helper function for at_safe_spot(). Note state for client-owned threads isn't * considered valid since it may be holding client locks and doesn't correspond to * an actual app state. Caller should handle client-owned threads appropriately. */ static bool is_native_thread_state_valid(dcontext_t *dcontext, app_pc pc, byte *esp) { /* ref case 3675, the assumption is that if we aren't executing * out of dr memory and our stack isn't in dr memory (to disambiguate * pc in kernel32, ntdll etc.) then the app has a valid native context. * However, we can't call is_dynamo_address() as it (and its children) * grab too many different locks, all of which we would have to check * here in the same manner as fcache_unit_areas.lock in at_safe_spot(). So * instead we just check the pc for the dr dll, interception code, and * do_syscall regions and check the stack against the thread's dr stack * and the initstack, all of which we can do without grabbing any locks. * That should be sufficient at this point, FIXME try to use something * like is_dynamo_address() to make this more maintainable */ /* For sysenter system calls we also have to check the top of the stack * for the after_do_syscall_address to catch the do_syscall @ syscall * itself case. */ ASSERT(esp != NULL); ASSERT(is_thread_currently_native(dcontext->thread_record)); #ifdef WINDOWS if (pc == (app_pc)thread_attach_takeover) { /* We are trying to take over this thread but it has not yet been * scheduled. It was native, and can't hold any DR locks. */ return true; } #endif return (!is_in_dynamo_dll(pc) && IF_WINDOWS(!is_part_of_interception(pc) &&)( !in_generated_routine(dcontext, pc) || /* we allow native thread to be at do_syscall - for int syscalls the pc * (syscall return point) will be in do_syscall (so in generated routine) * xref case 9333 */ is_at_do_syscall(dcontext, pc, esp)) && !is_on_initstack(esp) && !is_on_dstack(dcontext, esp) && IF_CLIENT_INTERFACE(!is_in_client_lib(pc) &&) /* xref PR 200067 & 222812 on client-owned native threads */ IF_CLIENT_INTERFACE(!IS_CLIENT_THREAD(dcontext) &&) #ifdef HOT_PATCHING_INTERFACE /* Shouldn't be in the middle of executing a hotp_only patch. The * check for being in hotp_dll is DR_WHERE_HOTPATCH because the patch can * change esp. */ (dcontext->whereami != DR_WHERE_HOTPATCH && /* dynamo dll check has been done */ !hotp_only_in_tramp(pc)) && #endif true /* no effect, simplifies ifdef handling with && above */ ); } /* Translates the context mcontext for the given thread trec. If * restore_memory is true, also restores any memory values that were * shifted (primarily due to clients). If restore_memory is true, the * caller should always relocate the translated thread, as it may not * execute properly if left at its current location (it could be in the * middle of client code in the cache). * If recreate_app_state() is called, f will be passed through to it. * * Like any instance where a thread_record_t is used by a thread other than its * owner, the caller must hold the thread_initexit_lock to ensure that it * remains valid. 
* Requires thread trec is at_safe_spot(). */ bool translate_mcontext(thread_record_t *trec, priv_mcontext_t *mcontext, bool restore_memory, fragment_t *f) { thread_synch_data_t *tsd = (thread_synch_data_t *)trec->dcontext->synch_field; bool res; recreate_success_t success; bool native_translate = false; ASSERT(tsd->pending_synch_count >= 0); /* check if native thread */ if (is_thread_currently_native(trec)) { /* running natively, no need to translate unless at do_syscall for an * intercepted-via-trampoline syscall which we allow now for case 9333 */ #ifdef CLIENT_INTERFACE if (IS_CLIENT_THREAD(trec->dcontext)) { /* don't need to translate anything */ LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread " TIDFMT " is client " "thread, no translation needed\n", trec->id); return true; } #endif if (is_native_thread_state_valid(trec->dcontext, (app_pc)mcontext->pc, (byte *)mcontext->xsp)) { #ifdef WINDOWS if ((app_pc)mcontext->pc == (app_pc)thread_attach_takeover) { LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread " TIDFMT " at " "takeover point\n", trec->id); thread_attach_translate(trec->dcontext, mcontext, restore_memory); return true; } #endif if (is_at_do_syscall(trec->dcontext, (app_pc)mcontext->pc, (byte *)mcontext->xsp)) { LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread " TIDFMT " running " "natively, at do_syscall so translation needed\n", trec->id); native_translate = true; } else { LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread " TIDFMT " running " "natively, no translation needed\n", trec->id); return true; } } else { /* now that do_syscall is a safe spot for native threads we shouldn't get * here for get context on self, FIXME - is however possible to get here * via get_context on unsuspended thread (result of which is technically * undefined according to MS), see get_context post sys comments * (should prob. synch there in which case can assert here) */ ASSERT(trec->id != get_thread_id()); ASSERT_CURIOSITY(false && "translate failure, likely get context on " "unsuspended native thread"); /* we'll just try to translate and hope for the best */ native_translate = true; } } if (!native_translate) { /* check if waiting at a good spot */ spinmutex_lock(tsd->synch_lock); res = THREAD_SYNCH_SAFE(tsd->synch_perm, THREAD_SYNCH_VALID_MCONTEXT); spinmutex_unlock(tsd->synch_lock); if (res) { LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread " TIDFMT " waiting at " "valid mcontext point, copying over\n", trec->id); DOLOG(2, LOG_SYNCH, { LOG(THREAD_GET, LOG_SYNCH, 2, "Thread State\n"); dump_mcontext(get_mcontext(trec->dcontext), THREAD_GET, DUMP_NOT_XML); }); *mcontext = *get_mcontext(trec->dcontext); return true; } } /* In case 4148 we see a thread calling NtGetContextThread on itself, which * is undefined according to MS but it does get the syscall address, so it's * fine with us. For other threads the app shouldn't be asking about them * unless they're suspended, and the same goes for us. */ ASSERT_CURIOSITY(trec->dcontext->whereami == DR_WHERE_FCACHE || trec->dcontext->whereami == DR_WHERE_SIGNAL_HANDLER || native_translate || trec->id == get_thread_id()); LOG(THREAD_GET, LOG_SYNCH, 2, "translate context, thread " TIDFMT " at pc_recreatable spot translating\n", trec->id); success = recreate_app_state(trec->dcontext, mcontext, restore_memory, f); if (success != RECREATE_SUCCESS_STATE) { /* should never happen right? * actually it does when deciding whether can deliver a signal * immediately (PR 213040). 
*/ LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread " TIDFMT " unable to translate context at pc" " = " PFX "\n", trec->id, mcontext->pc); SYSLOG_INTERNAL_WARNING_ONCE("failed to translate"); return false; } return true; } static bool waiting_at_safe_spot(thread_record_t *trec, thread_synch_state_t desired_state) { thread_synch_data_t *tsd = (thread_synch_data_t *)trec->dcontext->synch_field; ASSERT(tsd->pending_synch_count >= 0); /* Check if waiting at a good spot. We can't spin in case the suspended thread is * holding this lock (e.g., i#2805). We only need the lock to check synch_perm. */ if (spinmutex_trylock(tsd->synch_lock)) { thread_synch_permission_t perm = tsd->synch_perm; bool res = THREAD_SYNCH_SAFE(perm, desired_state); spinmutex_unlock(tsd->synch_lock); if (res) { LOG(THREAD_GET, LOG_SYNCH, 2, "thread " TIDFMT " waiting at safe spot (synch_perm=%d)\n", trec->id, perm); return true; } } else { LOG(THREAD_GET, LOG_SYNCH, 2, "at_safe_spot unable to get locks to test if thread " TIDFMT " is waiting " "at safe spot\n", trec->id); } return false; } #ifdef CLIENT_SIDELINE static bool should_suspend_client_thread(dcontext_t *dcontext, thread_synch_state_t desired_state) { /* Marking un-suspendable does not apply to cleaning/terminating */ ASSERT(IS_CLIENT_THREAD(dcontext)); return (THREAD_SYNCH_IS_CLEANED(desired_state) || dcontext->client_data->suspendable); } #endif /* checks whether the thread trec is at a spot suitable for requested define * desired_state * Requires that trec thread is suspended */ /* Note that since trec is potentially suspended at an arbitrary point, * this function (and any function it calls) cannot call mutex_lock as * trec thread may hold a lock. It is ok for at_safe_spot to return false if * it can't obtain a lock on the first try. FIXME : in the long term we may * want to go to a locking model that stores the thread id of the owner in * which case we can check for this situation directly */ bool at_safe_spot(thread_record_t *trec, priv_mcontext_t *mc, thread_synch_state_t desired_state) { bool safe = false; if (waiting_at_safe_spot(trec, desired_state)) return true; #ifdef ARM if (TESTANY(EFLAGS_IT, mc->cpsr)) { LOG(THREAD_GET, LOG_SYNCH, 2, "thread " TIDFMT " not at safe spot (pc=" PFX " in an IT block) for %d\n", trec->id, mc->pc, desired_state); return false; } #endif /* check if suspended at good spot */ /* FIXME: right now don't distinguish between suspend and term privileges * even though suspend is stronger requirement, are the checks below * sufficient */ /* FIXME : check with respect to flush, should be ok */ /* test fcache_unit_areas.lock (from fcache.c) before calling recreate_app_state * since it calls in_fcache() which uses the lock (if we are in_fcache() * assume other locks are not a problem (so is_dynamo_address is fine)) */ /* Right now the only dr code that ends up in the cache is our DLL main * (which we'll reduce/get rid of with libc independence), our takeover * from preinject return stack, and the callback.c interception code. * FIXME : test for just these and ASSERT(!is_dynamo_address) otherwise */ if (is_thread_currently_native(trec)) { /* thread is running native, verify is not in dr code */ #ifdef CLIENT_INTERFACE /* We treat client-owned threads (such as a client nudge thread) as native and * consider them safe if they are in the client_lib. Since they might own client * locks that could block application threads from progressing, we synchronize * with them last. 
FIXME - xref PR 231301 - since we can't disambiguate * client->ntdll/gencode which is safe from client->dr->ntdll/gencode which isn't * we disallow both. This could hurt synchronization efficiency if the client * owned thread spent most of its execution time calling out of its lib to ntdll * routines or generated code. */ if (IS_CLIENT_THREAD(trec->dcontext)) { safe = (trec->dcontext->client_data->client_thread_safe_for_synch || is_in_client_lib(mc->pc)) && /* Do not cleanup/terminate a thread holding a client lock (PR 558463) */ /* Actually, don't consider a thread holding a client lock to be safe * at all (PR 609569): client should use * dr_client_thread_set_suspendable(false) if its thread spends a lot * of time holding locks. */ (!should_suspend_client_thread(trec->dcontext, desired_state) || trec->dcontext->client_data->mutex_count == 0); } #endif if (is_native_thread_state_valid(trec->dcontext, mc->pc, (byte *)mc->xsp)) { safe = true; /* We should always be able to translate a valid native state, but be * sure to check before thread_attach_exit(). */ ASSERT(translate_mcontext(trec, mc, false /*just querying*/, NULL)); #ifdef WINDOWS if (mc->pc == (app_pc)thread_attach_takeover && THREAD_SYNCH_IS_CLEANED(desired_state)) { /* The takeover data will be freed at process exit, but we might * clean up a thread mid-run, so make sure we free the data. */ thread_attach_exit(trec->dcontext, mc); } #endif } #ifdef CLIENT_INTERFACE } else if (desired_state == THREAD_SYNCH_TERMINATED_AND_CLEANED && trec->dcontext->whereami == DR_WHERE_FCACHE && trec->dcontext->client_data->at_safe_to_terminate_syscall) { /* i#1420: At safe to terminate syscall like dr_sleep in a clean call. * XXX: A thread in dr_sleep might not be safe to terminate for some * corner cases: for example, a client may hold a lock and then go sleep, * terminating it may mess the client up for not releasing the lock. * We limit this to the thread being in fcache (i.e., from a clean call) * to rule out some corner cases. */ safe = true; #endif } else if ((!WRITE_LOCK_HELD(&fcache_unit_areas->lock) && /* even though we only need the read lock, if our target holds it * and a 3rd thread requests the write lock, we'll hang if we * ask for the read lock (case 7493) */ !READ_LOCK_HELD(&fcache_unit_areas->lock)) && recreate_app_state(trec->dcontext, mc, false /*just query*/, NULL) == RECREATE_SUCCESS_STATE && /* It's ok to call is_dynamo_address even though it grabs many * locks because recreate_app_state succeeded. */ !is_dynamo_address(mc->pc)) { safe = true; } if (safe) { ASSERT(trec->dcontext->whereami == DR_WHERE_FCACHE || trec->dcontext->whereami == DR_WHERE_SIGNAL_HANDLER || is_thread_currently_native(trec)); LOG(THREAD_GET, LOG_SYNCH, 2, "thread " TIDFMT " suspended at safe spot pc=" PFX "\n", trec->id, mc->pc); return true; } LOG(THREAD_GET, LOG_SYNCH, 2, "thread " TIDFMT " not at safe spot (pc=" PFX ") for %d\n", trec->id, mc->pc, desired_state); return false; } /* a fast way to tell a thread if it should call check_wait_at_safe_spot * if translating context would be expensive */ bool should_wait_at_safe_spot(dcontext_t *dcontext) { thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field; return (tsd->pending_synch_count != 0); } /* use with care! 
normally check_wait_at_safe_spot() should be called instead */ void set_synch_state(dcontext_t *dcontext, thread_synch_permission_t state) { if (state >= THREAD_SYNCH_NO_LOCKS) ASSERT_OWN_NO_LOCKS(); thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field; /* We have a wart in the settings here (i#2805): a caller can set * THREAD_SYNCH_NO_LOCKS, yet here we're acquiring locks. In fact if this thread * is suspended in between the lock and the unset of synch_perm from * THREAD_SYNCH_NO_LOCKS back to THREAD_SYNCH_NONE, it can cause problems. We * have everyone who might query in such a state use a trylock and assume * synch_perm is THREAD_SYNCH_NONE if the lock cannot be acquired. */ spinmutex_lock(tsd->synch_lock); tsd->synch_perm = state; spinmutex_unlock(tsd->synch_lock); } /* checks to see if any threads are waiting to synch with this one and waits * if they are * cur_state - a given permission define from above that describes the current * state of the caller * NOTE - Requires the caller is !could_be_linking (i.e. not in an * enter_couldbelinking state) */ void check_wait_at_safe_spot(dcontext_t *dcontext, thread_synch_permission_t cur_state) { thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field; app_pc pc; byte cxt[MAX(CONTEXT_HEAP_SIZE_OPAQUE, sizeof(priv_mcontext_t))]; bool set_context = false; bool set_mcontext = false; if (tsd->pending_synch_count == 0 || cur_state == THREAD_SYNCH_NONE) return; ASSERT(tsd->pending_synch_count >= 0); pc = get_mcontext(dcontext)->pc; LOG(THREAD, LOG_SYNCH, 2, "waiting for synch with state %d (pc " PFX ")\n", cur_state, pc); if (cur_state == THREAD_SYNCH_VALID_MCONTEXT) { ASSERT(!is_dynamo_address(pc)); /* for detach must set this here and now */ IF_WINDOWS(IF_CLIENT_INTERFACE(set_last_error(dcontext->app_errno))); } spinmutex_lock(tsd->synch_lock); tsd->synch_perm = cur_state; /* Since can be killed, suspended, etc. must call the exit dr hook. But, to * avoid races, we must do so before giving up the synch_lock. This is why * that lock has to be in unprotected memory. FIXME - for single thread in * dr this will lead to rank order violation between dr exclusivity lock * and the synch_lock with no easy workaround (real deadlocks possible). * Luckily we'll prob. never use that option. */ if (INTERNAL_OPTION(single_thread_in_DR)) { ASSERT_NOT_IMPLEMENTED(false); } EXITING_DR(); /* Ref case 5074, for us/app to successfully SetThreadContext at * this synch point, this thread can NOT be at a system call. So, for * case 10101, we instead have threads that are waiting_at_safe_spot() * set their own contexts, allowing us to make system calls here. * We don't yet handle the detach case, so it still requires no system * calls, including the act of releasing the synch_lock * which is why that lock has to be a user mode spin yield lock. * FIXME: we could change tsd->synch_lock back to a regular lock * once we have detach handling system calls here. */ spinmutex_unlock(tsd->synch_lock); while (tsd->pending_synch_count > 0 && tsd->synch_perm != THREAD_SYNCH_NONE) { STATS_INC_DC(dcontext, synch_loops_wait_safe); #ifdef WINDOWS if (started_detach) { /* We spin for any non-detach synchs encountered during detach * since we have no flag telling us this synch is for detach. */ /* Ref case 5074, can NOT use os_thread_yield here. This must be a user * mode spin loop. 
*/ SPINLOCK_PAUSE(); } else { #endif /* FIXME case 10100: replace this sleep/yield with a wait_for_event() */ synch_thread_yield(); #ifdef WINDOWS } #endif } /* Regain the synch_lock before ENTERING_DR to avoid races with getting * suspended/killed in the middle of ENTERING_DR (before synch_perm is * reset to NONE). */ /* Ref case 5074, for detach we still can NOT use os_thread_yield here (no system * calls) so don't allow the spinmutex_lock to yield while grabbing the lock. */ spinmutex_lock_no_yield(tsd->synch_lock); ENTERING_DR(); tsd->synch_perm = THREAD_SYNCH_NONE; if (tsd->set_mcontext != NULL || tsd->set_context != NULL) { IF_WINDOWS(ASSERT(!started_detach)); /* Make a local copy */ ASSERT(sizeof(cxt) >= sizeof(priv_mcontext_t)); if (tsd->set_mcontext != NULL) { set_mcontext = true; memcpy(cxt, tsd->set_mcontext, sizeof(*tsd->set_mcontext)); } else { set_context = true; memcpy(cxt, tsd->set_context, tsd->set_context_size); } synch_thread_free_setcontext(tsd); /* sets to NULL for us */ } spinmutex_unlock(tsd->synch_lock); LOG(THREAD, LOG_SYNCH, 2, "done waiting for synch with state %d (pc " PFX ")\n", cur_state, pc); if (set_mcontext || set_context) { /* FIXME: see comment in dispatch.c check_wait_at_safe_spot() call * about problems with KSTART(fcache_* differences bet the target * being at the synch point vs in the cache. */ if (set_mcontext) thread_set_self_mcontext((priv_mcontext_t *)cxt); else thread_set_self_context((void *)cxt); ASSERT_NOT_REACHED(); } } /* adjusts the pending synch count */ void adjust_wait_at_safe_spot(dcontext_t *dcontext, int amt) { thread_synch_data_t *tsd = (thread_synch_data_t *)dcontext->synch_field; ASSERT(tsd->pending_synch_count >= 0); spinmutex_lock(tsd->synch_lock); ATOMIC_ADD(int, tsd->pending_synch_count, amt); spinmutex_unlock(tsd->synch_lock); } /* Case 10101: Safely sets the context for a target thread that may be waiting at a * safe spot, in which case we do not want to directly do a setcontext as the return * from the yield or wait system call will mess up the state (case 5074). * Assumes that cxt was allocated on the global heap, and frees it, rather than * making its own copy (as an optimization). * Does not work on the executing thread. * Caller must hold thread_initexit_lock. * If used on behalf of the app, it's up to the caller to check for privileges. */ bool set_synched_thread_context(thread_record_t *trec, /* pass either mc or both cxt and cxt_size */ priv_mcontext_t *mc, void *cxt, size_t cxt_size, thread_synch_state_t desired_state _IF_X64(byte *cxt_alloc) _IF_WINDOWS(NTSTATUS *status /*OUT*/)) { bool res = true; ASSERT(trec != NULL && trec->dcontext != NULL); ASSERT(trec->dcontext != get_thread_private_dcontext()); ASSERT_OWN_MUTEX(true, &thread_initexit_lock); #ifdef WINDOWS if (status != NULL) *status = STATUS_SUCCESS; #endif if (waiting_at_safe_spot(trec, desired_state)) { /* case 10101: to allow system calls in check_wait_at_safe_spot() for * performance reasons we have the waiting thread perform its own setcontext. */ thread_synch_data_t *tsd = (thread_synch_data_t *)trec->dcontext->synch_field; spinmutex_lock(tsd->synch_lock); if (tsd->set_mcontext != NULL || tsd->set_context != NULL) { /* Two synchs in a row while still waiting; 2nd takes precedence */ STATS_INC(wait_multiple_setcxt); synch_thread_free_setcontext(tsd); } #ifdef WINDOWS LOG(THREAD_GET, LOG_SYNCH, 2, "set_synched_thread_context %d to pc " PFX " via %s\n", trec->id, (mc != NULL) ? mc->pc : (app_pc)((CONTEXT *)cxt)->CXT_XIP, (mc != NULL) ? 
"mc" : "CONTEXT"); #else ASSERT_NOT_IMPLEMENTED(mc != NULL); /* XXX: need sigcontext or sig_full_cxt_t */ #endif if (mc != NULL) tsd->set_mcontext = mc; else { ASSERT(cxt != NULL && cxt_size > 0); tsd->set_context = cxt; tsd->set_context_size = cxt_size; } IF_X64(tsd->set_context_alloc = cxt_alloc); ASSERT(THREAD_SYNCH_SAFE(tsd->synch_perm, desired_state)); ASSERT(tsd->pending_synch_count >= 0); /* Don't need to change pending_synch_count or anything; when thread is * resumed it will properly reset everything itself */ spinmutex_unlock(tsd->synch_lock); } else { if (mc != NULL) { res = thread_set_mcontext(trec, mc); } else { #ifdef WINDOWS /* sort of ugly: but NtSetContextThread handling needs the status */ if (status != NULL) { *status = nt_set_context(trec->handle, (CONTEXT *)cxt); res = NT_SUCCESS(*status); } else res = thread_set_context(trec->handle, (CONTEXT *)cxt); #else /* currently there are no callers who don't pass mc: presumably * PR 212090 will change that */ ASSERT_NOT_IMPLEMENTED(false); #endif } free_setcontext(mc, cxt, cxt_size _IF_X64(cxt_alloc)); } return res; } /* This is used to limit the maximum number of times synch_with_thread or * synch_with_all_threads spin yield loops while waiting on an exiting thread. * We assert if we ever break out of the loop because of this limit. FIXME make * sure this limit is large enough that if it does ever trigger it's because * of some kind of deadlock situation. Breaking out of the synchronization loop * early is a correctness issue. Right now the limits are large but arbitrary. * FIXME : once we are confident about thread synch get rid of these max loop checks. * N.B.: the THREAD_SYNCH_SMALL_LOOP_MAX flag causes us to divide these by 10. */ #define SYNCH_ALL_THREADS_MAXIMUM_LOOPS (DYNAMO_OPTION(synch_all_threads_max_loops)) #define SYNCH_MAXIMUM_LOOPS (DYNAMO_OPTION(synch_thread_max_loops)) /* Amt of time in ms to wait for threads to get to a safe spot per a loop, * see comments in synch_with_yield() on value. Our default value is 5ms which, * depending on the tick resolution could end up being as long as 10 ms. */ #define SYNCH_WITH_WAIT_MS ((int)DYNAMO_OPTION(synch_with_sleep_time)) /* for use by synch_with_* routines to wait for thread(s) */ static void synch_thread_yield() { /* xref 9400, 9488 - os_thread_yield() works ok on an UP machine, but on an MP machine * yield might not actually do anything (in which case we burn through to the max * loop counts pretty quick). We actually do want to wait a reasonable amt of time * since the target thread might be doing some long latency dr operation (like * dumping 500kb of registry into a forensics file) so we have the option to sleep * instead. 
*/ uint num_procs = get_num_processors(); ASSERT(num_procs != 0); if ((num_procs == 1 && DYNAMO_OPTION(synch_thread_sleep_UP)) || (num_procs > 1 && DYNAMO_OPTION(synch_thread_sleep_MP))) { os_thread_sleep(SYNCH_WITH_WAIT_MS); } else { os_thread_yield(); } } /* returns a thread_synch_result_t value * id - the thread you want to synch with * block - whether or not should spin until synch is successful * hold_initexit_lock - whether or not the caller holds the thread_initexit_lock * caller_state - a given permission define from above that describes the * current state of the caller (note that holding the initexit * lock is ok with respect to NO_LOCK * desired_state - a requested state define from above that describes the * desired synchronization * flags - options from THREAD_SYNCH_ bitmask values * NOTE - if you hold the initexit_lock and block with greater than NONE for * caller state, then initexit_lock may be released and re-acquired * NOTE - if any of the nt_ routines fails, it is assumed the thread no longer * exists and returns true * NOTE - if called directly (i.e. not through synch_with_all_threads) * requires THREAD_SYNCH_IS_SAFE(caller_state, desired_state) to avoid deadlock * NOTE - Requires the caller is !could_be_linking (i.e. not in an * enter_couldbelinking state) * NOTE - you can't call this with a thread that you've already suspended */ thread_synch_result_t synch_with_thread(thread_id_t id, bool block, bool hold_initexit_lock, thread_synch_permission_t caller_state, thread_synch_state_t desired_state, uint flags) { thread_id_t my_id = get_thread_id(); uint loop_count = 0; int expect_exiting = 0; thread_record_t *my_tr = thread_lookup(my_id), *trec = NULL; dcontext_t *dcontext = NULL; priv_mcontext_t mc; thread_synch_result_t res = THREAD_SYNCH_RESULT_NOT_SAFE; bool first_loop = true; IF_UNIX(bool actually_suspended = true;) const uint max_loops = TEST(THREAD_SYNCH_SMALL_LOOP_MAX, flags) ? (SYNCH_MAXIMUM_LOOPS / 10) : SYNCH_MAXIMUM_LOOPS; ASSERT(id != my_id); /* Must set ABORT or IGNORE. Only caller can RETRY as need a new * set of threads for that, hoping problematic one is short-lived. */ ASSERT( TESTANY(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags) && !TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)); if (my_tr != NULL) { dcontext = my_tr->dcontext; expect_exiting = dcontext->is_exiting ? 1 : 0; ASSERT(exiting_thread_count >= expect_exiting); } else { /* calling thread should always be a known thread */ ASSERT_NOT_REACHED(); } LOG(THREAD, LOG_SYNCH, 2, "Synching with thread " TIDFMT ", giving %d, requesting %d, blocking=%d\n", id, caller_state, desired_state, block); if (!hold_initexit_lock) mutex_lock(&thread_initexit_lock); while (true) { /* get thread record */ /* FIXME : thread id recycling is possible that this could be a * different thread, perhaps we should take handle instead of id * FIXME: use the new num field of thread_record_t? 
*/ LOG(THREAD, LOG_SYNCH, 3, "Looping on synch with thread " TIDFMT "\n", id); trec = thread_lookup(id); /* We test the exiting thread count to avoid races between terminate/ * suspend thread (current thread, though we could be here for other * reasons) and an exiting thread (who might no longer be on the all * threads list) who is still using shared resources (ref case 3121) */ if ((trec == NULL && exiting_thread_count == expect_exiting) || loop_count++ > max_loops) { /* make sure we didn't exit the loop without synchronizing, FIXME : * in release builds we assume the synchronization is failing and * continue without it, but that is dangerous. * It is now up to the caller to handle this, and some use * small loop counts and abort on failure, so only a curiosity. */ ASSERT_CURIOSITY(loop_count < max_loops); LOG(THREAD, LOG_SYNCH, 3, "Exceeded loop count synching with thread " TIDFMT "\n", id); goto exit_synch_with_thread; } DOSTATS({ if (trec == NULL && exiting_thread_count > expect_exiting) { LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread\n"); STATS_INC(synch_yields_for_exiting_thread); } }); #ifdef UNIX if (trec != NULL && trec->execve) { /* i#237/PR 498284: clean up vfork "threads" that invoked execve. * There should be no race since vfork suspends the parent. */ res = THREAD_SYNCH_RESULT_SUCCESS; actually_suspended = false; break; } #endif if (trec != NULL) { if (first_loop) { adjust_wait_at_safe_spot(trec->dcontext, 1); first_loop = false; } if (!os_thread_suspend(trec)) { /* FIXME : eventually should be a real assert once we figure out * how to handle threads with low privilege handles */ /* For dr_api_exit, we may have missed a thread exit. */ ASSERT_CURIOSITY_ONCE( IF_APP_EXPORTS(dr_api_exit ||)(false && "Thread synch unable to suspend target" " thread, case 2096?")); res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags) ? THREAD_SYNCH_RESULT_SUCCESS : THREAD_SYNCH_RESULT_SUSPEND_FAILURE); IF_UNIX(actually_suspended = false); break; } if (!thread_get_mcontext(trec, &mc)) { /* FIXME : eventually should be a real assert once we figure out * how to handle threads with low privilege handles */ ASSERT_CURIOSITY_ONCE(false && "Thread synch unable to get_context target" " thread, case 2096?"); res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags) ? THREAD_SYNCH_RESULT_SUCCESS : THREAD_SYNCH_RESULT_SUSPEND_FAILURE); /* Make sure to not leave suspended if not returning success */ if (!TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)) os_thread_resume(trec); break; } if (at_safe_spot(trec, &mc, desired_state)) { /* FIXME: case 5325 for detach handling and testing */ IF_WINDOWS( ASSERT_NOT_IMPLEMENTED(!dcontext->aslr_context.sys_aslr_clobbered)); LOG(THREAD, LOG_SYNCH, 2, "Thread " TIDFMT " suspended in good spot\n", id); LOG(trec->dcontext->logfile, LOG_SYNCH, 2, "@@@@@@@@@@@@@@@@@@ SUSPENDED BY THREAD " TIDFMT " synch_with_thread " "@@@@@@@@@@@@@@@@@@\n", my_id); res = THREAD_SYNCH_RESULT_SUCCESS; break; } if (!os_thread_resume(trec)) { ASSERT_NOT_REACHED(); res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags) ? THREAD_SYNCH_RESULT_SUCCESS : THREAD_SYNCH_RESULT_SUSPEND_FAILURE); break; } } /* don't loop if !block, before we ever release initexit_lock in case * caller is holding it and not blocking, (i.e.
wants to keep it) */ if (!block) break; /* see if someone is waiting for us */ if (dcontext != NULL && caller_state != THREAD_SYNCH_NONE && should_wait_at_safe_spot(dcontext)) { if (trec != NULL) adjust_wait_at_safe_spot(trec->dcontext, -1); mutex_unlock(&thread_initexit_lock); /* ref case 5552, if we've inc'ed the exiting thread count need to * adjust it back before calling check_wait_at_safe_spot since we * may end up being killed there */ if (dcontext->is_exiting) { ASSERT(exiting_thread_count >= 1); ATOMIC_DEC(int, exiting_thread_count); } check_wait_at_safe_spot(dcontext, caller_state); if (dcontext->is_exiting) { ATOMIC_INC(int, exiting_thread_count); } mutex_lock(&thread_initexit_lock); trec = thread_lookup(id); /* Like above, we test the exiting thread count to avoid races * between terminate/suspend thread (current thread, though we * could be here for other reasons) and an exiting thread (who * might no longer be on the all threads list) who is still using * shared resources (ref case 3121) */ if (trec == NULL && exiting_thread_count == expect_exiting) { if (!hold_initexit_lock) mutex_unlock(&thread_initexit_lock); return THREAD_SYNCH_RESULT_SUCCESS; } DOSTATS({ if (trec == NULL && exiting_thread_count > expect_exiting) { LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread\n"); STATS_INC(synch_yields_for_exiting_thread); } }); if (trec != NULL) adjust_wait_at_safe_spot(trec->dcontext, 1); } STATS_INC(synch_yields); mutex_unlock(&thread_initexit_lock); /* Note - we only need call the ENTER/EXIT_DR hooks if single thread * in dr since we are not really exiting DR here (we just need to give * up the exclusion lock for a while to let thread we are trying to * synch with make progress towards a safe synch point). */ if (INTERNAL_OPTION(single_thread_in_DR)) EXITING_DR(); /* give up DR exclusion lock */ synch_thread_yield(); if (INTERNAL_OPTION(single_thread_in_DR)) ENTERING_DR(); /* re-gain DR exclusion lock */ mutex_lock(&thread_initexit_lock); } /* reset this back to before */ adjust_wait_at_safe_spot(trec->dcontext, -1); /* success!, is suspended (or already exited) put in desired state */ if (res == THREAD_SYNCH_RESULT_SUCCESS) { LOG(THREAD, LOG_SYNCH, 2, "Success synching with thread " TIDFMT " performing cleanup\n", id); if (THREAD_SYNCH_IS_TERMINATED(desired_state)) { if (IF_UNIX_ELSE(!trec->execve, true)) os_thread_terminate(trec); #ifdef UNIX /* We need to ensure the target thread has received the * signal and is no longer using its sigstack or ostd struct * before we clean those up. */ /* PR 452168: if failed to send suspend signal, do not spin */ if (actually_suspended) { if (!is_thread_terminated(trec->dcontext)) { /* i#96/PR 295561: use futex(2) if available. Blocks until * the thread gets terminated. 
*/ os_wait_thread_terminated(trec->dcontext); } } else ASSERT(TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)); #endif } if (THREAD_SYNCH_IS_CLEANED(desired_state)) { dynamo_other_thread_exit(trec _IF_WINDOWS(false)); } } exit_synch_with_thread: if (!hold_initexit_lock) mutex_unlock(&thread_initexit_lock); return res; } /* desired_synch_state - a requested state define from above that describes * the synchronization required * threads, num_threads - must not be NULL, if !THREAD_SYNCH_IS_CLEANED(desired * synch_state) then will hold a list and num of threads * cur_state - a given permission from above that describes the state of the * caller * flags - options from THREAD_SYNCH_ bitmask values * NOTE - Requires that the caller doesn't hold the thread_initexit_lock, on * return caller will hold the thread_initexit_lock * NOTE - Requires the caller is !could_be_linking (i.e. not in an * enter_couldbelinking state) * NOTE - To avoid deadlock this routine should really only be called with * cur_state giving maximum permissions, (currently app_exit and detach could * conflict, except our routes to app_exit go through a different synch point * (TermThread or TermProcess) first) * NOTE - when !all_synched, if desired_synch_state is not cleaned or synch result is * ignored, the caller is responsible for resuming threads that are suspended, * freeing allocation for threads array and releasing locks * Caller should call end_synch_with_all_threads when finished to accomplish that. */ bool synch_with_all_threads(thread_synch_state_t desired_synch_state, /*OUT*/ thread_record_t ***threads_out, /*OUT*/ int *num_threads_out, thread_synch_permission_t cur_state, /* FIXME: turn the ThreadSynch* enums into bitmasks and merge * into flags param */ uint flags) { /* Case 8815: we cannot use the OUT params themselves internally as they * may be volatile, so we need our own values until we're ready to return */ bool threads_are_stale = true; thread_record_t **threads = NULL; int num_threads = 0; /* we record ids from before we gave up thread_initexit_lock */ thread_id_t *thread_ids_temp = NULL; int num_threads_temp = 0, i, j, expect_exiting = 0; /* synch array contains a SYNCH_WITH_ALL_ value for each thread */ uint *synch_array = NULL, *synch_array_temp = NULL; enum { SYNCH_WITH_ALL_NEW = 0, SYNCH_WITH_ALL_NOTIFIED = 1, SYNCH_WITH_ALL_SYNCHED = 2, }; bool all_synched = false; thread_id_t my_id = get_thread_id(); uint loop_count = 0; thread_record_t *tr = thread_lookup(my_id); dcontext_t *dcontext = NULL; uint flags_one; /* flags for synch_with_thread() call */ thread_synch_result_t synch_res; const uint max_loops = TEST(THREAD_SYNCH_SMALL_LOOP_MAX, flags) ? (SYNCH_ALL_THREADS_MAXIMUM_LOOPS / 10) : SYNCH_ALL_THREADS_MAXIMUM_LOOPS; #ifdef CLIENT_INTERFACE /* We treat client-owned threads as native but they don't have a clean native state * for us to suspend them in (they are always in client or dr code). We need to be * able to suspend such threads so that they're !couldbelinking and holding no dr * locks. We make the assumption that client-owned threads that are in the client * library (or are in a dr routine that has set dcontext->client_thread_safe_to_sync) * meet this requirement (see at_safe_spot()). As such, all we need to worry about * here are client locks the client-owned thread might hold that could block other * threads from reaching safe spots. If we only suspend client-owned threads once * all other threads are taken care of then this is not a problem.
FIXME - xref * PR 231301 on issues that arise if the client thread spends most of its time * calling out of its lib to dr API, ntdll, or generated code functions. */ bool finished_non_client_threads; #endif ASSERT(!dynamo_all_threads_synched); /* flag any caller who does not give up enough permissions to avoid livelock * with other synch_with_all_threads callers */ ASSERT_CURIOSITY(cur_state >= THREAD_SYNCH_NO_LOCKS_NO_XFER); /* also flag anyone asking for full mcontext w/o possibility of no_xfer, * which can also livelock */ ASSERT_CURIOSITY(desired_synch_state < THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT /* detach currently violates this: bug 8942 */ || started_detach); /* must set exactly one of these -- FIXME: better way to check? */ ASSERT( TESTANY(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE | THREAD_SYNCH_SUSPEND_FAILURE_RETRY, flags) && !TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags) && !TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_RETRY, flags) && !TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE | THREAD_SYNCH_SUSPEND_FAILURE_RETRY, flags)); flags_one = flags; /* we'll do the retry */ if (TEST(THREAD_SYNCH_SUSPEND_FAILURE_RETRY, flags)) { flags_one &= ~THREAD_SYNCH_SUSPEND_FAILURE_RETRY; flags_one |= THREAD_SYNCH_SUSPEND_FAILURE_ABORT; } if (tr != NULL) { dcontext = tr->dcontext; expect_exiting = dcontext->is_exiting ? 1 : 0; ASSERT(exiting_thread_count >= expect_exiting); } else { /* calling thread should always be a known thread */ ASSERT_NOT_REACHED(); } LOG(THREAD, LOG_SYNCH, 1, "synch with all threads my id = " SZFMT " Giving %d permission and seeking %d state\n", my_id, cur_state, desired_synch_state); /* grab all_threads_synch_lock */ /* since all_threads synch doesn't give any permissions this is necessary * to prevent deadlock in the case of two threads trying to synch with all * threads at the same time */ /* FIXME: for DEADLOCK_AVOIDANCE, to preserve LIFO, should we * exit DR, trylock, then immediately enter DR? introducing any * race conditions in doing so? * Ditto on all other os_thread_yields in this file! */ while (!mutex_trylock(&all_threads_synch_lock)) { LOG(THREAD, LOG_SYNCH, 2, "Spinning on all threads synch lock\n"); STATS_INC(synch_yields); if (dcontext != NULL && cur_state != THREAD_SYNCH_NONE && should_wait_at_safe_spot(dcontext)) { /* ref case 5552, if we've inc'ed the exiting thread count need to * adjust it back before calling check_wait_at_safe_spot since we * may end up being killed there */ if (dcontext->is_exiting) { ASSERT(exiting_thread_count >= 1); ATOMIC_DEC(int, exiting_thread_count); } check_wait_at_safe_spot(dcontext, cur_state); if (dcontext->is_exiting) { ATOMIC_INC(int, exiting_thread_count); } } LOG(THREAD, LOG_SYNCH, 2, "Yielding on all threads synch lock\n"); /* Note - we only need call the ENTER/EXIT_DR hooks if single thread * in dr since we are not really exiting DR here (we just need to give * up the exclusion lock for a while to let thread we are trying to * synch with make progress towards a safe synch point). 
*/ if (INTERNAL_OPTION(single_thread_in_DR)) EXITING_DR(); /* give up DR exclusion lock */ os_thread_yield(); if (INTERNAL_OPTION(single_thread_in_DR)) ENTERING_DR(); /* re-gain DR exclusion lock */ } mutex_lock(&thread_initexit_lock); /* synch with all threads */ /* FIXME: this should be a do/while loop - then we wouldn't have * to initialize all the variables above */ while (threads_are_stale || !all_synched || exiting_thread_count > expect_exiting || uninit_thread_count > 0) { if (threads != NULL) { /* Case 8941: must free here rather than when yield (below) since * termination condition can change between there and here */ ASSERT(num_threads > 0); global_heap_free(threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT)); /* be paranoid */ threads = NULL; num_threads = 0; } get_list_of_threads(&threads, &num_threads); threads_are_stale = false; synch_array = (uint *)global_heap_alloc(num_threads * sizeof(uint) HEAPACCT(ACCT_THREAD_MGT)); for (i = 0; i < num_threads; i++) { synch_array[i] = SYNCH_WITH_ALL_NEW; } /* FIXME : an inefficient algorithm, but is not as bad as it seems * since it is very unlikely that many threads have started or ended * and the list threads routine always puts them in the same order */ /* on first loop num_threads_temp == 0 */ for (i = 0; i < num_threads_temp; i++) { /* care only if we have already notified or synched thread */ if (synch_array_temp[i] != SYNCH_WITH_ALL_NEW) { for (j = 0; j < num_threads; j++) { /* FIXME : os recycles thread ids, should have stronger * check here, could check dcontext equivalence, (but we * recycle those too), probably should check threads_temp * handle and be sure thread is still alive since the id * won't be recycled then */ if (threads[j]->id == thread_ids_temp[i]) { synch_array[j] = synch_array_temp[i]; break; } } } } /* free old synch list, old thread id list */ if (num_threads_temp > 0) { global_heap_free(thread_ids_temp, num_threads_temp * sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT)); global_heap_free(synch_array_temp, num_threads_temp * sizeof(uint) HEAPACCT(ACCT_THREAD_MGT)); num_threads_temp = 0; } all_synched = true; LOG(THREAD, LOG_SYNCH, 3, "Looping over all threads (%d threads)\n", num_threads); #ifdef CLIENT_INTERFACE finished_non_client_threads = true; for (i = 0; i < num_threads; i++) { if (threads[i]->id != my_id && synch_array[i] != SYNCH_WITH_ALL_SYNCHED && !IS_CLIENT_THREAD(threads[i]->dcontext)) { finished_non_client_threads = false; break; } } #endif /* make a copy of the thread ids (can't just keep the thread list * since it consists of pointers to live thread_record_t structs). * we must make the copy before synching b/c cleaning up a thread * involves freeing its thread_record_t. */ thread_ids_temp = (thread_id_t *)global_heap_alloc( num_threads * sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT)); for (i = 0; i < num_threads; i++) thread_ids_temp[i] = threads[i]->id; num_threads_temp = num_threads; synch_array_temp = synch_array; for (i = 0; i < num_threads; i++) { /* do not de-ref threads[i] after synching if it was cleaned up!
*/ if (synch_array[i] != SYNCH_WITH_ALL_SYNCHED && threads[i]->id != my_id) { #ifdef CLIENT_INTERFACE if (!finished_non_client_threads && IS_CLIENT_THREAD(threads[i]->dcontext)) { all_synched = false; continue; /* skip this thread for now till non-client are finished */ } if (IS_CLIENT_THREAD(threads[i]->dcontext) && (TEST(flags, THREAD_SYNCH_SKIP_CLIENT_THREAD) || !should_suspend_client_thread(threads[i]->dcontext, desired_synch_state))) { /* PR 609569: do not suspend this thread. * Avoid races between resume_all_threads() and * dr_client_thread_set_suspendable() by storing the fact. * * For most of our synchall purposes we really want to prevent * threads from acting on behalf of the application, and make * sure we can relocate them if in the code cache. DR itself is * thread-safe, and while a synchall-initiator will touch * thread-private data for threads it suspends, having some * threads it does not suspend shouldn't cause any problems so * long as it doesn't touch their thread-private data. */ synch_array[i] = SYNCH_WITH_ALL_SYNCHED; threads[i]->dcontext->client_data->left_unsuspended = true; continue; } #endif /* speed things up a tad */ if (synch_array[i] != SYNCH_WITH_ALL_NOTIFIED) { ASSERT(synch_array[i] == SYNCH_WITH_ALL_NEW); adjust_wait_at_safe_spot(threads[i]->dcontext, 1); synch_array[i] = SYNCH_WITH_ALL_NOTIFIED; } LOG(THREAD, LOG_SYNCH, 2, "About to try synch with thread #%d/%d " TIDFMT "\n", i, num_threads, threads[i]->id); synch_res = synch_with_thread(threads[i]->id, false, true, THREAD_SYNCH_NONE, desired_synch_state, flags_one); if (synch_res == THREAD_SYNCH_RESULT_SUCCESS) { LOG(THREAD, LOG_SYNCH, 2, "Synch succeeded!\n"); /* successful synch */ synch_array[i] = SYNCH_WITH_ALL_SYNCHED; if (!THREAD_SYNCH_IS_CLEANED(desired_synch_state)) adjust_wait_at_safe_spot(threads[i]->dcontext, -1); } else { LOG(THREAD, LOG_SYNCH, 2, "Synch failed!\n"); all_synched = false; if (synch_res == THREAD_SYNCH_RESULT_SUSPEND_FAILURE) { if (TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags)) goto synch_with_all_abort; } else ASSERT(synch_res == THREAD_SYNCH_RESULT_NOT_SAFE); } } else { LOG(THREAD, LOG_SYNCH, 2, "Skipping synch with thread " TIDFMT "\n", thread_ids_temp[i]); } } if (loop_count++ >= max_loops) break; /* We test the exiting thread count to avoid races between exit * process (current thread, though we could be here for detach or other * reasons) and an exiting thread (who might no longer be on the all * threads list) who is still using shared resources (ref case 3121) */ if (!all_synched || exiting_thread_count > expect_exiting || uninit_thread_count > 0) { DOSTATS({ if (all_synched && exiting_thread_count > expect_exiting) { LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread %d %d %d\n", all_synched, exiting_thread_count, expect_exiting); STATS_INC(synch_yields_for_exiting_thread); } else if (all_synched && uninit_thread_count > 0) { LOG(THREAD, LOG_SYNCH, 2, "Waiting for an uninit thread %d %d\n", all_synched, uninit_thread_count); STATS_INC(synch_yields_for_uninit_thread); } }); STATS_INC(synch_yields); /* release lock in case some other thread waiting on it */ mutex_unlock(&thread_initexit_lock); LOG(THREAD, LOG_SYNCH, 2, "Not all threads synched looping again\n"); /* Note - we only need call the ENTER/EXIT_DR hooks if single * thread in dr since we are not really exiting DR here (we just * need to give up the exclusion lock for a while to let thread we * are trying to synch with make progress towards a safe synch * point). 
*/ if (INTERNAL_OPTION(single_thread_in_DR)) EXITING_DR(); /* give up DR exclusion lock */ synch_thread_yield(); if (INTERNAL_OPTION(single_thread_in_DR)) ENTERING_DR(); /* re-gain DR exclusion lock */ mutex_lock(&thread_initexit_lock); /* We unlock and lock the thread_initexit_lock, so threads might be stale. */ threads_are_stale = true; } } /* case 9392: callers passing in ABORT expect a return value of failure * to correspond w/ no suspended threads, a freed threads array, and no * locks being held, so we go through the abort path */ if (!all_synched && TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags)) goto synch_with_all_abort; synch_with_all_exit: /* make sure we didn't exit the loop without synchronizing, FIXME : in * release builds we assume the synchronization is failing and continue * without it, but that is dangerous. * It is now up to the caller to handle this, and some use * small loop counts and abort on failure, so only a curiosity. */ ASSERT_CURIOSITY(loop_count < max_loops); ASSERT(threads != NULL); /* Since the set of threads can change we don't set the success field * until we're passing back the thread list. * We would use a tsd field directly instead of synch_array except * for THREAD_SYNCH_*_CLEAN where tsd is freed. */ ASSERT(synch_array != NULL); if (!THREAD_SYNCH_IS_CLEANED(desired_synch_state)) { /* else unsafe to access tsd */ for (i = 0; i < num_threads; i++) { if (threads[i]->id != my_id) { thread_synch_data_t *tsd; ASSERT(threads[i]->dcontext != NULL); tsd = (thread_synch_data_t *)threads[i]->dcontext->synch_field; tsd->synch_with_success = (synch_array[i] == SYNCH_WITH_ALL_SYNCHED); } } } global_heap_free(synch_array, num_threads * sizeof(uint) HEAPACCT(ACCT_THREAD_MGT)); if (num_threads_temp > 0) { global_heap_free(thread_ids_temp, num_threads_temp * sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT)); } /* FIXME case 9333: on all_synch failure we do not free threads array if * synch_result is ignored. Callers are responsible for resuming threads that are * suspended and freeing allocation for threads array */ if ((!all_synched && TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags)) || THREAD_SYNCH_IS_CLEANED(desired_synch_state)) { global_heap_free( threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT)); threads = NULL; num_threads = 0; } LOG(THREAD, LOG_SYNCH, 1, "Finished synch with all threads: result=%d\n", all_synched); DOLOG(1, LOG_SYNCH, { if (all_synched) { LOG(THREAD, LOG_SYNCH, 1, "\treturning holding initexit_lock and all_threads_synch_lock\n"); } }); *threads_out = threads; *num_threads_out = num_threads; dynamo_all_threads_synched = all_synched; /* FIXME case 9392: where on all_synch failure we do not release the locks in the * non-abort exit path */ return all_synched; synch_with_all_abort: /* undo everything!
*/ for (i = 0; i < num_threads; i++) { DEBUG_DECLARE(bool ok;) if (threads[i]->id != my_id) { if (synch_array[i] == SYNCH_WITH_ALL_SYNCHED) { bool resume = true; #ifdef CLIENT_SIDELINE if (IS_CLIENT_THREAD(threads[i]->dcontext) && threads[i]->dcontext->client_data->left_unsuspended) { /* PR 609569: we did not suspend this thread */ resume = false; } #endif if (resume) { DEBUG_DECLARE(ok =) os_thread_resume(threads[i]); ASSERT(ok); } /* ensure synch_with_success is set to false on exit path, * even though locks are released and not fully valid */ synch_array[i] = SYNCH_WITH_ALL_NEW; } else if (synch_array[i] == SYNCH_WITH_ALL_NOTIFIED) { adjust_wait_at_safe_spot(threads[i]->dcontext, -1); } } } mutex_unlock(&thread_initexit_lock); mutex_unlock(&all_threads_synch_lock); ASSERT(!all_synched); /* ensure our OUT values will be NULL,0 for THREAD_SYNCH_SUSPEND_FAILURE_ABORT */ goto synch_with_all_exit; } /* Assumes that the threads were suspended with synch_with_all_threads() * and thus even is_thread_currently_native() threads were suspended. * Assumes that the caller will free up threads if it is dynamically allocated. */ void resume_all_threads(thread_record_t **threads, const uint num_threads) { uint i; thread_id_t my_tid; bool res; ASSERT_OWN_MUTEX(true, &all_threads_synch_lock); ASSERT_OWN_MUTEX(true, &thread_initexit_lock); if (threads == NULL || num_threads == 0) return; my_tid = get_thread_id(); for (i = 0; i < num_threads; i++) { if (my_tid == threads[i]->id) continue; #ifdef CLIENT_SIDELINE if (IS_CLIENT_THREAD(threads[i]->dcontext) && threads[i]->dcontext->client_data->left_unsuspended) { /* PR 609569: we did not suspend this thread */ threads[i]->dcontext->client_data->left_unsuspended = false; continue; } #endif /* This routine assumes that each thread in the array was suspended, so * each one has to successfully resume. */ res = os_thread_resume(threads[i]); ASSERT(res); } } /* Should be called to clean up after synch_with_all_threads as otherwise * dynamo_all_threads_synched will be left as true. * If resume is true, resumes the threads in the threads array. * Unlocks thread_initexit_lock and all_threads_synch_lock. * If threads != NULL, frees the threads array. */ void end_synch_with_all_threads(thread_record_t **threads, uint num_threads, bool resume) { /* dynamo_all_threads_synched will be false if synch failed */ ASSERT_CURIOSITY(dynamo_all_threads_synched); ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock)); dynamo_all_threads_synched = false; if (resume) { ASSERT(threads != NULL); resume_all_threads(threads, num_threads); } /* if we knew whether THREAD_SYNCH_*_CLEANED was specified we could set * synch_with_success to false, but it's unsafe otherwise */ mutex_unlock(&thread_initexit_lock); mutex_unlock(&all_threads_synch_lock); if (threads != NULL) { global_heap_free( threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT)); } } /* Resets a thread's context to start interpreting anew. * ASSUMPTION: the thread is currently suspended. 
* This was moved here from fcache_reset_all_caches_proactively simply to * get access to win32-private CONTEXT-related routines */ void translate_from_synchall_to_dispatch(thread_record_t *tr, thread_synch_state_t synch_state) { bool res; /* we do not have to align priv_mcontext_t */ priv_mcontext_t *mc = global_heap_alloc(sizeof(*mc) HEAPACCT(ACCT_OTHER)); bool free_cxt = true; dcontext_t *dcontext = tr->dcontext; app_pc pre_translation; ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock)); /* FIXME: would like to assert that suspendcount is > 0 but how? */ ASSERT(thread_synch_successful(tr)); res = thread_get_mcontext(tr, mc); ASSERT(res); pre_translation = (app_pc)mc->pc; LOG(GLOBAL, LOG_CACHE, 2, "\trecreating address for " PFX "\n", mc->pc); LOG(THREAD, LOG_CACHE, 2, "translate_from_synchall_to_dispatch: being translated from " PFX "\n", mc->pc); if (get_at_syscall(dcontext)) { /* Don't need to do anything as shared_syscall and do_syscall will not * change due to a reset and will have any inlined ibl updated. If we * did try to send these guys back to dispatch, have to set asynch_tag * (as well as next_tag since translation looks only at that), restore * TOS to asynch_target/esi (unless still at reset state), and have to * figure out how to avoid post-syscall processing for those who never * did pre-syscall processing (i.e., if at shared_syscall) (else will * get wrong dcontext->sysnum, etc.) * Not to mention that after resuming the kernel will finish the * syscall and clobber several registers, making it hard to set a * clean state (xref case 6113, case 5074, and notes below)! * It's just too hard to redirect while at a syscall. */ LOG(GLOBAL, LOG_CACHE, 2, "\tat syscall so not translating\n"); /* sanity check */ ASSERT(is_after_syscall_address(dcontext, pre_translation) || IF_WINDOWS_ELSE(pre_translation == vsyscall_after_syscall, is_after_or_restarted_do_syscall(dcontext, pre_translation, true /*vsys*/))); #if defined(UNIX) && defined(X86_32) if (pre_translation == vsyscall_sysenter_return_pc || pre_translation + SYSENTER_LENGTH == vsyscall_sysenter_return_pc) { /* Because we remove the vsyscall hook on a send_all_other_threads_native() * yet have no barrier to know the threads have run their own go-native * code, we want to send them away from the hook, to our gencode. */ if (pre_translation == vsyscall_sysenter_return_pc) mc->pc = after_do_shared_syscall_addr(dcontext); else if (pre_translation + SYSENTER_LENGTH == vsyscall_sysenter_return_pc) mc->pc = get_do_int_syscall_entry(dcontext); /* exit stub and subsequent fcache_return will save rest of state */ res = set_synched_thread_context(dcontext->thread_record, mc, NULL, 0, synch_state _IF_X64((void *)mc) _IF_WINDOWS(NULL)); ASSERT(res); /* cxt is freed by set_synched_thread_context() or target thread */ free_cxt = false; } #endif IF_ARM({ if (INTERNAL_OPTION(steal_reg_at_reset) != 0) { /* We don't want to translate, just update the stolen reg values */ arch_mcontext_reset_stolen_reg(dcontext, mc); res = set_synched_thread_context(dcontext->thread_record, mc, NULL, 0, synch_state _IF_X64((void *)mc) _IF_WINDOWS(NULL)); ASSERT(res); /* cxt is freed by set_synched_thread_context() or target thread */ free_cxt = false; } }); } else { res = translate_mcontext(tr, mc, true /*restore memory*/, NULL); ASSERT(res); if (!thread_synch_successful(tr) || mc->pc == 0) { /* Better to risk failure on accessing a freed cache than * to have a guaranteed crash by sending to NULL. 
* FIXME: it's possible the real translation is NULL, * but if so should be fine to leave it there since the * current eip should also be NULL. */ ASSERT_NOT_REACHED(); goto translate_from_synchall_to_dispatch_exit; } LOG(GLOBAL, LOG_CACHE, 2, "\ttranslation pc = " PFX "\n", mc->pc); ASSERT(!is_dynamo_address((app_pc)mc->pc) && !in_fcache((app_pc)mc->pc)); IF_ARM({ if (INTERNAL_OPTION(steal_reg_at_reset) != 0) { /* XXX: do we need this? Will signal.c fix it up prior * to sigreturn from the suspend handler? */ arch_mcontext_reset_stolen_reg(dcontext, mc); } }); /* We send all threads, regardless of whether they were in DR or not, to * re-interp from translated cxt, to avoid having to handle stale * local state problems if we simply resumed. * We assume no KSTATS or other state issues to deal with. * FIXME: enter hook w/o an exit? */ dcontext->next_tag = (app_pc)mc->pc; /* FIXME PR 212266: for linux if we're at an inlined syscall * we may have problems: however, we might be able to rely on the kernel * not clobbering any registers besides eax (which is ok: reset stub * handles it), though presumably it's allowed to write to any * caller-saved registers. We may need to change inlined syscalls * to set at_syscall (see comments below as well). */ if (pre_translation == IF_WINDOWS_ELSE(vsyscall_after_syscall, vsyscall_sysenter_return_pc) && !waiting_at_safe_spot(dcontext->thread_record, synch_state)) { /* FIXME case 7827/PR 212266: shouldn't translate for this case, right? * should have -ignore_syscalls set at_syscall and eliminate * this whole block of code */ /* put the proper retaddr back on the stack, as we won't * be doing the ret natively to regain control, but rather * will interpret it */ /* FIXME: ensure readable and writable? */ app_pc cur_retaddr = *((app_pc *)mc->xsp); app_pc native_retaddr; ASSERT(cur_retaddr != NULL); /* must be ignore_syscalls (else, at_syscall will be set) */ IF_WINDOWS(ASSERT(DYNAMO_OPTION(ignore_syscalls))); ASSERT(get_syscall_method() == SYSCALL_METHOD_SYSENTER); /* For DYNAMO_OPTION(sygate_sysenter) we need to restore both stack * values and fix up esp, but we can't do it here since the kernel * will change esp... incompatible w/ -ignore_syscalls anyway */ IF_WINDOWS(ASSERT_NOT_IMPLEMENTED(!DYNAMO_OPTION(sygate_sysenter))); /* may still be at syscall from a prior reset -- don't want to grab * locks for in_fcache so we determine via the translation */ ASSERT_NOT_TESTED(); native_retaddr = recreate_app_pc(dcontext, cur_retaddr, NULL); if (native_retaddr != cur_retaddr) { LOG(GLOBAL, LOG_CACHE, 2, "\trestoring TOS to " PFX " from " PFX "\n", native_retaddr, cur_retaddr); *((app_pc *)mc->xsp) = native_retaddr; } else { LOG(GLOBAL, LOG_CACHE, 2, "\tnot restoring TOS since still at previous reset state " PFX "\n", cur_retaddr); } } /* Send back to dispatch. Rather than setting up last_exit in eax here, * we point to a special routine to save the correct eax -- in fact it's * simply a direct exit stub. Originally this was b/c we tried to * translate threads at system calls, and the kernel clobbers eax (and * ecx/edx for sysenter, though preserves eip setcontext change: case * 6113, case 5074) in finishing the system call, but now that we don't * translate them we've kept the stub approach. It's actually faster * for the stub itself to save eax and set the linkstub than for us to * emulate it here, anyway. * Note that a thread in check_wait_at_safe_spot() spins and will NOT be * at a syscall, avoiding problems there (case 5074).
*/ mc->pc = (app_pc)get_reset_exit_stub(dcontext); LOG(GLOBAL, LOG_CACHE, 2, "\tsent to reset exit stub " PFX "\n", mc->pc); /* make dispatch happy */ dcontext->whereami = DR_WHERE_FCACHE; #ifdef WINDOWS /* i#25: we could have interrupted thread in DR, where has priv fls data * in TEB, and fcache_return blindly copies into app fls: so swap to app * now, just in case. DR routine can handle swapping when already app. */ swap_peb_pointer(dcontext, false /*to app*/); #endif /* exit stub and subsequent fcache_return will save rest of state */ res = set_synched_thread_context(dcontext->thread_record, mc, NULL, 0, synch_state _IF_X64((void *)mc) _IF_WINDOWS(NULL)); ASSERT(res); /* cxt is freed by set_synched_thread_context() or target thread */ free_cxt = false; } translate_from_synchall_to_dispatch_exit: if (free_cxt) { global_heap_free(mc, sizeof(*mc) HEAPACCT(ACCT_OTHER)); } } /*************************************************************************** * Detach and similar operations */ /* Atomic variable to prevent multiple threads from trying to detach at * the same time. */ DECLARE_CXTSWPROT_VAR(static volatile int dynamo_detaching_flag, LOCK_FREE_STATE); void send_all_other_threads_native(void) { thread_record_t **threads; dcontext_t *my_dcontext = get_thread_private_dcontext(); int i, num_threads; bool waslinking; /* We're forced to use an asynch model due to not being able to call * dynamo_thread_not_under_dynamo, which has a bonus of making it easier * to handle other threads asking for synchall. * This is why we don't ask for THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT. */ const thread_synch_state_t desired_state = THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER; ASSERT(dynamo_initialized && !dynamo_exited && my_dcontext != NULL); LOG(my_dcontext->logfile, LOG_ALL, 1, "%s\n", __FUNCTION__); LOG(GLOBAL, LOG_ALL, 1, "%s: cur thread " TIDFMT "\n", __FUNCTION__, get_thread_id()); waslinking = is_couldbelinking(my_dcontext); if (waslinking) enter_nolinking(my_dcontext, NULL, false); #ifdef WINDOWS /* Ensure new threads will go straight to native */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); init_apc_go_native_pause = true; init_apc_go_native = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); # ifdef CLIENT_INTERFACE wait_for_outstanding_nudges(); # endif #endif /* Suspend all threads except those trying to synch with us */ if (!synch_with_all_threads(desired_state, &threads, &num_threads, THREAD_SYNCH_NO_LOCKS_NO_XFER, THREAD_SYNCH_SUSPEND_FAILURE_IGNORE)) { REPORT_FATAL_ERROR_AND_EXIT(my_dcontext, FAILED_TO_SYNCHRONIZE_THREADS, 2, get_application_name(), get_application_pid()); } ASSERT(mutex_testlock(&all_threads_synch_lock) && mutex_testlock(&thread_initexit_lock)); #ifdef WINDOWS /* Let threads waiting at APC point go native */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); init_apc_go_native_pause = false; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); #endif #ifdef WINDOWS /* FIXME i#95: handle outstanding callbacks where we've put our retaddr on * the app stack. This should be able to share * detach_helper_handle_callbacks() code. Won't the old single-thread * dr_app_stop() have had this same problem? Since we're not tearing * everything down, can we solve it by waiting until we hit * after_shared_syscall_code_ex() in a native thread? 
*/ ASSERT_NOT_IMPLEMENTED(get_syscall_method() != SYSCALL_METHOD_SYSENTER); #endif for (i = 0; i < num_threads; i++) { if (threads[i]->dcontext == my_dcontext || is_thread_currently_native(threads[i]) || /* FIXME i#2784: we should suspend client threads for the duration * of the app being native to avoid problems with having no * signal handlers in place. */ IS_CLIENT_THREAD(threads[i]->dcontext)) continue; /* Because dynamo_thread_not_under_dynamo() has to be run by the owning * thread, the simplest solution is to send everyone back to dispatch * with a flag to go native from there, rather than directly setting the * native context. */ threads[i]->dcontext->go_native = true; if (thread_synch_state_no_xfer(threads[i]->dcontext)) { /* Another thread trying to synch with us: just let it go. It will * go native once it gets back to dispatch which will be before it * goes into the cache. */ continue; } else { LOG(my_dcontext->logfile, LOG_ALL, 1, "%s: sending thread %d native\n", __FUNCTION__, threads[i]->id); LOG(threads[i]->dcontext->logfile, LOG_ALL, 1, "**** requested by thread %d to go native\n", my_dcontext->owning_thread); /* This won't change a thread at a syscall, so we rely on the thread * going to dispatch and then going native when its syscall exits. * * FIXME i#95: That means the time to go native is, unfortunately, * unbounded. This means that dr_app_cleanup() needs to synch the * threads and force-xl8 these. We should share code with detach. * Right now we rely on the app joining all its threads *before* * calling dr_app_cleanup(), or using dr_app_stop_and_cleanup[_with_stats](). * This also means we have a race with unhook_vsyscall in * os_process_not_under_dynamorio(), which we solve by redirecting * threads at syscalls to our gencode. */ translate_from_synchall_to_dispatch(threads[i], desired_state); } } end_synch_with_all_threads(threads, num_threads, true /*resume*/); os_process_not_under_dynamorio(my_dcontext); if (waslinking) enter_couldbelinking(my_dcontext, NULL, false); return; } void detach_on_permanent_stack(bool internal, bool do_cleanup, dr_stats_t *drstats) { dcontext_t *my_dcontext; thread_record_t **threads; thread_record_t *my_tr = NULL; int i, num_threads, my_idx = -1; thread_id_t my_id; #ifdef WINDOWS bool detach_stacked_callbacks; bool *cleanup_tpc; #endif DEBUG_DECLARE(bool ok;) DEBUG_DECLARE(int exit_res;) /* synch-all flags: */ uint flags = 0; #ifdef WINDOWS /* For Windows we may fail to suspend a thread (e.g., privilege * problems), and in that case we want to just ignore the failure. */ flags |= THREAD_SYNCH_SUSPEND_FAILURE_IGNORE; #elif defined(UNIX) /* For Unix, such privilege problems are rarer but we would still prefer to * continue if we hit a problem. */ flags |= THREAD_SYNCH_SUSPEND_FAILURE_IGNORE; #endif /* i#297: we only synch client threads after process exit event. */ flags |= THREAD_SYNCH_SKIP_CLIENT_THREAD; ENTERING_DR(); /* dynamo_detaching_flag is not really a lock, and since no one ever waits * on it we can't deadlock on it either. */ if (!atomic_compare_exchange(&dynamo_detaching_flag, LOCK_FREE_STATE, LOCK_SET_STATE)) return; /* Unprotect .data for exit cleanup. 
* XXX: more secure to not do this until we've synched, but then need * alternative prot for started_detach and init_apc_go_native* */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); ASSERT(!started_detach); started_detach = true; if (!internal) { synchronize_dynamic_options(); if (!DYNAMO_OPTION(allow_detach)) { started_detach = false; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); dynamo_detaching_flag = LOCK_FREE_STATE; SYSLOG_INTERNAL_ERROR("Detach called without the allow_detach option set"); EXITING_DR(); return; } } ASSERT(dynamo_initialized); ASSERT(!dynamo_exited); my_id = get_thread_id(); my_dcontext = get_thread_private_dcontext(); if (my_dcontext == NULL) { /* We support detach after just dr_app_setup() with no start. */ ASSERT(!dynamo_started); my_tr = thread_lookup(my_id); ASSERT(my_tr != NULL); my_dcontext = my_tr->dcontext; os_process_under_dynamorio_initiate(my_dcontext); os_process_under_dynamorio_complete(my_dcontext); dynamo_thread_under_dynamo(my_dcontext); ASSERT(get_thread_private_dcontext() == my_dcontext); } ASSERT(my_dcontext != NULL); LOG(GLOBAL, LOG_ALL, 1, "Detach: thread %d starting detach process\n", my_id); SYSLOG(SYSLOG_INFORMATION, INFO_DETACHING, 2, get_application_name(), get_application_pid()); /* synch with flush */ if (my_dcontext != NULL) enter_threadexit(my_dcontext); #ifdef WINDOWS /* Signal to go native at APC init here. Set pause first so that threads * will wait till we are ready for them to go native (after ntdll unpatching). * (To avoid races these must be set in this order!) */ init_apc_go_native_pause = true; init_apc_go_native = true; /* XXX i#2611: there is still a race for threads caught between init_apc_go_native * and dynamo_thread_init adding to all_threads: this just reduces the risk. * Unfortunately we can't easily use the UNIX solution of uninit_thread_count * since we can't distinguish internally vs externally created threads. */ os_thread_yield(); # ifdef CLIENT_INTERFACE wait_for_outstanding_nudges(); # endif #endif /* suspend all DR-controlled threads at safe locations */ if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT, &threads, &num_threads, /* Case 6821: allow other synch-all-thread uses * that beat us to not wait on us. We still have * a problem if we go first since we must xfer * other threads. */ THREAD_SYNCH_NO_LOCKS_NO_XFER, flags)) { REPORT_FATAL_ERROR_AND_EXIT(my_dcontext, FAILED_TO_SYNCHRONIZE_THREADS, 2, get_application_name(), get_application_pid()); } /* Now we own the thread_initexit_lock. We'll release the locks grabbed in * synch_with_all_threads below after cleaning up all the threads in case we * need to grab it during process exit cleanup. */ ASSERT(mutex_testlock(&all_threads_synch_lock) && mutex_testlock(&thread_initexit_lock)); ASSERT(!doing_detach); doing_detach = true; #ifdef HOT_PATCHING_INTERFACE /* In hotp_only mode, we must remove patches when detaching; we don't want * to leave in all our hooks and detach; that will definitely crash the app. */ if (DYNAMO_OPTION(hotp_only)) hotp_only_detach_helper(); #endif #ifdef WINDOWS /* XXX: maybe we should re-check for additional threads that passed the init_apc * lock but weren't yet initialized and so didn't show up on the list? */ LOG(GLOBAL, LOG_ALL, 1, "Detach : about to unpatch ntdll.dll and fix memory permissions\n"); detach_remove_image_entry_hook(num_threads, threads); if (!INTERNAL_OPTION(noasynch)) { /* We have to do this here, before client exit events, as we're letting * threads go native next. 
We thus will not detect crashes during client * exit during detach. */ callback_interception_unintercept(); } #endif if (!DYNAMO_OPTION(thin_client)) revert_memory_regions(); #ifdef UNIX unhook_vsyscall(); #endif LOG(GLOBAL, LOG_ALL, 1, "Detach : unpatched ntdll.dll and fixed memory permissions\n"); #ifdef WINDOWS /* Release the APC init lock and let any threads waiting there go native */ LOG(GLOBAL, LOG_ALL, 1, "Detach : Releasing init_apc_go_native_pause\n"); init_apc_go_native_pause = false; #else /* i#2270: we ignore alarm signals during detach to reduce races. */ signal_remove_alarm_handlers(my_dcontext); #endif /* perform exit tasks that require full thread data structs */ dynamo_process_exit_with_thread_info(); #ifdef WINDOWS /* We need to record a bool indicating whether we can free each thread's * resources fully or whether we need them for callback cleanup. */ cleanup_tpc = (bool *)global_heap_alloc(num_threads * sizeof(bool) HEAPACCT(ACCT_OTHER)); /* Handle any outstanding callbacks */ detach_stacked_callbacks = detach_handle_callbacks(num_threads, threads, cleanup_tpc); #endif LOG(GLOBAL, LOG_ALL, 1, "Detach: starting to translate contexts\n"); for (i = 0; i < num_threads; i++) { priv_mcontext_t mc; if (threads[i]->dcontext == my_dcontext) { my_idx = i; my_tr = threads[i]; continue; } else if (IS_CLIENT_THREAD(threads[i]->dcontext)) { /* i#297 we will kill client-owned threads later after app exit events * in dynamo_shared_exit(). */ continue; } else if (detach_do_not_translate(threads[i])) { LOG(GLOBAL, LOG_ALL, 2, "Detach: not translating " TIDFMT "\n", threads[i]->id); } else { LOG(GLOBAL, LOG_ALL, 2, "Detach: translating " TIDFMT "\n", threads[i]->id); DEBUG_DECLARE(ok =) thread_get_mcontext(threads[i], &mc); ASSERT(ok); /* FIXME i#95: this will xl8 to a post-syscall point for a thread at * a syscall, and we rely on the app itself to retry a syscall interrupted * by our suspend signal. This is not good enough, as this is an * artificial signal that the app has not planned for with SA_RESTART or * a loop. We want something like adjust_syscall_for_restart(). * Xref i#1145. */ DEBUG_DECLARE(ok =) translate_mcontext(threads[i], &mc, true /*restore mem*/, NULL /*f*/); ASSERT(ok); if (!threads[i]->under_dynamo_control) { LOG(GLOBAL, LOG_ALL, 1, "Detach : thread " TIDFMT " already running natively\n", threads[i]->id); /* we do need to restore the app ret addr, for native_exec */ if (!DYNAMO_OPTION(thin_client) && DYNAMO_OPTION(native_exec) && !vmvector_empty(native_exec_areas)) { put_back_native_retaddrs(threads[i]->dcontext); } } detach_finalize_translation(threads[i], &mc); LOG(GLOBAL, LOG_ALL, 1, "Detach: pc=" PFX " for thread " TIDFMT "\n", mc.pc, threads[i]->id); ASSERT(!is_dynamo_address(mc.pc) && !in_fcache(mc.pc)); /* XXX case 7457: if the thread is suspended after it received a fault * but before the kernel copied the faulting context to the user mode * structures for the handler, it could result in a codemod exception * that wouldn't happen natively! */ DEBUG_DECLARE(ok =) thread_set_mcontext(threads[i], &mc); ASSERT(ok); /* i#249: restore app's PEB/TEB fields */ IF_WINDOWS(restore_peb_pointer_for_thread(threads[i]->dcontext)); } /* Resumes the thread, which will do kernel-visible cleanup of * signal state. Resume happens within the synch_all region where * the thread_initexit_lock is held so that we can clean up thread * data later.
*/ #ifdef UNIX os_signal_thread_detach(threads[i]->dcontext); #endif LOG(GLOBAL, LOG_ALL, 1, "Detach: thread " TIDFMT " is being resumed as native\n", threads[i]->id); os_thread_resume(threads[i]); } ASSERT(my_idx != -1 || !internal); #ifdef UNIX LOG(GLOBAL, LOG_ALL, 1, "Detach: waiting for threads to fully detach\n"); for (i = 0; i < num_threads; i++) { if (i != my_idx && !IS_CLIENT_THREAD(threads[i]->dcontext)) os_wait_thread_detached(threads[i]->dcontext); } #endif if (!do_cleanup) return; /* Clean up each thread now that everyone has gone native. Needs to be * done with the thread_initexit_lock held, which is true within a synched * region. */ for (i = 0; i < num_threads; i++) { if (i != my_idx && !IS_CLIENT_THREAD(threads[i]->dcontext)) { LOG(GLOBAL, LOG_ALL, 1, "Detach: cleaning up thread " TIDFMT " %s\n", threads[i]->id, IF_WINDOWS_ELSE(cleanup_tpc[i] ? "and its TPC" : "", "")); dynamo_other_thread_exit(threads[i] _IF_WINDOWS(!cleanup_tpc[i])); } } if (my_idx != -1) { /* pre-client thread cleanup (PR 536058) */ dynamo_thread_exit_pre_client(my_dcontext, my_tr->id); } LOG(GLOBAL, LOG_ALL, 1, "Detach: Letting slave threads go native\n"); #ifdef WINDOWS global_heap_free(cleanup_tpc, num_threads * sizeof(bool) HEAPACCT(ACCT_OTHER)); /* XXX: there's a possible race if a thread waiting at APC is still there * when we unload our dll. */ os_thread_yield(); #endif end_synch_with_all_threads(threads, num_threads, false /*don't resume */); threads = NULL; LOG(GLOBAL, LOG_ALL, 1, "Detach: Entering final cleanup and unload\n"); SYSLOG_INTERNAL_INFO("Detaching from process, entering final cleanup"); if (drstats != NULL) stats_get_snapshot(drstats); DEBUG_DECLARE(exit_res =) dynamo_shared_exit(my_tr _IF_WINDOWS(detach_stacked_callbacks)); ASSERT(exit_res == SUCCESS); detach_finalize_cleanup(); stack_free(initstack, DYNAMORIO_STACK_SIZE); dynamo_exit_post_detach(); doing_detach = false; started_detach = false; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); dynamo_detaching_flag = LOCK_FREE_STATE; EXITING_DR(); }
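For readers skimming this file in isolation, the pending_synch_count / synch_perm handshake implemented above by adjust_wait_at_safe_spot(), should_wait_at_safe_spot(), and check_wait_at_safe_spot() is easiest to see stripped of everything else. Below is a minimal standalone sketch, not DynamoRIO code: it substitutes C11 atomics and sched_yield() for DR's spinmutex and synch_thread_yield(), omits the set-context, exit-hook, and detach handling entirely, and every name ending in _mock is invented for illustration.

/* mock_safe_spot.c: build with cc -pthread mock_safe_spot.c */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

enum { SYNCH_NONE_MOCK = 0, SYNCH_NO_LOCKS_MOCK = 1 };

static atomic_int pending_synch_count_mock; /* cf. tsd->pending_synch_count */
static atomic_int synch_perm_mock;          /* cf. tsd->synch_perm */

/* Called by the target at each of its safe spots: if an initiator is waiting,
 * publish a permission and park until released (cf. check_wait_at_safe_spot). */
static void
check_wait_at_safe_spot_mock(void)
{
    if (atomic_load(&pending_synch_count_mock) == 0)
        return; /* fast path, cf. should_wait_at_safe_spot() */
    atomic_store(&synch_perm_mock, SYNCH_NO_LOCKS_MOCK);
    while (atomic_load(&pending_synch_count_mock) > 0)
        sched_yield(); /* cf. synch_thread_yield() */
    atomic_store(&synch_perm_mock, SYNCH_NONE_MOCK);
}

static void *
target_thread_mock(void *arg)
{
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        /* ...work that holds no locks across this call... */
        check_wait_at_safe_spot_mock();
    }
    return NULL;
}

int
main(void)
{
    pthread_t t;
    /* Initiator announces itself first, cf. adjust_wait_at_safe_spot(dc, 1). */
    atomic_fetch_add(&pending_synch_count_mock, 1);
    pthread_create(&t, NULL, target_thread_mock, NULL);
    /* Wait until the target parks with a permission we can act under. */
    while (atomic_load(&synch_perm_mock) != SYNCH_NO_LOCKS_MOCK)
        sched_yield();
    printf("target parked at a safe spot; initiator may examine it\n");
    /* Release the target, cf. adjust_wait_at_safe_spot(dc, -1). */
    atomic_fetch_add(&pending_synch_count_mock, -1);
    pthread_join(t, NULL);
    return 0;
}

The point of the sketch is the division of labor: the initiator only bumps a counter and waits for a published permission, while it is always the target that parks itself at a point where it owns no locks and has a valid native state, which is what lets the initiator suspend, examine, or redirect it safely. The real routines additionally guard synch_perm with tsd->synch_lock and call the EXITING_DR()/ENTERING_DR() hooks around the park, which is the subtlety most of the comments above are about.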
1
14,051
Better to call this before EXITING_DR, which is supposed to be last.
DynamoRIO-dynamorio
c
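The review message above ("Better to call this before EXITING_DR which is supposed to be last") is about ordering at the very end of the detach routine shown in the old file. Below is a minimal C sketch of that constraint, assuming the call being moved is the one the patch hunk (not included in this excerpt) introduces; new_cleanup_call() is a placeholder name, not the actual function from the patch:

```c
/* Tail of the detach routine: EXITING_DR() is meant to be the final
 * statement, so any newly added call must sit above it.
 * new_cleanup_call() is a placeholder for whatever the patch adds. */
doing_detach = false;
started_detach = false;
new_cleanup_call();                       /* placeholder: runs before EXITING_DR */
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
dynamo_detaching_flag = LOCK_FREE_STATE;
EXITING_DR();                             /* stays last */
```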
@@ -295,8 +295,6 @@ public class MainActivity extends BaseActivity implements CustomTabActivityHelpe switch ((int) drawerItem.getIdentifier()) { case ITEM_HOME: fragment = new HomeFragment(); - // recreate when Home is pressed - recreate(); break; case ITEM_SEARCH_BY_CODE: fragment = new FindProductFragment();
1
package openfoodfacts.github.scrachx.openfood.views; import android.Manifest; import android.app.LoaderManager; import android.app.SearchManager; import android.content.ActivityNotFoundException; import android.content.Context; import android.content.DialogInterface; import android.content.Intent; import android.content.SharedPreferences; import android.content.pm.ActivityInfo; import android.content.pm.PackageManager; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import android.graphics.Color; import android.hardware.Sensor; import android.hardware.SensorManager; import android.net.ConnectivityManager; import android.net.NetworkInfo; import android.net.Uri; import android.os.Build; import android.os.Bundle; import android.preference.PreferenceManager; import android.provider.Settings; import android.support.annotation.NonNull; import android.support.annotation.Nullable; import android.support.customtabs.CustomTabsIntent; import android.support.v4.app.ActivityCompat; import android.support.v4.app.Fragment; import android.support.v4.app.FragmentManager; import android.support.v4.content.ContextCompat; import android.support.v4.view.MenuItemCompat; import android.support.v7.app.AlertDialog; import android.support.v7.widget.SearchView; import android.support.v7.widget.Toolbar; import android.util.Log; import android.view.LayoutInflater; import android.view.Menu; import android.view.MenuItem; import android.view.View; import android.view.ViewGroup; import android.widget.EditText; import android.widget.ImageView; import android.widget.LinearLayout; import android.widget.Toast; import com.afollestad.materialdialogs.MaterialDialog; import com.google.zxing.BinaryBitmap; import com.google.zxing.ChecksumException; import com.google.zxing.DecodeHintType; import com.google.zxing.FormatException; import com.google.zxing.LuminanceSource; import com.google.zxing.MultiFormatReader; import com.google.zxing.NotFoundException; import com.google.zxing.RGBLuminanceSource; import com.google.zxing.Reader; import com.google.zxing.Result; import com.google.zxing.common.HybridBinarizer; import com.mikepenz.fastadapter.commons.utils.RecyclerViewCacheUtil; import com.mikepenz.google_material_typeface_library.GoogleMaterial; import com.mikepenz.materialdrawer.AccountHeader; import com.mikepenz.materialdrawer.AccountHeaderBuilder; import com.mikepenz.materialdrawer.Drawer; import com.mikepenz.materialdrawer.DrawerBuilder; import com.mikepenz.materialdrawer.holder.BadgeStyle; import com.mikepenz.materialdrawer.holder.StringHolder; import com.mikepenz.materialdrawer.model.DividerDrawerItem; import com.mikepenz.materialdrawer.model.PrimaryDrawerItem; import com.mikepenz.materialdrawer.model.ProfileDrawerItem; import com.mikepenz.materialdrawer.model.ProfileSettingDrawerItem; import com.mikepenz.materialdrawer.model.SectionDrawerItem; import com.mikepenz.materialdrawer.model.interfaces.IDrawerItem; import com.mikepenz.materialdrawer.model.interfaces.IProfile; import java.io.File; import java.io.FileNotFoundException; import java.io.InputStream; import java.util.ArrayList; import java.util.Hashtable; import java.util.List; import java.util.Set; import butterknife.BindView; import openfoodfacts.github.scrachx.openfood.BuildConfig; import openfoodfacts.github.scrachx.openfood.R; import openfoodfacts.github.scrachx.openfood.fragments.AllergensAlertFragment; import openfoodfacts.github.scrachx.openfood.fragments.FindProductFragment; import openfoodfacts.github.scrachx.openfood.fragments.HomeFragment; 
import openfoodfacts.github.scrachx.openfood.fragments.OfflineEditFragment; import openfoodfacts.github.scrachx.openfood.fragments.PreferencesFragment; import openfoodfacts.github.scrachx.openfood.models.ProductImage; import openfoodfacts.github.scrachx.openfood.models.LabelName; import openfoodfacts.github.scrachx.openfood.models.LabelNameDao; import openfoodfacts.github.scrachx.openfood.models.SendProductDao; import openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIClient; import openfoodfacts.github.scrachx.openfood.utils.LocaleHelper; import openfoodfacts.github.scrachx.openfood.utils.NavigationDrawerListener; import openfoodfacts.github.scrachx.openfood.utils.SearchType; import openfoodfacts.github.scrachx.openfood.utils.ShakeDetector; import openfoodfacts.github.scrachx.openfood.utils.Utils; import openfoodfacts.github.scrachx.openfood.views.category.activity.CategoryActivity; import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabActivityHelper; import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabsHelper; import openfoodfacts.github.scrachx.openfood.views.customtabs.WebViewFallback; import static openfoodfacts.github.scrachx.openfood.models.ProductImageField.OTHER; import static org.apache.commons.lang3.StringUtils.isNotEmpty; public class MainActivity extends BaseActivity implements CustomTabActivityHelper.ConnectionCallback, NavigationDrawerListener, SharedPreferences.OnSharedPreferenceChangeListener { private static final int LOGIN_REQUEST = 1; private static final long USER_ID = 500; private static final String CONTRIBUTIONS_SHORTCUT = "CONTRIBUTIONS"; private static final String SCAN_SHORTCUT = "SCAN"; private static final String BARCODE_SHORTCUT = "BARCODE"; @BindView(R.id.toolbar) Toolbar toolbar; private AccountHeader headerResult = null; private Drawer result = null; private MenuItem searchMenuItem; private CustomTabActivityHelper customTabActivityHelper; private CustomTabsIntent customTabsIntent; private Uri userAccountUri; private Uri contributeUri; private Uri discoverUri; private Uri userContributeUri; private SendProductDao mSendProductDao; private LabelNameDao labelNameDao; private int numberOFSavedProducts; private SharedPreferences mSharedPref; PrimaryDrawerItem primaryDrawerItem; private int positionOfOfflineBadeItem; private String mBarcode; private SensorManager mSensorManager; private Sensor mAccelerometer; private ShakeDetector mShakeDetector; // boolean to determine if scan on shake feature should be enabled private boolean scanOnShake; private SharedPreferences shakePreference; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); if (getResources().getBoolean(R.bool.portrait_only)) { setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT); } setContentView(R.layout.activity_main); shakePreference = PreferenceManager.getDefaultSharedPreferences(this); scanOnShake = shakePreference.getBoolean("shakeScanMode", false); Utils.hideKeyboard(this); final IProfile<ProfileDrawerItem> profile = getUserProfile(); LocaleHelper.setLocale(this, LocaleHelper.getLanguage(this)); setSupportActionBar(toolbar); getSupportActionBar().setDisplayHomeAsUpEnabled(false); Bundle extras = getIntent().getExtras(); FragmentManager fragmentManager = getSupportFragmentManager(); mSendProductDao = Utils.getAppDaoSession(MainActivity.this).getSendProductDao(); numberOFSavedProducts = mSendProductDao.loadAll().size(); // Get the user preference for scan on shake feature and open 
ScannerFragmentActivity if the user has enabled the feature mSensorManager = (SensorManager) getSystemService(Context.SENSOR_SERVICE); mAccelerometer = mSensorManager.getDefaultSensor(Sensor.TYPE_ACCELEROMETER); mShakeDetector = new ShakeDetector(); Log.i("Shake", String.valueOf(scanOnShake)); mShakeDetector.setOnShakeListener(new ShakeDetector.OnShakeDetected() { @Override public void onShake(int count) { if (scanOnShake) { Utils.scan(MainActivity.this); } } }); fragmentManager.addOnBackStackChangedListener(new FragmentManager.OnBackStackChangedListener() { @Override public void onBackStackChanged() { } }); boolean isOpenOfflineEdit = extras != null && extras.getBoolean("openOfflineEdit"); if (isOpenOfflineEdit) { fragmentManager.beginTransaction().replace(R.id.fragment_container, new OfflineEditFragment()).commit(); getSupportActionBar().setTitle(getResources().getString(R.string.offline_edit_drawer)); } else { fragmentManager.beginTransaction().replace(R.id.fragment_container, new HomeFragment ()).commit(); getSupportActionBar().setTitle(getResources().getString(R.string.home_drawer)); } // chrome custom tab init customTabActivityHelper = new CustomTabActivityHelper(); customTabActivityHelper.setConnectionCallback(this); customTabsIntent = CustomTabsHelper.getCustomTabsIntent(getBaseContext(), customTabActivityHelper.getSession()); // Create the AccountHeader headerResult = new AccountHeaderBuilder() .withActivity(this) .withTranslucentStatusBar(true) .withHeaderBackground(R.drawable.header) .addProfiles(profile) .withSelectionListEnabledForSingleProfile(false) .withOnAccountHeaderListener((view, profile1, current) -> { if (profile1 instanceof IDrawerItem) { if (profile1.getIdentifier() == ITEM_MANAGE_ACCOUNT) { CustomTabActivityHelper.openCustomTab(MainActivity.this, customTabsIntent, userAccountUri, new WebViewFallback()); } } //false if you have not consumed the event and it should close the drawer return false; }) .withSavedInstance(savedInstanceState) .build(); // Add Manage Account profile if the user is connected SharedPreferences preferences = getSharedPreferences("login", 0); String userLogin = preferences.getString("user", null); String userSession = preferences.getString("user_session", null); boolean isUserConnected = userLogin != null && userSession != null; boolean isConnected = userLogin != null; if (isUserConnected) { userAccountUri = Uri.parse(getString(R.string.website) + "cgi/user.pl?type=edit&userid=" + userLogin + "&user_id=" + userLogin + "&user_session=" + userSession); customTabActivityHelper.mayLaunchUrl(userAccountUri, null, null); headerResult.addProfiles(getProfileSettingDrawerItem()); } primaryDrawerItem = createOfflineEditDrawerItem(); //Create the drawer result = new DrawerBuilder() .withActivity(this) .withToolbar(toolbar) .withHasStableIds(true) .withAccountHeader(headerResult) //set the AccountHeader we created earlier for the header .withOnDrawerListener(new Drawer.OnDrawerListener() { @Override public void onDrawerOpened(View drawerView) { Utils.hideKeyboard(MainActivity.this); } @Override public void onDrawerClosed(View drawerView) { } @Override public void onDrawerSlide(View drawerView, float slideOffset) { Utils.hideKeyboard(MainActivity.this); } }) .addDrawerItems( new PrimaryDrawerItem().withName(R.string.home_drawer).withIcon(GoogleMaterial.Icon.gmd_home).withIdentifier(ITEM_HOME), new SectionDrawerItem().withName(R.string.search_drawer), new 
PrimaryDrawerItem().withName(R.string.search_by_barcode_drawer).withIcon(GoogleMaterial.Icon.gmd_dialpad).withIdentifier(ITEM_SEARCH_BY_CODE), new PrimaryDrawerItem().withName(R.string.search_by_category).withIcon(GoogleMaterial.Icon.gmd_filter_list).withIdentifier(ITEM_CATEGORIES).withSelectable(false), new PrimaryDrawerItem().withName(R.string.scan_search).withIcon(R.drawable.barcode_grey_24dp).withIdentifier(ITEM_SCAN).withSelectable(false), new PrimaryDrawerItem().withName(R.string.advanced_search_title).withIcon(GoogleMaterial.Icon.gmd_insert_chart).withIdentifier(ITEM_ADVANCED_SEARCH).withSelectable(false), new PrimaryDrawerItem().withName(R.string.scan_history_drawer).withIcon(GoogleMaterial.Icon.gmd_history).withIdentifier(ITEM_HISTORY).withSelectable(false), new SectionDrawerItem().withName(R.string.user_drawer).withIdentifier(USER_ID), new PrimaryDrawerItem().withName(getString(R.string.action_contributes)).withIcon(GoogleMaterial.Icon.gmd_rate_review).withIdentifier(ITEM_MY_CONTRIBUTIONS).withSelectable(false), new PrimaryDrawerItem().withName(R.string.alert_drawer).withIcon(GoogleMaterial.Icon.gmd_warning).withIdentifier(ITEM_ALERT), new PrimaryDrawerItem().withName(R.string.action_preferences).withIcon(GoogleMaterial.Icon.gmd_settings).withIdentifier(ITEM_PREFERENCES), new DividerDrawerItem(), primaryDrawerItem, new DividerDrawerItem(), new PrimaryDrawerItem().withName(R.string.action_discover).withIcon(GoogleMaterial.Icon.gmd_info).withIdentifier(ITEM_ABOUT).withSelectable(false), new PrimaryDrawerItem().withName(R.string.contribute).withIcon(R.drawable.ic_group_grey_24dp).withIdentifier(ITEM_CONTRIBUTE).withSelectable(false), new PrimaryDrawerItem().withName(R.string.open_beauty_drawer).withIcon(GoogleMaterial.Icon.gmd_shop).withIdentifier(ITEM_OBF).withSelectable(false) ) .withOnDrawerItemClickListener((view, position, drawerItem) -> { if (drawerItem == null) { return false; } Fragment fragment = null; switch ((int) drawerItem.getIdentifier()) { case ITEM_HOME: fragment = new HomeFragment(); // recreate when Home is pressed recreate(); break; case ITEM_SEARCH_BY_CODE: fragment = new FindProductFragment(); break; case ITEM_CATEGORIES: startActivity(CategoryActivity.getIntent(this)); break; case ITEM_SCAN: scan(); break; case ITEM_HISTORY: startActivity(new Intent(MainActivity.this, HistoryScanActivity.class)); break; case ITEM_LOGIN: startActivityForResult(new Intent(MainActivity.this, LoginActivity .class), LOGIN_REQUEST); break; case ITEM_ALERT: fragment = new AllergensAlertFragment(); break; case ITEM_PREFERENCES: fragment = new PreferencesFragment(); break; case ITEM_OFFLINE: fragment = new OfflineEditFragment(); break; case ITEM_ABOUT: CustomTabActivityHelper.openCustomTab(MainActivity.this, customTabsIntent, discoverUri, new WebViewFallback()); break; case ITEM_CONTRIBUTE: CustomTabActivityHelper.openCustomTab(MainActivity.this, customTabsIntent, contributeUri, new WebViewFallback()); break; case ITEM_OBF: boolean otherOFAppInstalled = Utils.isApplicationInstalled (MainActivity.this, BuildConfig.OFOTHERLINKAPP); if (otherOFAppInstalled) { Intent LaunchIntent = getPackageManager() .getLaunchIntentForPackage(BuildConfig.OFOTHERLINKAPP); if (LaunchIntent != null) { startActivity(LaunchIntent); } else { Toast.makeText(this, R.string.app_disabled_text, Toast.LENGTH_SHORT).show(); Intent intent = new Intent(); intent.setAction(Settings.ACTION_APPLICATION_DETAILS_SETTINGS); Uri uri = Uri.fromParts("package", BuildConfig.OFOTHERLINKAPP, null); intent.setData(uri); 
startActivity(intent); } } else { try { startActivity(new Intent(Intent.ACTION_VIEW, Uri.parse ("market://details?id=" + BuildConfig.OFOTHERLINKAPP))); } catch (ActivityNotFoundException anfe) { startActivity(new Intent(Intent.ACTION_VIEW, Uri.parse("https://play.google.com/store/apps/details?id=" + BuildConfig.OFOTHERLINKAPP))); } } break; case ITEM_ADVANCED_SEARCH: CustomTabsIntent.Builder builder = new CustomTabsIntent.Builder(); CustomTabsIntent customTabsIntent = builder.build(); CustomTabActivityHelper.openCustomTab(this, customTabsIntent, Uri.parse(getString(R.string.advanced_search_url)), new WebViewFallback()); break; case ITEM_MY_CONTRIBUTIONS: myContributions(); break; case ITEM_LOGOUT: new MaterialDialog.Builder(MainActivity.this) .title(R.string.confirm_logout) .content(R.string.logout_dialog_content) .positiveText(R.string.txtOk) .negativeText(R.string.dialog_cancel) .onPositive((dialog, which) -> logout()) .onNegative((dialog, which) -> Toast.makeText(getApplicationContext(), "Cancelled", Toast.LENGTH_SHORT).show()).show(); break; default: // nothing to do break; } if (fragment != null) { getSupportFragmentManager().beginTransaction().replace(R.id.fragment_container, fragment).addToBackStack(null).commit(); } else { // error in creating fragment Log.e("MainActivity", "Error in creating fragment"); } return false; }) .withSavedInstance(savedInstanceState) .withShowDrawerOnFirstLaunch(false) .build(); result.getActionBarDrawerToggle().setDrawerIndicatorEnabled(true); // Add Drawer items for the connected user result.addItemsAtPosition(result.getPosition(ITEM_MY_CONTRIBUTIONS), isConnected ? getLogoutDrawerItem() : getLoginDrawerItem()); if (BuildConfig.FLAVOR.equals("obf")) { result.removeItem(ITEM_ALERT); result.updateName(ITEM_OBF, new StringHolder(getString(R.string.open_food_drawer))); } if (BuildConfig.FLAVOR.equals("opff")) { result.removeItem(ITEM_ALERT); result.updateName(ITEM_OBF, new StringHolder(getString(R.string.open_food_drawer))); } if (BuildConfig.FLAVOR.equals("opf")) { result.removeItem(ITEM_ALERT); result.removeItem(ITEM_ADVANCED_SEARCH); result.removeItem(ITEM_CATEGORIES); result.updateName(ITEM_OBF, new StringHolder(getString(R.string.open_food_drawer))); } // Remove scan item if the device does not have a camera, for example, Chromebooks or // Fire devices if (!Utils.isHardwareCameraInstalled(this)) { result.removeItem(ITEM_SCAN); } //if you have many different types of DrawerItems you can magically pre-cache those items // to get a better scroll performance //make sure to init the cache after the DrawerBuilder was created as this will first // clear the cache to make sure no old elements are in //RecyclerViewCacheUtil.getInstance().withCacheSize(2).init(result); new RecyclerViewCacheUtil<IDrawerItem>().withCacheSize(2).apply(result.getRecyclerView(), result.getDrawerItems()); //only set the active selection or active profile if we do not recreate the activity if (savedInstanceState == null) { // set the selection to the item with the identifier 1 result.setSelection(ITEM_HOME, false); //set the active profile headerResult.setActiveProfile(profile); } SharedPreferences settings = PreferenceManager.getDefaultSharedPreferences(getBaseContext ()); if (settings.getBoolean("startScan", false)) { Intent cameraIntent = new Intent(MainActivity.this, ScannerFragmentActivity.class); cameraIntent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP); startActivity(cameraIntent); } // prefetch uris contributeUri = Uri.parse(getString(R.string.website_contribute)); discoverUri = 
Uri.parse(getString(R.string.website_discover)); userContributeUri = Uri.parse(getString(R.string.website_contributor) + userLogin); customTabActivityHelper.mayLaunchUrl(contributeUri, null, null); customTabActivityHelper.mayLaunchUrl(discoverUri, null, null); customTabActivityHelper.mayLaunchUrl(userContributeUri, null, null); if (CONTRIBUTIONS_SHORTCUT.equals(getIntent().getAction())) { myContributions(); } if (SCAN_SHORTCUT.equals(getIntent().getAction())) { scan(); } if (BARCODE_SHORTCUT.equals(getIntent().getAction())) { moveToBarcodeEntry(); } //Scheduling background image upload job Utils.scheduleProductUploadJob(this); //Adds nutriscore and quantity values in old history for schema 5 update mSharedPref = getApplicationContext().getSharedPreferences("prefs", 0); boolean isOldHistoryDataSynced = mSharedPref.getBoolean("is_old_history_data_synced", false); if (!isOldHistoryDataSynced && Utils.isNetworkConnected(this)) { OpenFoodAPIClient apiClient = new OpenFoodAPIClient(this); apiClient.syncOldHistory(); } } private void scan() { if (ContextCompat.checkSelfPermission(MainActivity.this, Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED) { if (ActivityCompat.shouldShowRequestPermissionRationale(MainActivity.this, Manifest .permission.CAMERA)) { new MaterialDialog.Builder(MainActivity.this) .title(R.string.action_about) .content(R.string.permission_camera) .neutralText(R.string.txtOk) .show().setOnDismissListener(dialogInterface -> ActivityCompat.requestPermissions(MainActivity.this, new String[]{Manifest.permission.CAMERA}, Utils.MY_PERMISSIONS_REQUEST_CAMERA)); } else { ActivityCompat.requestPermissions(MainActivity.this, new String[]{Manifest .permission.CAMERA}, Utils.MY_PERMISSIONS_REQUEST_CAMERA); } } else { Intent intent = new Intent(MainActivity.this, ScannerFragmentActivity.class); intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP); startActivity(intent); } } private void myContributions() { SharedPreferences preferences1 = getSharedPreferences("login", 0); String userLogin1 = preferences1.getString("user", null); userContributeUri = Uri.parse(getString(R.string.website_contributor) + userLogin1); if (isNotEmpty(userLogin1)) { ProductBrowsingListActivity.startActivity(this, userLogin1, SearchType.CONTRIBUTOR); } else { new MaterialDialog.Builder(MainActivity.this) .title(R.string.contribute) .content(R.string.contribution_without_account) .positiveText(R.string.create_account_button) .neutralText(R.string.login_button) .onPositive((dialog, which) -> CustomTabActivityHelper.openCustomTab(MainActivity.this, customTabsIntent, Uri.parse(getString(R .string.website) + "cgi/user.pl"), new WebViewFallback())) .onNeutral((dialog, which) -> startActivityForResult(new Intent(MainActivity.this, LoginActivity.class), LOGIN_REQUEST)) .show(); } } private IProfile<ProfileSettingDrawerItem> getProfileSettingDrawerItem() { SharedPreferences preferences = getSharedPreferences("login", 0); String userLogin = preferences.getString("user", null); String userSession = preferences.getString("user_session", null); userAccountUri = Uri.parse(getString(R.string.website) + "cgi/user.pl?type=edit&userid=" + userLogin + "&user_id=" + userLogin + "&user_session=" + userSession); customTabActivityHelper.mayLaunchUrl(userAccountUri, null, null); return new ProfileSettingDrawerItem() .withName(getString(R.string.action_manage_account)) .withIcon(GoogleMaterial.Icon.gmd_settings) .withIdentifier(ITEM_MANAGE_ACCOUNT) .withSelectable(false); } /** * Replace logout menu item by the login menu item * 
Change current user profile (Anonymous) * Remove all Account Header items * Remove user login info */ private void logout() { getSharedPreferences("login", MODE_PRIVATE).edit().clear().commit(); headerResult.removeProfileByIdentifier(ITEM_MANAGE_ACCOUNT); headerResult.updateProfile(getUserProfile()); result.addItemAtPosition(getLoginDrawerItem(), result.getPosition(ITEM_MY_CONTRIBUTIONS)); result.removeItem(ITEM_LOGOUT); } @Override protected void onActivityResult(int requestCode, int resultCode, Intent data) { switch (requestCode) { case LOGIN_REQUEST: if (resultCode == RESULT_OK) { result.removeItem(ITEM_LOGIN); result.addItemsAtPosition(result.getPosition(ITEM_MY_CONTRIBUTIONS), getLogoutDrawerItem()); headerResult.updateProfile(getUserProfile()); headerResult.addProfiles(getProfileSettingDrawerItem()); } break; default: // do nothing break; } } @Override protected void onSaveInstanceState(Bundle outState) { //add the values which need to be saved from the drawer to the bundle outState = result.saveInstanceState(outState); //add the values which need to be saved from the accountHeader to the bundle outState = headerResult.saveInstanceState(outState); super.onSaveInstanceState(outState); } @Override public void onBackPressed() { //handle the back press :D close the drawer first and if the drawer is closed close the // activity if (result != null && result.isDrawerOpen()) { result.closeDrawer(); } else { if (getSupportFragmentManager().getBackStackEntryCount() > 0) { getSupportFragmentManager().popBackStack(getSupportFragmentManager().getBackStackEntryAt(0).getId(), getSupportFragmentManager().POP_BACK_STACK_INCLUSIVE); //recreate the activity onBackPressed recreate(); } else { super.onBackPressed(); } } } @Override public boolean onCreateOptionsMenu(Menu menu) { getMenuInflater().inflate(R.menu.menu_main, menu); // Associate searchable configuration with the SearchView SearchManager searchManager = (SearchManager) getSystemService(Context.SEARCH_SERVICE); searchMenuItem = menu.findItem(R.id.action_search); SearchView searchView = (SearchView) searchMenuItem.getActionView(); if (searchManager.getSearchableInfo(getComponentName()) != null) { searchView.setSearchableInfo(searchManager.getSearchableInfo(getComponentName())); } searchMenuItem.setOnActionExpandListener(new MenuItem.OnActionExpandListener() { @Override public boolean onMenuItemActionExpand(MenuItem menuItem) { return true; } @Override public boolean onMenuItemActionCollapse(MenuItem menuItem) { FragmentManager fragmentManager = getSupportFragmentManager(); Fragment currentFragment = fragmentManager.findFragmentById(R.id .fragment_container); return true; } }); return true; } @Override public void onRequestPermissionsResult(int requestCode, @NonNull String permissions[], @NonNull int[] grantResults) { super.onRequestPermissionsResult(requestCode, permissions, grantResults); switch (requestCode) { case Utils.MY_PERMISSIONS_REQUEST_CAMERA: { if (grantResults.length > 0 && grantResults[0] == PackageManager .PERMISSION_GRANTED) { Intent intent = new Intent(MainActivity.this, ScannerFragmentActivity.class); intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP); startActivity(intent); } } break; } } private IDrawerItem<PrimaryDrawerItem, com.mikepenz.materialdrawer.model .AbstractBadgeableDrawerItem.ViewHolder> getLogoutDrawerItem() { return new PrimaryDrawerItem() .withName(getString(R.string.logout_drawer)) .withIcon(GoogleMaterial.Icon.gmd_settings_power) .withIdentifier(ITEM_LOGOUT) .withSelectable(false); } private 
IDrawerItem<PrimaryDrawerItem, com.mikepenz.materialdrawer.model .AbstractBadgeableDrawerItem.ViewHolder> getLoginDrawerItem() { return new PrimaryDrawerItem() .withName(R.string.sign_in_drawer) .withIcon(GoogleMaterial.Icon.gmd_account_circle) .withIdentifier(ITEM_LOGIN) .withSelectable(false); } private IProfile<ProfileDrawerItem> getUserProfile() { String userLogin = getSharedPreferences("login", 0) .getString("user", getResources().getString(R.string.txt_anonymous)); return new ProfileDrawerItem() .withName(userLogin) .withIcon(R.drawable.img_home) .withIdentifier(ITEM_USER); } @Override public void onCustomTabsConnected() { } @Override public void onCustomTabsDisconnected() { } @Override protected void onStart() { super.onStart(); customTabActivityHelper.bindCustomTabsService(this); } @Override protected void onStop() { super.onStop(); customTabActivityHelper.unbindCustomTabsService(this); } @Override protected void onDestroy() { customTabActivityHelper.setConnectionCallback(null); super.onDestroy(); } @Override protected void onNewIntent(Intent intent) { String type = intent.getType(); if (Intent.ACTION_SEARCH.equals(intent.getAction())) { Log.e("INTENT", "start activity"); String query = intent.getStringExtra(SearchManager.QUERY); ProductBrowsingListActivity.startActivity(this, query, SearchType.SEARCH); if(searchMenuItem!=null) { searchMenuItem.collapseActionView(); } } else if (Intent.ACTION_SEND.equals(intent.getAction()) && type != null) { if (type.startsWith("image/")) { handleSendImage(intent); // Handle single image being sent } } else if (Intent.ACTION_SEND_MULTIPLE.equals(intent.getAction()) && type != null) { if (type.startsWith("image/")) { handleSendMultipleImages(intent); // Handle multiple images being sent } } } /** * This moves the main activity to the barcode entry fragment. */ public void moveToBarcodeEntry() { result.setSelection(ITEM_SEARCH_BY_CODE); Fragment fragment = new FindProductFragment(); getSupportActionBar().setTitle(getResources().getString(R.string.search_by_barcode_drawer)); getSupportFragmentManager().beginTransaction().replace(R.id.fragment_container, fragment).commit(); } /** * This moves the main activity to the preferences fragment. */ public void moveToPreferences() { result.setSelection(ITEM_PREFERENCES); Fragment fragment = new PreferencesFragment(); getSupportActionBar().setTitle(R.string.action_preferences); getSupportFragmentManager().beginTransaction().replace(R.id.fragment_container, fragment).commit(); } /** * Create the drawer item. This adds a badge if there are items in the offline edit, otherwise * there is no badge present. * * @return drawer item. */ private PrimaryDrawerItem createOfflineEditDrawerItem() { if (numberOFSavedProducts > 0) { return new PrimaryDrawerItem().withName(R.string.offline_edit_drawer).withIcon(GoogleMaterial.Icon.gmd_local_airport).withIdentifier(9) .withBadge(String.valueOf(numberOFSavedProducts)).withBadgeStyle(new BadgeStyle().withTextColor(Color.WHITE).withColorRes(R .color.md_red_700)); } else { return new PrimaryDrawerItem().withName(R.string.offline_edit_drawer).withIcon(GoogleMaterial.Icon.gmd_local_airport).withIdentifier(ITEM_OFFLINE); } } /** * Updates the drawer item. This updates the badge if there are items left in offline edit, otherwise * there is no badge present. * This function is called from OfflineEditFragment only. 
*/ public void updateBadgeOfflineEditDrawerITem(int size) { positionOfOfflineBadeItem = result.getPosition(primaryDrawerItem); if (size > 0) { primaryDrawerItem = new PrimaryDrawerItem().withName(R.string.offline_edit_drawer).withIcon(GoogleMaterial.Icon.gmd_local_airport).withIdentifier(ITEM_OFFLINE).withBadge(String.valueOf(size)).withBadgeStyle(new BadgeStyle().withTextColor(Color.WHITE).withColorRes(R.color.md_red_700)); } else { primaryDrawerItem = new PrimaryDrawerItem().withName(R.string.offline_edit_drawer).withIcon(GoogleMaterial.Icon.gmd_local_airport).withIdentifier(ITEM_OFFLINE); } result.updateItemAtPosition(primaryDrawerItem, positionOfOfflineBadeItem); } @Override public void setItemSelected(@NavigationDrawerType Integer type) { result.setSelection(type, false); } @Override public void onPause() { super.onPause(); shakePreference.unregisterOnSharedPreferenceChangeListener(this); if (scanOnShake) { // unregister the listener mSensorManager.unregisterListener(mShakeDetector, mAccelerometer); } } @Override public void onResume() { super.onResume(); shakePreference.registerOnSharedPreferenceChangeListener(this); if (scanOnShake) { //register the listener mSensorManager.registerListener(mShakeDetector, mAccelerometer, SensorManager.SENSOR_DELAY_UI); } } private void handleSendImage(Intent intent) { Uri selectedImage = null; ArrayList<Uri> selectedImagesArray = new ArrayList<>(); selectedImage = (Uri) intent.getParcelableExtra(Intent.EXTRA_STREAM); boolean isBarCodePresent = false; if (selectedImage != null) { selectedImagesArray.add(selectedImage); chooseDialog(selectedImagesArray); } } private void handleSendMultipleImages(Intent intent) { ArrayList<Uri> selectedImagesArray = intent.getParcelableArrayListExtra(Intent.EXTRA_STREAM); if (selectedImagesArray != null) { chooseDialog(selectedImagesArray); } } private void chooseDialog(ArrayList<Uri> selectedImagesArray) { boolean isBarCodePresent = false; isBarCodePresent = isBarCodePresent || detectBarCodeInImage(selectedImagesArray); if (isBarCodePresent) { createAlertDialog(false, mBarcode, selectedImagesArray); } else { createAlertDialog(true, "", selectedImagesArray); } } private boolean detectBarCodeInImage(ArrayList<Uri> selectedImages) { InputStream imageStream = null; for (Uri uri : selectedImages) { try { imageStream = getContentResolver().openInputStream(uri); } catch (FileNotFoundException e) { e.printStackTrace(); } //decoding bitmap Bitmap bMap = BitmapFactory.decodeStream(imageStream); int[] intArray = new int[bMap.getWidth() * bMap.getHeight()]; bMap.getPixels(intArray, 0, bMap.getWidth(), 0, 0, bMap.getWidth(), bMap.getHeight()); LuminanceSource source = new RGBLuminanceSource(bMap.getWidth(), bMap.getHeight(), intArray); BinaryBitmap bitmap = new BinaryBitmap(new HybridBinarizer(source)); Reader reader = new MultiFormatReader(); try { Hashtable<DecodeHintType, Object> decodeHints = new Hashtable<DecodeHintType, Object>(); decodeHints.put(DecodeHintType.TRY_HARDER, Boolean.TRUE); decodeHints.put(DecodeHintType.PURE_BARCODE, Boolean.TRUE); Result result = reader.decode(bitmap, decodeHints); mBarcode = result.getText().toString(); if (mBarcode != null) { return true; } } catch (NotFoundException e) { e.printStackTrace(); } catch (ChecksumException e) { e.printStackTrace(); } catch (FormatException e) { Toast.makeText(getApplicationContext(), getString(R.string.format_error), Toast.LENGTH_SHORT).show(); e.printStackTrace(); } catch (NullPointerException e) { e.printStackTrace(); } } return false; } private void 
createAlertDialog(boolean hasEditText, String barcode, ArrayList<Uri> uri) { AlertDialog.Builder alertDialogBuilder = new AlertDialog.Builder(this); LayoutInflater inflater = this.getLayoutInflater(); View dialogView = inflater.inflate(R.layout.alert_barcode, null); alertDialogBuilder.setView(dialogView); final EditText barcode_edittext = (EditText) dialogView.findViewById(R.id.barcode); final ImageView product_image = (ImageView) dialogView.findViewById(R.id.product_image); product_image.setImageURI(uri.get(0)); if (hasEditText) { barcode_edittext.setVisibility(View.VISIBLE); product_image.setVisibility(View.VISIBLE); alertDialogBuilder.setTitle(getString(R.string.no_barcode)); alertDialogBuilder.setMessage(getString(R.string.enter_barcode)); } else { alertDialogBuilder.setTitle(getString(R.string.code_detected)); alertDialogBuilder.setMessage(barcode + "\n" + getString(R.string.do_you_want_to)); } // set dialog message alertDialogBuilder .setCancelable(false) .setPositiveButton(R.string.txtYes, new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { String temp_barcode = ""; for (Uri selected : uri) { OpenFoodAPIClient api = new OpenFoodAPIClient(MainActivity.this); ProductImage image = null; ConnectivityManager cm = (ConnectivityManager) getSystemService(Context.CONNECTIVITY_SERVICE); NetworkInfo activeNetwork = cm.getActiveNetworkInfo(); if (hasEditText) { temp_barcode = barcode_edittext.getText().toString(); } else { temp_barcode = barcode; } if (temp_barcode.length() > 0) { dialog.cancel(); if (activeNetwork != null && activeNetwork.isConnectedOrConnecting()) { image = new ProductImage(temp_barcode, OTHER, new File(selected.getPath())); api.postImg(MainActivity.this, image); } else { Intent intent = new Intent(MainActivity.this, SaveProductOfflineActivity.class); intent.putExtra("barcode", barcode); startActivity(intent); } } else { Toast.makeText(MainActivity.this, getString(R.string.sorry_msg), Toast.LENGTH_LONG).show(); } } } }) .setNegativeButton(R.string.txtNo, new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { dialog.cancel(); } }); AlertDialog alertDialog = alertDialogBuilder.create(); alertDialog.show(); } @Override public void onSharedPreferenceChanged(SharedPreferences sharedPreferences, String s) { // restart activity if scan on shake is chosen if (sharedPreferences.getBoolean("shakeScanMode", false) != scanOnShake) { this.recreate(); } } }
1
64,442
Does this work at runtime?
openfoodfacts-openfoodfacts-androidapp
java
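The hunk above removes the recreate() call from the Home drawer case, and the review asks whether the result still works at runtime. For reference, the surviving code path (taken from the old file itself) falls through to the listener's shared fragment transaction, so Home is served by swapping in a fresh HomeFragment rather than recreating the whole activity:

```java
case ITEM_HOME:
    fragment = new HomeFragment(); // recreate() removed by the patch
    break;
// ... other cases ...
if (fragment != null) {
    // Shared transaction at the bottom of the drawer listener.
    getSupportFragmentManager()
            .beginTransaction()
            .replace(R.id.fragment_container, fragment)
            .addToBackStack(null)
            .commit();
}
```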
@@ -24,8 +24,9 @@ import ( const ( userAgentHeader = "User-Agent" - credsTimeout = 10 * time.Second - clientTimeout = 30 * time.Second + maxRetriesOnRecoverableFailures = 8 // Default provided by SDK is 3 which means requests are retried up to only 2 seconds. + credsTimeout = 10 * time.Second + clientTimeout = 30 * time.Second ) // Provider provides methods to create sessions.
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package sessions provides functions that return AWS sessions to use in the AWS SDK.
package sessions

import (
	"context"
	"fmt"
	"net/http"
	"runtime"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"

	"github.com/aws/copilot-cli/internal/pkg/version"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

const (
	userAgentHeader = "User-Agent"

	credsTimeout  = 10 * time.Second
	clientTimeout = 30 * time.Second
)

// Provider provides methods to create sessions.
// Once a session is created, it's cached locally so that the same session is not re-created.
type Provider struct {
	defaultSess *session.Session
}

var instance *Provider
var once sync.Once

// NewProvider returns a session Provider singleton.
func NewProvider() *Provider {
	once.Do(func() {
		instance = &Provider{}
	})
	return instance
}

// Default returns a session configured against the "default" AWS profile.
func (p *Provider) Default() (*session.Session, error) {
	if p.defaultSess != nil {
		return p.defaultSess, nil
	}

	sess, err := session.NewSessionWithOptions(session.Options{
		Config:            *newConfig(),
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		return nil, err
	}
	sess.Handlers.Build.PushBackNamed(userAgentHandler())
	p.defaultSess = sess
	return sess, nil
}

// DefaultWithRegion returns a session configured against the "default" AWS profile and the input region.
func (p *Provider) DefaultWithRegion(region string) (*session.Session, error) {
	sess, err := session.NewSessionWithOptions(session.Options{
		Config:            *newConfig().WithRegion(region),
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		return nil, err
	}
	sess.Handlers.Build.PushBackNamed(userAgentHandler())
	return sess, nil
}

// FromProfile returns a session configured against the input profile name.
func (p *Provider) FromProfile(name string) (*session.Session, error) {
	sess, err := session.NewSessionWithOptions(session.Options{
		Config:            *newConfig(),
		SharedConfigState: session.SharedConfigEnable,
		Profile:           name,
	})
	if err != nil {
		return nil, err
	}
	sess.Handlers.Build.PushBackNamed(userAgentHandler())
	return sess, nil
}

// FromRole returns a session configured against the input role and region.
func (p *Provider) FromRole(roleARN string, region string) (*session.Session, error) {
	defaultSession, err := p.Default()
	if err != nil {
		return nil, fmt.Errorf("error creating default session: %w", err)
	}

	creds := stscreds.NewCredentials(defaultSession, roleARN)
	sess, err := session.NewSession(
		newConfig().
			WithCredentials(creds).
			WithRegion(region),
	)
	if err != nil {
		return nil, err
	}
	sess.Handlers.Build.PushBackNamed(userAgentHandler())
	return sess, nil
}

// FromStaticCreds returns a session from static credentials.
func (p *Provider) FromStaticCreds(accessKeyID, secretAccessKey, sessionToken string) (*session.Session, error) {
	conf := newConfig()
	conf.Credentials = credentials.NewStaticCredentials(accessKeyID, secretAccessKey, sessionToken)
	sess, err := session.NewSessionWithOptions(session.Options{
		Config: *conf,
	})
	if err != nil {
		return nil, fmt.Errorf("create session from static credentials: %w", err)
	}
	sess.Handlers.Build.PushBackNamed(userAgentHandler())
	return sess, nil
}

// AreCredsFromEnvVars returns true if the session's credentials provider is environment variables, false otherwise.
// An error is returned if the credentials are invalid or the request times out.
func AreCredsFromEnvVars(sess *session.Session) (bool, error) {
	v, err := Creds(sess)
	if err != nil {
		return false, err
	}
	return v.ProviderName == session.EnvProviderName, nil
}

// Creds returns the credential values from a session.
func Creds(sess *session.Session) (credentials.Value, error) {
	ctx, cancel := context.WithTimeout(context.Background(), credsTimeout)
	defer cancel()

	v, err := sess.Config.Credentials.GetWithContext(ctx)
	if err != nil {
		return credentials.Value{}, fmt.Errorf("get credentials of session: %w", err)
	}
	return v, nil
}

// newConfig returns a config with an end-to-end request timeout and verbose credentials errors.
func newConfig() *aws.Config {
	c := &http.Client{
		Timeout: clientTimeout,
	}
	return aws.NewConfig().
		WithHTTPClient(c).
		WithCredentialsChainVerboseErrors(true)
}

// userAgentHandler returns a http request handler that sets a custom user agent to all aws requests.
func userAgentHandler() request.NamedHandler {
	return request.NamedHandler{
		Name: "UserAgentHandler",
		Fn: func(r *request.Request) {
			userAgent := r.HTTPRequest.Header.Get(userAgentHeader)
			r.HTTPRequest.Header.Set(userAgentHeader,
				fmt.Sprintf("aws-ecs-cli-v2/%s (%s) %s", version.Version, runtime.GOOS, userAgent))
		},
	}
}
1
16,510
Hmm why do we need to make this change? Like which one requires this change?
aws-copilot-cli
go
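The hunk above only adds the maxRetriesOnRecoverableFailures constant, which is what prompts the reviewer's question about which change actually needs it. Below is a sketch of the plausible consumer, assuming the rest of the patch threads the constant into newConfig(); WithMaxRetries is an existing aws-sdk-go v1 helper, but wiring it in here is an inference, not something shown in the hunk:

```go
// Hypothetical use of the new constant: raise the SDK's retry budget for
// recoverable (throttling/5xx) failures above the default of 3 attempts.
func newConfig() *aws.Config {
	c := &http.Client{
		Timeout: clientTimeout,
	}
	return aws.NewConfig().
		WithHTTPClient(c).
		WithMaxRetries(maxRetriesOnRecoverableFailures).
		WithCredentialsChainVerboseErrors(true)
}
```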
@@ -4,12 +4,6 @@ // Use define from require.js not webpack's define var _define = window.define; -// document-register-element -var docRegister = require("document-register-element"); -_define("document-register-element", function() { - return docRegister; -}); - // fetch var fetch = require("whatwg-fetch"); _define("fetch", function() {
1
/**
 * require.js module definitions bundled by webpack
 */

// Use define from require.js not webpack's define
var _define = window.define;

// document-register-element
var docRegister = require("document-register-element");
_define("document-register-element", function() {
    return docRegister;
});

// fetch
var fetch = require("whatwg-fetch");
_define("fetch", function() {
    return fetch
});

// flvjs
var flvjs = require("flv.js");
_define("flvjs", function() {
    return flvjs;
});

// jstree
var jstree = require("jstree");
require("jstree/dist/themes/default/style.css");
_define("jstree", function() {
    return jstree;
});

// jquery
var jquery = require("jquery");
_define("jQuery", function() {
    return jquery;
});

// hlsjs
var hlsjs = require("hls.js");
_define("hlsjs", function() {
    return hlsjs;
});

// howler
var howler = require("howler");
_define("howler", function() {
    return howler;
});

// native-promise-only
var nativePromise = require("native-promise-only");
_define("native-promise-only", function() {
    return nativePromise;
});

// resize-observer-polyfill
var resize = require("resize-observer-polyfill");
_define("resize-observer-polyfill", function() {
    return resize;
});

// shaka
var shaka = require("shaka-player");
_define("shaka", function() {
    return shaka;
});

// swiper
var swiper = require("swiper");
require("swiper/dist/css/swiper.min.css");
_define("swiper", function() {
    return swiper;
});

// sortable
var sortable = require("sortablejs");
_define("sortable", function() {
    return sortable;
});

// webcomponents
var webcomponents = require("webcomponents.js-2");
_define("webcomponents", function() {
    return webcomponents
});

// libjass
var libjass = require("libjass");
require("libjass/libjass.css");
_define("libjass", function() {
    return libjass;
});
1
12,355
why remove this and other piece?
jellyfin-jellyfin-web
js
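The hunk above deletes the document-register-element entry, and the review asks why it (and another piece) was removed. Every entry in this bundle file follows the same re-export shape, shown below with a placeholder module name, so deleting one block only affects consumers that request that exact name through require.js:

```js
// Shim pattern used throughout this file: re-export a webpack-bundled
// dependency under the page's require.js define. "some-dep" is a
// placeholder name, not an actual module from the patch.
var _define = window.define; // require.js define, not webpack's

var someDep = require("some-dep");
_define("some-dep", function() {
    return someDep;
});
```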
@@ -41,7 +41,7 @@ describe('dom.hasContentVirtual', function() { it('is false if the element has an aria label but `ignoreAria=true`', function() { fixture.innerHTML = '<div id="target" aria-label="my-label"> </div>'; tree = axe.utils.getFlattenedTree(fixture); - assert.isTrue( + assert.isFalse( hasContentVirtual( axe.utils.querySelectorAll(tree, '#target')[0], true,
1
/* global xit */
describe('dom.hasContentVirtual', function() {
  'use strict';
  var hasContentVirtual = axe.commons.dom.hasContentVirtual;
  var fixture = document.getElementById('fixture');
  var shadowSupport = axe.testUtils.shadowSupport.v1;
  var tree;

  it('returns false if there is no content', function() {
    fixture.innerHTML = '<div id="target"> </div>';
    tree = axe.utils.getFlattenedTree(fixture);
    assert.isFalse(
      hasContentVirtual(axe.utils.querySelectorAll(tree, '#target')[0])
    );
  });

  it('returns false if there are non-visual elements', function() {
    fixture.innerHTML = '<div id="target"> <span></span> </div>';
    tree = axe.utils.getFlattenedTree(fixture);
    assert.isFalse(
      hasContentVirtual(axe.utils.querySelectorAll(tree, '#target')[0])
    );
  });

  it('is true if the element has non-empty text', function() {
    fixture.innerHTML = '<div id="target"> text </div>';
    tree = axe.utils.getFlattenedTree(fixture);
    assert.isTrue(
      hasContentVirtual(axe.utils.querySelectorAll(tree, '#target')[0])
    );
  });

  it('is true if the element has an aria label', function() {
    fixture.innerHTML = '<div id="target" aria-label="my-label"> </div>';
    tree = axe.utils.getFlattenedTree(fixture);
    assert.isTrue(
      hasContentVirtual(axe.utils.querySelectorAll(tree, '#target')[0])
    );
  });

  it('is false if the element has an aria label but `ignoreAria=true`', function() {
    fixture.innerHTML = '<div id="target" aria-label="my-label"> </div>';
    tree = axe.utils.getFlattenedTree(fixture);
    assert.isTrue(
      hasContentVirtual(
        axe.utils.querySelectorAll(tree, '#target')[0],
        true,
        true
      )
    );
  });

  it('is true if the element contains visual content', function() {
    fixture.innerHTML = '<div id="target"> <img src=""> </div>';
    tree = axe.utils.getFlattenedTree(fixture);
    assert.isTrue(
      hasContentVirtual(axe.utils.querySelectorAll(tree, '#target')[0])
    );
  });

  it('is true if the element contains a node with a aria-label', function() {
    fixture.innerHTML =
      '<div id="target"> <span aria-label="my-label"></span> </div>';
    tree = axe.utils.getFlattenedTree(fixture);
    assert.isTrue(
      hasContentVirtual(axe.utils.querySelectorAll(tree, '#target')[0])
    );
  });

  it('is false if the element does not show text', function() {
    fixture.innerHTML = '<style id="target"> #foo { color: green } </style>';
    tree = axe.utils.getFlattenedTree(fixture);
    assert.isFalse(
      hasContentVirtual(axe.utils.querySelectorAll(tree, '#target')[0])
    );
  });

  it('is called through hasContent, with a DOM node', function() {
    var hasContent = axe.commons.dom.hasContent;
    fixture.innerHTML = '<div id="target"> text </div>';
    axe.testUtils.flatTreeSetup(fixture);
    assert.isTrue(hasContent(fixture.querySelector('#target')));

    fixture.innerHTML = '<div id="target"></div>';
    axe.testUtils.flatTreeSetup(fixture);
    assert.isFalse(hasContent(fixture.querySelector('#target')));
  });

  it('is false if noRecursion is true and the content is not in a child', function() {
    fixture.innerHTML = '<div id="target"><span> text </span></div>';
    tree = axe.utils.getFlattenedTree(fixture);
    assert.isFalse(
      hasContentVirtual(axe.utils.querySelectorAll(tree, '#target')[0], true)
    );
  });

  (shadowSupport ? it : xit)(
    'looks at content of shadow dom elements',
    function() {
      fixture.innerHTML = '<div id="target"></div>';
      var shadow = fixture
        .querySelector('#target')
        .attachShadow({ mode: 'open' });
      shadow.innerHTML = 'Some text';
      tree = axe.utils.getFlattenedTree(fixture);
      assert.isTrue(
        hasContentVirtual(axe.utils.querySelectorAll(tree, '#target')[0])
      );
    }
  );

  (shadowSupport ? it : xit)('looks at the slots in a shadow tree', function() {
    fixture.innerHTML = '<div id="shadow">some text</div>';
    var shadow = fixture
      .querySelector('#shadow')
      .attachShadow({ mode: 'open' });
    shadow.innerHTML = '<div class="target"><slot></slot></div>';
    tree = axe.utils.getFlattenedTree(fixture);
    assert.isTrue(
      hasContentVirtual(axe.utils.querySelectorAll(tree, '.target')[0])
    );
  });
});
1
14,918
Yikes... That was probably me.
dequelabs-axe-core
js
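The hunk above flips assert.isTrue to assert.isFalse in the ignoreAria test, and the review message owns up to the original mistake. The pair of assertions below spells out what the corrected test encodes, reusing the argument order already established in this spec (element, noRecursion, ignoreAria):

```js
fixture.innerHTML = '<div id="target" aria-label="my-label"> </div>';
tree = axe.utils.getFlattenedTree(fixture);
var node = axe.utils.querySelectorAll(tree, '#target')[0];

assert.isTrue(hasContentVirtual(node));              // an aria-label counts as content...
assert.isFalse(hasContentVirtual(node, true, true)); // ...unless ignoreAria is true
```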
@@ -622,7 +622,7 @@ class Application extends BaseApplication { // the following menu items will be available for all OS under Tools const toolsItemsAll = [{ - label: _('Resources'), + label: _('Note attachments...'), click: () => { this.dispatch({ type: 'NAV_GO',
1
require('app-module-path').addPath(__dirname); const { BaseApplication } = require('lib/BaseApplication'); const { FoldersScreenUtils } = require('lib/folders-screen-utils.js'); const Setting = require('lib/models/Setting.js'); const { shim } = require('lib/shim.js'); const MasterKey = require('lib/models/MasterKey'); const Note = require('lib/models/Note'); const { MarkupToHtml } = require('lib/joplin-renderer'); const { _, setLocale } = require('lib/locale.js'); const { Logger } = require('lib/logger.js'); const fs = require('fs-extra'); const Tag = require('lib/models/Tag.js'); const { reg } = require('lib/registry.js'); const { defaultState } = require('lib/reducer.js'); const packageInfo = require('./packageInfo.js'); const AlarmService = require('lib/services/AlarmService.js'); const AlarmServiceDriverNode = require('lib/services/AlarmServiceDriverNode'); const DecryptionWorker = require('lib/services/DecryptionWorker'); const InteropService = require('lib/services/InteropService'); const InteropServiceHelper = require('./InteropServiceHelper.js'); const ResourceService = require('lib/services/ResourceService'); const ClipperServer = require('lib/ClipperServer'); const ExternalEditWatcher = require('lib/services/ExternalEditWatcher'); const { bridge } = require('electron').remote.require('./bridge'); const { shell, webFrame, clipboard } = require('electron'); const Menu = bridge().Menu; const PluginManager = require('lib/services/PluginManager'); const RevisionService = require('lib/services/RevisionService'); const MigrationService = require('lib/services/MigrationService'); const TemplateUtils = require('lib/TemplateUtils'); const CssUtils = require('lib/CssUtils'); const pluginClasses = [ require('./plugins/GotoAnything.min'), ]; const appDefaultState = Object.assign({}, defaultState, { route: { type: 'NAV_GO', routeName: 'Main', props: {}, }, navHistory: [], fileToImport: null, windowCommand: null, noteVisiblePanes: ['editor', 'viewer'], sidebarVisibility: true, noteListVisibility: true, windowContentSize: bridge().windowContentSize(), watchedNoteFiles: [], lastEditorScrollPercents: {}, devToolsVisible: false, }); class Application extends BaseApplication { constructor() { super(); this.lastMenuScreen_ = null; } hasGui() { return true; } checkForUpdateLoggerPath() { return `${Setting.value('profileDir')}/log-autoupdater.txt`; } reducer(state = appDefaultState, action) { let newState = state; try { switch (action.type) { case 'NAV_BACK': case 'NAV_GO': { const goingBack = action.type === 'NAV_BACK'; if (goingBack && !state.navHistory.length) break; const currentRoute = state.route; newState = Object.assign({}, state); const newNavHistory = state.navHistory.slice(); if (goingBack) { let newAction = null; while (newNavHistory.length) { newAction = newNavHistory.pop(); if (newAction.routeName !== state.route.routeName) break; } if (!newAction) break; action = newAction; } if (!goingBack) newNavHistory.push(currentRoute); newState.navHistory = newNavHistory; newState.route = action; } break; case 'WINDOW_CONTENT_SIZE_SET': newState = Object.assign({}, state); newState.windowContentSize = action.size; break; case 'WINDOW_COMMAND': { newState = Object.assign({}, state); const command = Object.assign({}, action); delete command.type; newState.windowCommand = command.name ? command : null; } break; case 'NOTE_VISIBLE_PANES_TOGGLE': { const getNextLayout = (currentLayout) => { currentLayout = panes.length === 2 ? 
'both' : currentLayout[0]; let paneOptions; if (state.settings.layoutButtonSequence === Setting.LAYOUT_EDITOR_VIEWER) { paneOptions = ['editor', 'viewer']; } else if (state.settings.layoutButtonSequence === Setting.LAYOUT_EDITOR_SPLIT) { paneOptions = ['editor', 'both']; } else if (state.settings.layoutButtonSequence === Setting.LAYOUT_VIEWER_SPLIT) { paneOptions = ['viewer', 'both']; } else if (state.settings.layoutButtonSequence === Setting.LAYOUT_SPLIT_WYSIWYG) { paneOptions = ['both', 'wysiwyg']; } else { paneOptions = ['editor', 'viewer', 'both']; } const currentLayoutIndex = paneOptions.indexOf(currentLayout); const nextLayoutIndex = currentLayoutIndex === paneOptions.length - 1 ? 0 : currentLayoutIndex + 1; const nextLayout = paneOptions[nextLayoutIndex]; return nextLayout === 'both' ? ['editor', 'viewer'] : [nextLayout]; }; newState = Object.assign({}, state); const panes = state.noteVisiblePanes.slice(); newState.noteVisiblePanes = getNextLayout(panes); } break; case 'NOTE_VISIBLE_PANES_SET': newState = Object.assign({}, state); newState.noteVisiblePanes = action.panes; break; case 'SIDEBAR_VISIBILITY_TOGGLE': newState = Object.assign({}, state); newState.sidebarVisibility = !state.sidebarVisibility; break; case 'SIDEBAR_VISIBILITY_SET': newState = Object.assign({}, state); newState.sidebarVisibility = action.visibility; break; case 'NOTELIST_VISIBILITY_TOGGLE': newState = Object.assign({}, state); newState.noteListVisibility = !state.noteListVisibility; break; case 'NOTELIST_VISIBILITY_SET': newState = Object.assign({}, state); newState.noteListVisibility = action.visibility; break; case 'NOTE_FILE_WATCHER_ADD': if (newState.watchedNoteFiles.indexOf(action.id) < 0) { newState = Object.assign({}, state); const watchedNoteFiles = newState.watchedNoteFiles.slice(); watchedNoteFiles.push(action.id); newState.watchedNoteFiles = watchedNoteFiles; } break; case 'NOTE_FILE_WATCHER_REMOVE': { newState = Object.assign({}, state); const idx = newState.watchedNoteFiles.indexOf(action.id); if (idx >= 0) { const watchedNoteFiles = newState.watchedNoteFiles.slice(); watchedNoteFiles.splice(idx, 1); newState.watchedNoteFiles = watchedNoteFiles; } } break; case 'NOTE_FILE_WATCHER_CLEAR': if (state.watchedNoteFiles.length) { newState = Object.assign({}, state); newState.watchedNoteFiles = []; } break; case 'EDITOR_SCROLL_PERCENT_SET': { newState = Object.assign({}, state); const newPercents = Object.assign({}, newState.lastEditorScrollPercents); newPercents[action.noteId] = action.percent; newState.lastEditorScrollPercents = newPercents; } break; case 'NOTE_DEVTOOLS_TOGGLE': newState = Object.assign({}, state); newState.devToolsVisible = !newState.devToolsVisible; break; case 'NOTE_DEVTOOLS_SET': newState = Object.assign({}, state); newState.devToolsVisible = action.value; break; } } catch (error) { error.message = `In reducer: ${error.message} Action: ${JSON.stringify(action)}`; throw error; } return super.reducer(newState, action); } toggleDevTools(visible) { if (visible) { bridge().openDevTools(); } else { bridge().closeDevTools(); } } async generalMiddleware(store, next, action) { if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'locale' || action.type == 'SETTING_UPDATE_ALL') { setLocale(Setting.value('locale')); // The bridge runs within the main process, with its own instance of locale.js // so it needs to be set too here. 
bridge().setLocale(Setting.value('locale')); await this.refreshMenu(); } if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'showTrayIcon' || action.type == 'SETTING_UPDATE_ALL') { this.updateTray(); } if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'style.editor.fontFamily' || action.type == 'SETTING_UPDATE_ALL') { this.updateEditorFont(); } if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'windowContentZoomFactor' || action.type == 'SETTING_UPDATE_ALL') { webFrame.setZoomFactor(Setting.value('windowContentZoomFactor') / 100); } if (['EVENT_NOTE_ALARM_FIELD_CHANGE', 'NOTE_DELETE'].indexOf(action.type) >= 0) { await AlarmService.updateNoteNotification(action.id, action.type === 'NOTE_DELETE'); } const result = await super.generalMiddleware(store, next, action); const newState = store.getState(); if (action.type === 'NAV_GO' || action.type === 'NAV_BACK') { app().updateMenu(newState.route.routeName); } if (['NOTE_VISIBLE_PANES_TOGGLE', 'NOTE_VISIBLE_PANES_SET'].indexOf(action.type) >= 0) { Setting.setValue('noteVisiblePanes', newState.noteVisiblePanes); } if (['SIDEBAR_VISIBILITY_TOGGLE', 'SIDEBAR_VISIBILITY_SET'].indexOf(action.type) >= 0) { Setting.setValue('sidebarVisibility', newState.sidebarVisibility); } if (['NOTELIST_VISIBILITY_TOGGLE', 'NOTELIST_VISIBILITY_SET'].indexOf(action.type) >= 0) { Setting.setValue('noteListVisibility', newState.noteListVisibility); } if (action.type.indexOf('NOTE_SELECT') === 0 || action.type.indexOf('FOLDER_SELECT') === 0) { this.updateMenuItemStates(newState); } if (['NOTE_DEVTOOLS_TOGGLE', 'NOTE_DEVTOOLS_SET'].indexOf(action.type) >= 0) { this.toggleDevTools(newState.devToolsVisible); this.updateMenuItemStates(newState); } return result; } async refreshMenu() { const screen = this.lastMenuScreen_; this.lastMenuScreen_ = null; await this.updateMenu(screen); } focusElement_(target) { this.dispatch({ type: 'WINDOW_COMMAND', name: 'focusElement', target: target, }); } async updateMenu(screen) { if (this.lastMenuScreen_ === screen) return; const sortNoteFolderItems = (type) => { const sortItems = []; const sortOptions = Setting.enumOptions(`${type}.sortOrder.field`); for (const field in sortOptions) { if (!sortOptions.hasOwnProperty(field)) continue; sortItems.push({ label: sortOptions[field], screens: ['Main'], type: 'checkbox', checked: Setting.value(`${type}.sortOrder.field`) === field, click: () => { Setting.setValue(`${type}.sortOrder.field`, field); this.refreshMenu(); }, }); } sortItems.push({ type: 'separator' }); sortItems.push({ label: Setting.settingMetadata(`${type}.sortOrder.reverse`).label(), type: 'checkbox', checked: Setting.value(`${type}.sortOrder.reverse`), screens: ['Main'], click: () => { Setting.setValue(`${type}.sortOrder.reverse`, !Setting.value(`${type}.sortOrder.reverse`)); }, }); return sortItems; }; const sortNoteItems = sortNoteFolderItems('notes'); const sortFolderItems = sortNoteFolderItems('folders'); const focusItems = []; focusItems.push({ label: _('Sidebar'), click: () => { this.focusElement_('sideBar'); }, accelerator: 'CommandOrControl+Shift+S', }); focusItems.push({ label: _('Note list'), click: () => { this.focusElement_('noteList'); }, accelerator: 'CommandOrControl+Shift+L', }); focusItems.push({ label: _('Note title'), click: () => { this.focusElement_('noteTitle'); }, accelerator: 'CommandOrControl+Shift+N', }); focusItems.push({ label: _('Note body'), click: () => { this.focusElement_('noteBody'); }, accelerator: 'CommandOrControl+Shift+B', }); let toolsItems = []; const importItems = []; 
const exportItems = []; const toolsItemsFirst = []; const templateItems = []; const ioService = new InteropService(); const ioModules = ioService.modules(); for (let i = 0; i < ioModules.length; i++) { const module = ioModules[i]; if (module.type === 'exporter') { if (module.canDoMultiExport !== false) { exportItems.push({ label: module.fullLabel(), screens: ['Main'], click: async () => { await InteropServiceHelper.export(this.dispatch.bind(this), module); }, }); } } else { for (let j = 0; j < module.sources.length; j++) { const moduleSource = module.sources[j]; importItems.push({ label: module.fullLabel(moduleSource), screens: ['Main'], click: async () => { let path = null; const selectedFolderId = this.store().getState().selectedFolderId; if (moduleSource === 'file') { path = bridge().showOpenDialog({ filters: [{ name: module.description, extensions: module.fileExtensions }], }); } else { path = bridge().showOpenDialog({ properties: ['openDirectory', 'createDirectory'], }); } if (!path || (Array.isArray(path) && !path.length)) return; if (Array.isArray(path)) path = path[0]; this.dispatch({ type: 'WINDOW_COMMAND', name: 'showModalMessage', message: _('Importing from "%s" as "%s" format. Please wait...', path, module.format), }); const importOptions = { path, format: module.format, modulePath: module.path, onError: console.warn, destinationFolderId: !module.isNoteArchive && moduleSource === 'file' ? selectedFolderId : null, }; const service = new InteropService(); try { const result = await service.import(importOptions); console.info('Import result: ', result); } catch (error) { bridge().showErrorMessageBox(error.message); } this.dispatch({ type: 'WINDOW_COMMAND', name: 'hideModalMessage', }); }, }); } } } exportItems.push({ label: `PDF - ${_('PDF File')}`, screens: ['Main'], click: async () => { const selectedNoteIds = this.store().getState().selectedNoteIds; this.dispatch({ type: 'WINDOW_COMMAND', name: 'exportPdf', noteIds: selectedNoteIds, }); }, }); // We need a dummy entry, otherwise the ternary operator to show a // menu item only on a specific OS does not work. 
const noItem = { type: 'separator', visible: false, }; const syncStatusItem = { label: _('Synchronisation Status'), click: () => { this.dispatch({ type: 'NAV_GO', routeName: 'Status', }); }, }; const newNoteItem = { label: _('New note'), accelerator: 'CommandOrControl+N', screens: ['Main'], click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'newNote', }); }, }; const newTodoItem = { label: _('New to-do'), accelerator: 'CommandOrControl+T', screens: ['Main'], click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'newTodo', }); }, }; const newNotebookItem = { label: _('New notebook'), screens: ['Main'], click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'newNotebook', }); }, }; const newSubNotebookItem = { label: _('New sub-notebook'), screens: ['Main'], click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'newSubNotebook', activeFolderId: Setting.value('activeFolderId'), }); }, }; const printItem = { label: _('Print'), accelerator: 'CommandOrControl+P', screens: ['Main'], click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'print', }); }, }; toolsItemsFirst.push(syncStatusItem, { type: 'separator', screens: ['Main'], }); const templateDirExists = await shim.fsDriver().exists(Setting.value('templateDir')); templateItems.push({ label: _('Create note from template'), visible: templateDirExists, click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'selectTemplate', noteType: 'note', }); }, }, { label: _('Create to-do from template'), visible: templateDirExists, click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'selectTemplate', noteType: 'todo', }); }, }, { label: _('Insert template'), visible: templateDirExists, accelerator: 'CommandOrControl+Alt+I', click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'selectTemplate', }); }, }, { label: _('Open template directory'), click: () => { const templateDir = Setting.value('templateDir'); if (!templateDirExists) shim.fsDriver().mkdir(templateDir); shell.openItem(templateDir); }, }, { label: _('Refresh templates'), click: async () => { const templates = await TemplateUtils.loadTemplates(Setting.value('templateDir')); this.store().dispatch({ type: 'TEMPLATE_UPDATE_ALL', templates: templates, }); }, }); // we need this workaround, because on macOS the menu is different const toolsItemsWindowsLinux = toolsItemsFirst.concat([{ label: _('Options'), visible: !shim.isMac(), accelerator: 'CommandOrControl+,', click: () => { this.dispatch({ type: 'NAV_GO', routeName: 'Config', }); }, }]); // the following menu items will be available for all OS under Tools const toolsItemsAll = [{ label: _('Resources'), click: () => { this.dispatch({ type: 'NAV_GO', routeName: 'Resources', }); }, }]; if (!shim.isMac()) { toolsItems = toolsItems.concat(toolsItemsWindowsLinux); } toolsItems = toolsItems.concat(toolsItemsAll); function _checkForUpdates(ctx) { bridge().checkForUpdates(false, bridge().window(), ctx.checkForUpdateLoggerPath(), { includePreReleases: Setting.value('autoUpdate.includePreReleases') }); } function _showAbout() { const p = packageInfo; let gitInfo = ''; if ('git' in p) { gitInfo = _('Revision: %s (%s)', p.git.hash, p.git.branch); } const copyrightText = 'Copyright © 2016-YYYY Laurent Cozic'; const message = [ p.description, '', copyrightText.replace('YYYY', new Date().getFullYear()), _('%s %s (%s, %s)', p.name, p.version, Setting.value('env'), process.platform), '', _('Client ID: %s', Setting.value('clientId')), _('Sync Version: %s', Setting.value('syncVersion')), _('Profile Version: 
%s', reg.db().version()), ]; if (gitInfo) { message.push(`\n${gitInfo}`); console.info(gitInfo); } const text = message.join('\n'); const copyToClipboard = bridge().showMessageBox(text, { icon: `${bridge().electronApp().buildDir()}/icons/128x128.png`, buttons: [_('Copy'), _('OK')], cancelId: 1, defaultId: 1, }); if (copyToClipboard === 0) { clipboard.writeText(message.splice(3).join('\n')); } } const rootMenuFile = { // Using a dummy entry for macOS here, because the first menu // becomes 'Joplin' and we need a menu called 'File' later. label: shim.isMac() ? '&JoplinMainMenu' : _('&File'), // `&` before one of the characters in the label name means that // <Alt + F> will open this menu. It's needed because Electron // opens the first menu on Alt press if no hotkey is assigned. // Issue: https://github.com/laurent22/joplin/issues/934 submenu: [{ label: _('About Joplin'), visible: shim.isMac() ? true : false, click: () => _showAbout(), }, { type: 'separator', visible: shim.isMac() ? true : false, }, { label: _('Preferences...'), visible: shim.isMac() ? true : false, accelerator: 'CommandOrControl+,', click: () => { this.dispatch({ type: 'NAV_GO', routeName: 'Config', }); }, }, { label: _('Check for updates...'), visible: shim.isMac() ? true : false, click: () => _checkForUpdates(this), }, { type: 'separator', visible: shim.isMac() ? true : false, }, shim.isMac() ? noItem : newNoteItem, shim.isMac() ? noItem : newTodoItem, shim.isMac() ? noItem : newNotebookItem, shim.isMac() ? noItem : newSubNotebookItem, { type: 'separator', visible: shim.isMac() ? false : true, }, { label: _('Templates'), visible: shim.isMac() ? false : true, submenu: templateItems, }, { type: 'separator', visible: shim.isMac() ? false : true, }, { label: _('Import'), visible: shim.isMac() ? false : true, submenu: importItems, }, { label: _('Export'), visible: shim.isMac() ? false : true, submenu: exportItems, }, { type: 'separator', }, { label: _('Synchronise'), accelerator: 'CommandOrControl+S', screens: ['Main'], click: async () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'synchronize', }); }, }, shim.isMac() ? syncStatusItem : noItem, { type: 'separator', }, shim.isMac() ? noItem : printItem, { type: 'separator', platforms: ['darwin'], }, { label: _('Hide %s', 'Joplin'), platforms: ['darwin'], accelerator: 'CommandOrControl+H', click: () => { bridge().electronApp().hide(); }, }, { type: 'separator', }, { label: _('Quit'), accelerator: 'CommandOrControl+Q', click: () => { bridge().electronApp().quit(); }, }], }; const rootMenuFileMacOs = { label: _('&File'), visible: shim.isMac() ?
true : false, submenu: [ newNoteItem, newTodoItem, newNotebookItem, newSubNotebookItem, { label: _('Close Window'), platforms: ['darwin'], accelerator: 'Command+W', selector: 'performClose:', }, { type: 'separator', }, { label: _('Templates'), submenu: templateItems, }, { type: 'separator', }, { label: _('Import'), submenu: importItems, }, { label: _('Export'), submenu: exportItems, }, { type: 'separator', }, printItem, ], }; const layoutButtonSequenceOptions = Object.entries(Setting.enumOptions('layoutButtonSequence')).map(([layoutKey, layout]) => ({ label: layout, screens: ['Main'], type: 'checkbox', checked: Setting.value('layoutButtonSequence') == layoutKey, click: () => { Setting.setValue('layoutButtonSequence', layoutKey); this.refreshMenu(); }, })); const rootMenus = { edit: { id: 'edit', label: _('&Edit'), submenu: [{ id: 'edit:copy', label: _('Copy'), role: 'copy', accelerator: 'CommandOrControl+C', }, { id: 'edit:cut', label: _('Cut'), role: 'cut', accelerator: 'CommandOrControl+X', }, { id: 'edit:paste', label: _('Paste'), role: 'paste', accelerator: 'CommandOrControl+V', }, { id: 'edit:selectAll', label: _('Select all'), role: 'selectall', accelerator: 'CommandOrControl+A', }, { type: 'separator', screens: ['Main'], }, { id: 'edit:bold', label: _('Bold'), screens: ['Main'], accelerator: 'CommandOrControl+B', click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'textBold', }); }, }, { id: 'edit:italic', label: _('Italic'), screens: ['Main'], accelerator: 'CommandOrControl+I', click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'textItalic', }); }, }, { id: 'edit:link', label: _('Link'), screens: ['Main'], accelerator: 'CommandOrControl+K', click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'textLink', }); }, }, { id: 'edit:code', label: _('Code'), screens: ['Main'], accelerator: 'CommandOrControl+`', click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'textCode', }); }, }, { type: 'separator', screens: ['Main'], }, { id: 'edit:insertDateTime', label: _('Insert Date Time'), screens: ['Main'], accelerator: 'CommandOrControl+Shift+T', click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'insertDateTime', }); }, }, { type: 'separator', screens: ['Main'], }, { id: 'edit:commandStartExternalEditing', label: _('Edit in external editor'), screens: ['Main'], accelerator: 'CommandOrControl+E', click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'commandStartExternalEditing', }); }, }, { id: 'edit:setTags', label: _('Tags'), screens: ['Main'], accelerator: 'CommandOrControl+Alt+T', click: () => { const selectedNoteIds = this.store().getState().selectedNoteIds; this.dispatch({ type: 'WINDOW_COMMAND', name: 'setTags', noteIds: selectedNoteIds, }); }, }, { type: 'separator', screens: ['Main'], }, { id: 'edit:focusSearch', label: _('Search in all the notes'), screens: ['Main'], accelerator: shim.isMac() ? 'Shift+Command+F' : 'F6', click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'focusSearch', }); }, }, { id: 'edit:showLocalSearch', label: _('Search in current note'), screens: ['Main'], accelerator: 'CommandOrControl+F', click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'showLocalSearch', }); }, }], }, view: { label: _('&View'), submenu: [{ label: _('Toggle sidebar'), screens: ['Main'], accelerator: shim.isMac() ? 
'Option+Command+S' : 'F10', click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'toggleSidebar', }); }, }, { type: 'separator', screens: ['Main'], }, { label: _('Layout button sequence'), screens: ['Main'], submenu: layoutButtonSequenceOptions, }, { label: _('Toggle note list'), screens: ['Main'], click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'toggleNoteList', }); }, }, { label: _('Toggle editor layout'), screens: ['Main'], accelerator: 'CommandOrControl+L', click: () => { this.dispatch({ type: 'WINDOW_COMMAND', name: 'toggleVisiblePanes', }); }, }, { type: 'separator', screens: ['Main'], }, { label: Setting.settingMetadata('notes.sortOrder.field').label(), screens: ['Main'], submenu: sortNoteItems, }, { label: Setting.settingMetadata('folders.sortOrder.field').label(), screens: ['Main'], submenu: sortFolderItems, }, { label: Setting.settingMetadata('showNoteCounts').label(), type: 'checkbox', checked: Setting.value('showNoteCounts'), screens: ['Main'], click: () => { Setting.setValue('showNoteCounts', !Setting.value('showNoteCounts')); }, }, { label: Setting.settingMetadata('uncompletedTodosOnTop').label(), type: 'checkbox', checked: Setting.value('uncompletedTodosOnTop'), screens: ['Main'], click: () => { Setting.setValue('uncompletedTodosOnTop', !Setting.value('uncompletedTodosOnTop')); }, }, { label: Setting.settingMetadata('showCompletedTodos').label(), type: 'checkbox', checked: Setting.value('showCompletedTodos'), screens: ['Main'], click: () => { Setting.setValue('showCompletedTodos', !Setting.value('showCompletedTodos')); }, }, { type: 'separator', screens: ['Main'], }, { label: _('Focus'), screens: ['Main'], submenu: focusItems, }, { type: 'separator', screens: ['Main'], }, { label: _('Actual Size'), click: () => { Setting.setValue('windowContentZoomFactor', 100); }, accelerator: 'CommandOrControl+0', }, { label: _('Zoom In'), click: () => { Setting.incValue('windowContentZoomFactor', 10); }, accelerator: 'CommandOrControl+=', }, { label: _('Zoom Out'), click: () => { Setting.incValue('windowContentZoomFactor', -10); }, accelerator: 'CommandOrControl+-', }], }, tools: { label: _('&Tools'), submenu: toolsItems, }, help: { label: _('&Help'), submenu: [{ label: _('Website and documentation'), accelerator: 'F1', click() { bridge().openExternal('https://joplinapp.org'); }, }, { label: _('Joplin Forum'), click() { bridge().openExternal('https://discourse.joplinapp.org'); }, }, { label: _('Make a donation'), click() { bridge().openExternal('https://joplinapp.org/donate/'); }, }, { label: _('Check for updates...'), visible: shim.isMac() ? false : true, click: () => _checkForUpdates(this), }, { type: 'separator', screens: ['Main'], }, { id: 'help:toggleDevTools', type: 'checkbox', label: _('Toggle development tools'), visible: true, click: () => { this.dispatch({ type: 'NOTE_DEVTOOLS_TOGGLE', }); }, }, { type: 'separator', visible: shim.isMac() ? false : true, screens: ['Main'], }, { label: _('About Joplin'), visible: shim.isMac() ? false : true, click: () => _showAbout(), }], }, }; if (shim.isMac()) { rootMenus.macOsApp = rootMenuFile; rootMenus.file = rootMenuFileMacOs; } else { rootMenus.file = rootMenuFile; } // It seems the "visible" property of separators is ignored by Electron, making // it display separators that we want hidden. So this function iterates through // them and removes them completely.
const cleanUpSeparators = items => { const output = []; for (const item of items) { if ('visible' in item && item.type === 'separator' && !item.visible) continue; output.push(item); } return output; }; for (const key in rootMenus) { if (!rootMenus.hasOwnProperty(key)) continue; if (!rootMenus[key].submenu) continue; rootMenus[key].submenu = cleanUpSeparators(rootMenus[key].submenu); } const pluginMenuItems = PluginManager.instance().menuItems(); for (const item of pluginMenuItems) { // Fall back to the Tools menu for plugin items with an unknown parent const itemParent = rootMenus[item.parent] ? rootMenus[item.parent] : rootMenus.tools; itemParent.submenu.push(item); } const template = [ rootMenus.file, rootMenus.edit, rootMenus.view, rootMenus.tools, rootMenus.help, ]; if (shim.isMac()) template.splice(0, 0, rootMenus.macOsApp); function isEmptyMenu(template) { for (let i = 0; i < template.length; i++) { const t = template[i]; if (t.type !== 'separator') return false; } return true; } function removeUnwantedItems(template, screen) { const platform = shim.platformName(); let output = []; for (let i = 0; i < template.length; i++) { const t = Object.assign({}, template[i]); if (t.screens && t.screens.indexOf(screen) < 0) continue; if (t.platforms && t.platforms.indexOf(platform) < 0) continue; if (t.submenu) t.submenu = removeUnwantedItems(t.submenu, screen); if (('submenu' in t) && isEmptyMenu(t.submenu)) continue; output.push(t); } // Remove leftover separators from sections that are now empty const temp = []; let previous = null; for (let i = 0; i < output.length; i++) { const t = Object.assign({}, output[i]); if (t.type === 'separator') { if (!previous) continue; if (previous.type === 'separator') continue; } temp.push(t); previous = t; } output = temp; return output; } const screenTemplate = removeUnwantedItems(template, screen); const menu = Menu.buildFromTemplate(screenTemplate); Menu.setApplicationMenu(menu); this.lastMenuScreen_ = screen; } async updateMenuItemStates(state = null) { if (!this.lastMenuScreen_) return; if (!this.store() && !state) return; if (!state) state = this.store().getState(); const selectedNoteIds = state.selectedNoteIds; const note = selectedNoteIds.length === 1 ?
await Note.load(selectedNoteIds[0]) : null; for (const itemId of ['copy', 'paste', 'cut', 'selectAll', 'bold', 'italic', 'link', 'code', 'insertDateTime', 'commandStartExternalEditing', 'showLocalSearch']) { const menuItem = Menu.getApplicationMenu().getMenuItemById(`edit:${itemId}`); if (!menuItem) continue; menuItem.enabled = !!note && note.markup_language === MarkupToHtml.MARKUP_LANGUAGE_MARKDOWN; } const menuItem = Menu.getApplicationMenu().getMenuItemById('help:toggleDevTools'); menuItem.checked = state.devToolsVisible; } updateTray() { const app = bridge().electronApp(); if (app.trayShown() === Setting.value('showTrayIcon')) return; if (!Setting.value('showTrayIcon')) { app.destroyTray(); } else { const contextMenu = Menu.buildFromTemplate([ { label: _('Open %s', app.electronApp().name), click: () => { app.window().show(); } }, { type: 'separator' }, { label: _('Exit'), click: () => { app.quit(); } }, ]); app.createTray(contextMenu); } } updateEditorFont() { const fontFamilies = []; if (Setting.value('style.editor.fontFamily')) fontFamilies.push(`"${Setting.value('style.editor.fontFamily')}"`); fontFamilies.push('monospace'); // The '*' and '!important' parts are necessary to make sure Russian text is displayed properly // https://github.com/laurent22/joplin/issues/155 const css = `.ace_editor * { font-family: ${fontFamilies.join(', ')} !important; }`; const styleTag = document.createElement('style'); styleTag.type = 'text/css'; styleTag.appendChild(document.createTextNode(css)); document.head.appendChild(styleTag); } async loadCustomCss(filePath) { let cssString = ''; if (await fs.pathExists(filePath)) { try { cssString = await fs.readFile(filePath, 'utf-8'); } catch (error) { let msg = error.message ? error.message : ''; msg = `Could not load custom css from ${filePath}\n${msg}`; error.message = msg; throw error; } } return cssString; } // async createManyNotes() { // return; // const folderIds = []; // const randomFolderId = (folderIds) => { // if (!folderIds.length) return ''; // const idx = Math.floor(Math.random() * folderIds.length); // if (idx > folderIds.length - 1) throw new Error('Invalid index ' + idx + ' / ' + folderIds.length); // return folderIds[idx]; // } // let rootFolderCount = 0; // let folderCount = 100; // for (let i = 0; i < folderCount; i++) { // let parentId = ''; // if (Math.random() >= 0.9 || rootFolderCount >= folderCount / 10) { // parentId = randomFolderId(folderIds); // } else { // rootFolderCount++; // } // const folder = await Folder.save({ title: 'folder' + i, parent_id: parentId }); // folderIds.push(folder.id); // } // for (let i = 0; i < 10000; i++) { // const parentId = randomFolderId(folderIds); // Note.save({ title: 'note' + i, parent_id: parentId }); // } // } async start(argv) { const electronIsDev = require('electron-is-dev'); // If running inside a package, the command line, instead of being "node.exe <path> <flags>" is "joplin.exe <flags>" so // insert an extra argument so that they can be processed in a consistent way everywhere. if (!electronIsDev) argv.splice(1, 0, '.'); argv = await super.start(argv); // Loads app-wide styles. 
(Markdown preview-specific styles loaded in app.js) const dir = Setting.value('profileDir'); const filename = Setting.custom_css_files.JOPLIN_APP; await CssUtils.injectCustomStyles(`${dir}/${filename}`); AlarmService.setDriver(new AlarmServiceDriverNode({ appName: packageInfo.build.appId })); AlarmService.setLogger(reg.logger()); reg.setShowErrorMessageBoxHandler((message) => { bridge().showErrorMessageBox(message); }); if (Setting.value('flagOpenDevTools')) { bridge().openDevTools(); } PluginManager.instance().dispatch_ = this.dispatch.bind(this); PluginManager.instance().setLogger(reg.logger()); PluginManager.instance().register(pluginClasses); this.updateMenu('Main'); this.initRedux(); // Since the settings need to be loaded before the store is created, it will never // receive the SETTING_UPDATE_ALL event, which means state.settings will not be // initialised. So we manually call dispatchUpdateAll() to force an update. Setting.dispatchUpdateAll(); await FoldersScreenUtils.refreshFolders(); const tags = await Tag.allWithNotes(); this.dispatch({ type: 'TAG_UPDATE_ALL', items: tags, }); const masterKeys = await MasterKey.all(); this.dispatch({ type: 'MASTERKEY_UPDATE_ALL', items: masterKeys, }); this.store().dispatch({ type: 'FOLDER_SELECT', id: Setting.value('activeFolderId'), }); this.store().dispatch({ type: 'FOLDER_SET_COLLAPSED_ALL', ids: Setting.value('collapsedFolderIds'), }); // Loads custom Markdown preview styles const cssString = await CssUtils.loadCustomCss(`${Setting.value('profileDir')}/userstyle.css`); this.store().dispatch({ type: 'LOAD_CUSTOM_CSS', css: cssString, }); const templates = await TemplateUtils.loadTemplates(Setting.value('templateDir')); this.store().dispatch({ type: 'TEMPLATE_UPDATE_ALL', templates: templates, }); this.store().dispatch({ type: 'NOTE_DEVTOOLS_SET', value: Setting.value('flagOpenDevTools'), }); // Note: Auto-update currently doesn't work in Linux: it downloads the update // but then doesn't install it on exit. if (shim.isWindows() || shim.isMac()) { const runAutoUpdateCheck = () => { if (Setting.value('autoUpdateEnabled')) { bridge().checkForUpdates(true, bridge().window(), this.checkForUpdateLoggerPath(), { includePreReleases: Setting.value('autoUpdate.includePreReleases') }); } }; // Initial check on startup setTimeout(() => { runAutoUpdateCheck(); }, 5000); // Then every x hours setInterval(() => { runAutoUpdateCheck(); }, 12 * 60 * 60 * 1000); } this.updateTray(); setTimeout(() => { AlarmService.garbageCollect(); }, 1000 * 60 * 60); if (Setting.value('startMinimized') && Setting.value('showTrayIcon')) { // Keep it hidden } else { bridge().window().show(); } ResourceService.runInBackground(); if (Setting.value('env') === 'dev') { AlarmService.updateAllNotifications(); } else { reg.scheduleSync().then(() => { // Wait for the first sync before updating the notifications, since synchronisation // might change the notifications.
AlarmService.updateAllNotifications(); DecryptionWorker.instance().scheduleStart(); }); } const clipperLogger = new Logger(); clipperLogger.addTarget('file', { path: `${Setting.value('profileDir')}/log-clipper.txt` }); clipperLogger.addTarget('console'); ClipperServer.instance().setLogger(clipperLogger); ClipperServer.instance().setDispatch(this.store().dispatch); if (Setting.value('clipperServer.autoStart')) { ClipperServer.instance().start(); } ExternalEditWatcher.instance().setLogger(reg.logger()); ExternalEditWatcher.instance().dispatch = this.store().dispatch; RevisionService.instance().runInBackground(); this.updateMenuItemStates(); // Make it available to the console window - useful to call revisionService.collectRevisions() window.revisionService = RevisionService.instance(); window.migrationService = MigrationService.instance(); window.decryptionWorker = DecryptionWorker.instance(); } } let application_ = null; function app() { if (!application_) application_ = new Application(); return application_; } module.exports = { app };
1
13,506
It would be better to move the `...` outside of the l10n block. This way it won't need to be translated twice for each language.
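For illustration, a minimal sketch of the suggested change, applied to one of the labels above (any label with a trailing ellipsis would be handled the same way):

// Before: the ellipsis is part of the translated string, so translators end up
// with two near-duplicate entries, e.g. "Check for updates" and "Check for updates...".
label: _('Check for updates...'),

// After: the ellipsis stays outside the l10n call, so each language only
// needs to translate "Check for updates" once.
label: `${_('Check for updates')}...`,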
laurent22-joplin
js
@@ -35,6 +35,11 @@ #include "monster.h" #include "scheduler.h" #include "databasetasks.h" +#include "actions.h" +#include "movement.h" +#include "weapons.h" +#include "globalevent.h" +#include "events.h" extern Chat* g_chat; extern Game g_game;
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2017 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include <boost/range/adaptor/reversed.hpp> #include "luascript.h" #include "chat.h" #include "player.h" #include "game.h" #include "protocolstatus.h" #include "spells.h" #include "iologindata.h" #include "configmanager.h" #include "teleport.h" #include "databasemanager.h" #include "bed.h" #include "monster.h" #include "scheduler.h" #include "databasetasks.h" extern Chat* g_chat; extern Game g_game; extern Monsters g_monsters; extern ConfigManager g_config; extern Vocations g_vocations; extern Spells* g_spells; ScriptEnvironment::DBResultMap ScriptEnvironment::tempResults; uint32_t ScriptEnvironment::lastResultId = 0; std::multimap<ScriptEnvironment*, Item*> ScriptEnvironment::tempItems; LuaEnvironment g_luaEnvironment; ScriptEnvironment::ScriptEnvironment() { resetEnv(); } ScriptEnvironment::~ScriptEnvironment() { resetEnv(); } void ScriptEnvironment::resetEnv() { scriptId = 0; callbackId = 0; timerEvent = false; interface = nullptr; localMap.clear(); tempResults.clear(); auto pair = tempItems.equal_range(this); auto it = pair.first; while (it != pair.second) { Item* item = it->second; if (item->getParent() == VirtualCylinder::virtualCylinder) { g_game.ReleaseItem(item); } it = tempItems.erase(it); } } bool ScriptEnvironment::setCallbackId(int32_t callbackId, LuaScriptInterface* scriptInterface) { if (this->callbackId != 0) { //nested callbacks are not allowed if (interface) { interface->reportErrorFunc("Nested callbacks!"); } return false; } this->callbackId = callbackId; interface = scriptInterface; return true; } void ScriptEnvironment::getEventInfo(int32_t& scriptId, LuaScriptInterface*& scriptInterface, int32_t& callbackId, bool& timerEvent) const { scriptId = this->scriptId; scriptInterface = interface; callbackId = this->callbackId; timerEvent = this->timerEvent; } uint32_t ScriptEnvironment::addThing(Thing* thing) { if (!thing || thing->isRemoved()) { return 0; } Creature* creature = thing->getCreature(); if (creature) { return creature->getID(); } Item* item = thing->getItem(); if (item && item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { return item->getUniqueId(); } for (const auto& it : localMap) { if (it.second == item) { return it.first; } } localMap[++lastUID] = item; return lastUID; } void ScriptEnvironment::insertItem(uint32_t uid, Item* item) { auto result = localMap.emplace(uid, item); if (!result.second) { std::cout << std::endl << "Lua Script Error: Thing uid already taken."; } } Thing* ScriptEnvironment::getThingByUID(uint32_t uid) { if (uid >= 0x10000000) { return g_game.getCreatureByID(uid); } if (uid <= std::numeric_limits<uint16_t>::max()) { Item* item = g_game.getUniqueItem(uid); if (item && !item->isRemoved()) { return item; 
} return nullptr; } auto it = localMap.find(uid); if (it != localMap.end()) { Item* item = it->second; if (!item->isRemoved()) { return item; } } return nullptr; } Item* ScriptEnvironment::getItemByUID(uint32_t uid) { Thing* thing = getThingByUID(uid); if (!thing) { return nullptr; } return thing->getItem(); } Container* ScriptEnvironment::getContainerByUID(uint32_t uid) { Item* item = getItemByUID(uid); if (!item) { return nullptr; } return item->getContainer(); } void ScriptEnvironment::removeItemByUID(uint32_t uid) { if (uid <= std::numeric_limits<uint16_t>::max()) { g_game.removeUniqueItem(uid); return; } auto it = localMap.find(uid); if (it != localMap.end()) { localMap.erase(it); } } void ScriptEnvironment::addTempItem(Item* item) { tempItems.emplace(this, item); } void ScriptEnvironment::removeTempItem(Item* item) { for (auto it = tempItems.begin(), end = tempItems.end(); it != end; ++it) { if (it->second == item) { tempItems.erase(it); break; } } } uint32_t ScriptEnvironment::addResult(DBResult_ptr res) { tempResults[++lastResultId] = res; return lastResultId; } bool ScriptEnvironment::removeResult(uint32_t id) { auto it = tempResults.find(id); if (it == tempResults.end()) { return false; } tempResults.erase(it); return true; } DBResult_ptr ScriptEnvironment::getResultByID(uint32_t id) { auto it = tempResults.find(id); if (it == tempResults.end()) { return nullptr; } return it->second; } std::string LuaScriptInterface::getErrorDesc(ErrorCode_t code) { switch (code) { case LUA_ERROR_PLAYER_NOT_FOUND: return "Player not found"; case LUA_ERROR_CREATURE_NOT_FOUND: return "Creature not found"; case LUA_ERROR_ITEM_NOT_FOUND: return "Item not found"; case LUA_ERROR_THING_NOT_FOUND: return "Thing not found"; case LUA_ERROR_TILE_NOT_FOUND: return "Tile not found"; case LUA_ERROR_HOUSE_NOT_FOUND: return "House not found"; case LUA_ERROR_COMBAT_NOT_FOUND: return "Combat not found"; case LUA_ERROR_CONDITION_NOT_FOUND: return "Condition not found"; case LUA_ERROR_AREA_NOT_FOUND: return "Area not found"; case LUA_ERROR_CONTAINER_NOT_FOUND: return "Container not found"; case LUA_ERROR_VARIANT_NOT_FOUND: return "Variant not found"; case LUA_ERROR_VARIANT_UNKNOWN: return "Unknown variant type"; case LUA_ERROR_SPELL_NOT_FOUND: return "Spell not found"; default: return "Bad error code"; } } ScriptEnvironment LuaScriptInterface::scriptEnv[16]; int32_t LuaScriptInterface::scriptEnvIndex = -1; LuaScriptInterface::LuaScriptInterface(std::string interfaceName) : interfaceName(std::move(interfaceName)) { if (!g_luaEnvironment.getLuaState()) { g_luaEnvironment.initState(); } } LuaScriptInterface::~LuaScriptInterface() { closeState(); } bool LuaScriptInterface::reInitState() { g_luaEnvironment.clearCombatObjects(this); g_luaEnvironment.clearAreaObjects(this); closeState(); return initState(); } /// Same as lua_pcall, but adds stack trace to error strings in called function. 
int LuaScriptInterface::protectedCall(lua_State* L, int nargs, int nresults) { int error_index = lua_gettop(L) - nargs; lua_pushcfunction(L, luaErrorHandler); lua_insert(L, error_index); int ret = lua_pcall(L, nargs, nresults, error_index); lua_remove(L, error_index); return ret; } int32_t LuaScriptInterface::loadFile(const std::string& file, Npc* npc /* = nullptr*/) { //loads file as a chunk at stack top int ret = luaL_loadfile(luaState, file.c_str()); if (ret != 0) { lastLuaError = popString(luaState); return -1; } //check that it is loaded as a function if (!isFunction(luaState, -1)) { return -1; } loadingFile = file; if (!reserveScriptEnv()) { return -1; } ScriptEnvironment* env = getScriptEnv(); env->setScriptId(EVENT_ID_LOADING, this); env->setNpc(npc); //execute it ret = protectedCall(luaState, 0, 0); if (ret != 0) { reportError(nullptr, popString(luaState)); resetScriptEnv(); return -1; } resetScriptEnv(); return 0; } int32_t LuaScriptInterface::getEvent(const std::string& eventName) { //get our events table lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef); if (!isTable(luaState, -1)) { lua_pop(luaState, 1); return -1; } //get current event function pointer lua_getglobal(luaState, eventName.c_str()); if (!isFunction(luaState, -1)) { lua_pop(luaState, 2); return -1; } //save in our events table lua_pushvalue(luaState, -1); lua_rawseti(luaState, -3, runningEventId); lua_pop(luaState, 2); //reset global value of this event lua_pushnil(luaState); lua_setglobal(luaState, eventName.c_str()); cacheFiles[runningEventId] = loadingFile + ":" + eventName; return runningEventId++; } int32_t LuaScriptInterface::getMetaEvent(const std::string& globalName, const std::string& eventName) { //get our events table lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef); if (!isTable(luaState, -1)) { lua_pop(luaState, 1); return -1; } //get current event function pointer lua_getglobal(luaState, globalName.c_str()); lua_getfield(luaState, -1, eventName.c_str()); if (!isFunction(luaState, -1)) { lua_pop(luaState, 3); return -1; } //save in our events table lua_pushvalue(luaState, -1); lua_rawseti(luaState, -4, runningEventId); lua_pop(luaState, 1); //reset global value of this event lua_pushnil(luaState); lua_setfield(luaState, -2, eventName.c_str()); lua_pop(luaState, 2); cacheFiles[runningEventId] = loadingFile + ":" + globalName + "@" + eventName; return runningEventId++; } const std::string& LuaScriptInterface::getFileById(int32_t scriptId) { if (scriptId == EVENT_ID_LOADING) { return loadingFile; } auto it = cacheFiles.find(scriptId); if (it == cacheFiles.end()) { static const std::string& unk = "(Unknown scriptfile)"; return unk; } return it->second; } std::string LuaScriptInterface::getStackTrace(const std::string& error_desc) { lua_getglobal(luaState, "debug"); if (!isTable(luaState, -1)) { lua_pop(luaState, 1); return error_desc; } lua_getfield(luaState, -1, "traceback"); if (!isFunction(luaState, -1)) { lua_pop(luaState, 2); return error_desc; } lua_replace(luaState, -2); pushString(luaState, error_desc); lua_call(luaState, 1, 1); return popString(luaState); } void LuaScriptInterface::reportError(const char* function, const std::string& error_desc, bool stack_trace/* = false*/) { int32_t scriptId; int32_t callbackId; bool timerEvent; LuaScriptInterface* scriptInterface; getScriptEnv()->getEventInfo(scriptId, scriptInterface, callbackId, timerEvent); std::cout << std::endl << "Lua Script Error: "; if (scriptInterface) { std::cout << '[' << scriptInterface->getInterfaceName() << "] " << 
std::endl; if (timerEvent) { std::cout << "in a timer event called from: " << std::endl; } if (callbackId) { std::cout << "in callback: " << scriptInterface->getFileById(callbackId) << std::endl; } std::cout << scriptInterface->getFileById(scriptId) << std::endl; } if (function) { std::cout << function << "(). "; } if (stack_trace && scriptInterface) { std::cout << scriptInterface->getStackTrace(error_desc) << std::endl; } else { std::cout << error_desc << std::endl; } } bool LuaScriptInterface::pushFunction(int32_t functionId) { lua_rawgeti(luaState, LUA_REGISTRYINDEX, eventTableRef); if (!isTable(luaState, -1)) { return false; } lua_rawgeti(luaState, -1, functionId); lua_replace(luaState, -2); return isFunction(luaState, -1); } bool LuaScriptInterface::initState() { luaState = g_luaEnvironment.getLuaState(); if (!luaState) { return false; } lua_newtable(luaState); eventTableRef = luaL_ref(luaState, LUA_REGISTRYINDEX); runningEventId = EVENT_ID_USER; return true; } bool LuaScriptInterface::closeState() { if (!g_luaEnvironment.getLuaState() || !luaState) { return false; } cacheFiles.clear(); if (eventTableRef != -1) { luaL_unref(luaState, LUA_REGISTRYINDEX, eventTableRef); eventTableRef = -1; } luaState = nullptr; return true; } int LuaScriptInterface::luaErrorHandler(lua_State* L) { const std::string& errorMessage = popString(L); auto interface = getScriptEnv()->getScriptInterface(); assert(interface); //This fires if the ScriptEnvironment hasn't been setup pushString(L, interface->getStackTrace(errorMessage)); return 1; } bool LuaScriptInterface::callFunction(int params) { bool result = false; int size = lua_gettop(luaState); if (protectedCall(luaState, params, 1) != 0) { LuaScriptInterface::reportError(nullptr, LuaScriptInterface::getString(luaState, -1)); } else { result = LuaScriptInterface::getBoolean(luaState, -1); } lua_pop(luaState, 1); if ((lua_gettop(luaState) + params + 1) != size) { LuaScriptInterface::reportError(nullptr, "Stack size changed!"); } resetScriptEnv(); return result; } void LuaScriptInterface::callVoidFunction(int params) { int size = lua_gettop(luaState); if (protectedCall(luaState, params, 0) != 0) { LuaScriptInterface::reportError(nullptr, LuaScriptInterface::popString(luaState)); } if ((lua_gettop(luaState) + params + 1) != size) { LuaScriptInterface::reportError(nullptr, "Stack size changed!"); } resetScriptEnv(); } void LuaScriptInterface::pushVariant(lua_State* L, const LuaVariant& var) { lua_createtable(L, 0, 2); setField(L, "type", var.type); switch (var.type) { case VARIANT_NUMBER: setField(L, "number", var.number); break; case VARIANT_STRING: setField(L, "string", var.text); break; case VARIANT_TARGETPOSITION: case VARIANT_POSITION: { pushPosition(L, var.pos); lua_setfield(L, -2, "pos"); break; } default: break; } setMetatable(L, -1, "Variant"); } void LuaScriptInterface::pushThing(lua_State* L, Thing* thing) { if (!thing) { lua_createtable(L, 0, 4); setField(L, "uid", 0); setField(L, "itemid", 0); setField(L, "actionid", 0); setField(L, "type", 0); return; } if (Item* item = thing->getItem()) { pushUserdata<Item>(L, item); setItemMetatable(L, -1, item); } else if (Creature* creature = thing->getCreature()) { pushUserdata<Creature>(L, creature); setCreatureMetatable(L, -1, creature); } else { lua_pushnil(L); } } void LuaScriptInterface::pushCylinder(lua_State* L, Cylinder* cylinder) { if (Creature* creature = cylinder->getCreature()) { pushUserdata<Creature>(L, creature); setCreatureMetatable(L, -1, creature); } else if (Item* parentItem = 
cylinder->getItem()) { pushUserdata<Item>(L, parentItem); setItemMetatable(L, -1, parentItem); } else if (Tile* tile = cylinder->getTile()) { pushUserdata<Tile>(L, tile); setMetatable(L, -1, "Tile"); } else if (cylinder == VirtualCylinder::virtualCylinder) { pushBoolean(L, true); } else { lua_pushnil(L); } } void LuaScriptInterface::pushString(lua_State* L, const std::string& value) { lua_pushlstring(L, value.c_str(), value.length()); } void LuaScriptInterface::pushCallback(lua_State* L, int32_t callback) { lua_rawgeti(L, LUA_REGISTRYINDEX, callback); } std::string LuaScriptInterface::popString(lua_State* L) { if (lua_gettop(L) == 0) { return std::string(); } std::string str(getString(L, -1)); lua_pop(L, 1); return str; } int32_t LuaScriptInterface::popCallback(lua_State* L) { return luaL_ref(L, LUA_REGISTRYINDEX); } // Metatables void LuaScriptInterface::setMetatable(lua_State* L, int32_t index, const std::string& name) { luaL_getmetatable(L, name.c_str()); lua_setmetatable(L, index - 1); } void LuaScriptInterface::setWeakMetatable(lua_State* L, int32_t index, const std::string& name) { static std::set<std::string> weakObjectTypes; const std::string& weakName = name + "_weak"; auto result = weakObjectTypes.emplace(name); if (result.second) { luaL_getmetatable(L, name.c_str()); int childMetatable = lua_gettop(L); luaL_newmetatable(L, weakName.c_str()); int metatable = lua_gettop(L); static const std::vector<std::string> methodKeys = {"__index", "__metatable", "__eq"}; for (const std::string& metaKey : methodKeys) { lua_getfield(L, childMetatable, metaKey.c_str()); lua_setfield(L, metatable, metaKey.c_str()); } static const std::vector<int> methodIndexes = {'h', 'p', 't'}; for (int metaIndex : methodIndexes) { lua_rawgeti(L, childMetatable, metaIndex); lua_rawseti(L, metatable, metaIndex); } lua_pushnil(L); lua_setfield(L, metatable, "__gc"); lua_remove(L, childMetatable); } else { luaL_getmetatable(L, weakName.c_str()); } lua_setmetatable(L, index - 1); } void LuaScriptInterface::setItemMetatable(lua_State* L, int32_t index, const Item* item) { if (item->getContainer()) { luaL_getmetatable(L, "Container"); } else if (item->getTeleport()) { luaL_getmetatable(L, "Teleport"); } else { luaL_getmetatable(L, "Item"); } lua_setmetatable(L, index - 1); } void LuaScriptInterface::setCreatureMetatable(lua_State* L, int32_t index, const Creature* creature) { if (creature->getPlayer()) { luaL_getmetatable(L, "Player"); } else if (creature->getMonster()) { luaL_getmetatable(L, "Monster"); } else { luaL_getmetatable(L, "Npc"); } lua_setmetatable(L, index - 1); } // Get std::string LuaScriptInterface::getString(lua_State* L, int32_t arg) { size_t len; const char* c_str = lua_tolstring(L, arg, &len); if (!c_str || len == 0) { return std::string(); } return std::string(c_str, len); } Position LuaScriptInterface::getPosition(lua_State* L, int32_t arg, int32_t& stackpos) { Position position; position.x = getField<uint16_t>(L, arg, "x"); position.y = getField<uint16_t>(L, arg, "y"); position.z = getField<uint8_t>(L, arg, "z"); lua_getfield(L, arg, "stackpos"); if (lua_isnil(L, -1) == 1) { stackpos = 0; } else { stackpos = getNumber<int32_t>(L, -1); } lua_pop(L, 4); return position; } Position LuaScriptInterface::getPosition(lua_State* L, int32_t arg) { Position position; position.x = getField<uint16_t>(L, arg, "x"); position.y = getField<uint16_t>(L, arg, "y"); position.z = getField<uint8_t>(L, arg, "z"); lua_pop(L, 3); return position; } Outfit_t LuaScriptInterface::getOutfit(lua_State* L, int32_t arg) { 
Outfit_t outfit; outfit.lookMount = getField<uint16_t>(L, arg, "lookMount"); outfit.lookAddons = getField<uint8_t>(L, arg, "lookAddons"); outfit.lookFeet = getField<uint8_t>(L, arg, "lookFeet"); outfit.lookLegs = getField<uint8_t>(L, arg, "lookLegs"); outfit.lookBody = getField<uint8_t>(L, arg, "lookBody"); outfit.lookHead = getField<uint8_t>(L, arg, "lookHead"); outfit.lookTypeEx = getField<uint16_t>(L, arg, "lookTypeEx"); outfit.lookType = getField<uint16_t>(L, arg, "lookType"); lua_pop(L, 8); return outfit; } LuaVariant LuaScriptInterface::getVariant(lua_State* L, int32_t arg) { LuaVariant var; switch (var.type = getField<LuaVariantType_t>(L, arg, "type")) { case VARIANT_NUMBER: { var.number = getField<uint32_t>(L, arg, "number"); lua_pop(L, 2); break; } case VARIANT_STRING: { var.text = getFieldString(L, arg, "string"); lua_pop(L, 2); break; } case VARIANT_POSITION: case VARIANT_TARGETPOSITION: { lua_getfield(L, arg, "pos"); var.pos = getPosition(L, lua_gettop(L)); lua_pop(L, 2); break; } default: { var.type = VARIANT_NONE; lua_pop(L, 1); break; } } return var; } Thing* LuaScriptInterface::getThing(lua_State* L, int32_t arg) { Thing* thing; if (lua_getmetatable(L, arg) != 0) { lua_rawgeti(L, -1, 't'); switch(getNumber<uint32_t>(L, -1)) { case LuaData_Item: thing = getUserdata<Item>(L, arg); break; case LuaData_Container: thing = getUserdata<Container>(L, arg); break; case LuaData_Teleport: thing = getUserdata<Teleport>(L, arg); break; case LuaData_Player: thing = getUserdata<Player>(L, arg); break; case LuaData_Monster: thing = getUserdata<Monster>(L, arg); break; case LuaData_Npc: thing = getUserdata<Npc>(L, arg); break; default: thing = nullptr; break; } lua_pop(L, 2); } else { thing = getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, arg)); } return thing; } Creature* LuaScriptInterface::getCreature(lua_State* L, int32_t arg) { if (isUserdata(L, arg)) { return getUserdata<Creature>(L, arg); } return g_game.getCreatureByID(getNumber<uint32_t>(L, arg)); } Player* LuaScriptInterface::getPlayer(lua_State* L, int32_t arg) { if (isUserdata(L, arg)) { return getUserdata<Player>(L, arg); } return g_game.getPlayerByID(getNumber<uint32_t>(L, arg)); } std::string LuaScriptInterface::getFieldString(lua_State* L, int32_t arg, const std::string& key) { lua_getfield(L, arg, key.c_str()); return getString(L, -1); } LuaDataType LuaScriptInterface::getUserdataType(lua_State* L, int32_t arg) { if (lua_getmetatable(L, arg) == 0) { return LuaData_Unknown; } lua_rawgeti(L, -1, 't'); LuaDataType type = getNumber<LuaDataType>(L, -1); lua_pop(L, 2); return type; } // Push void LuaScriptInterface::pushBoolean(lua_State* L, bool value) { lua_pushboolean(L, value ? 
1 : 0); } void LuaScriptInterface::pushPosition(lua_State* L, const Position& position, int32_t stackpos/* = 0*/) { lua_createtable(L, 0, 4); setField(L, "x", position.x); setField(L, "y", position.y); setField(L, "z", position.z); setField(L, "stackpos", stackpos); setMetatable(L, -1, "Position"); } void LuaScriptInterface::pushOutfit(lua_State* L, const Outfit_t& outfit) { lua_createtable(L, 0, 8); setField(L, "lookType", outfit.lookType); setField(L, "lookTypeEx", outfit.lookTypeEx); setField(L, "lookHead", outfit.lookHead); setField(L, "lookBody", outfit.lookBody); setField(L, "lookLegs", outfit.lookLegs); setField(L, "lookFeet", outfit.lookFeet); setField(L, "lookAddons", outfit.lookAddons); setField(L, "lookMount", outfit.lookMount); } #define registerEnum(value) { std::string enumName = #value; registerGlobalVariable(enumName.substr(enumName.find_last_of(':') + 1), value); } #define registerEnumIn(tableName, value) { std::string enumName = #value; registerVariable(tableName, enumName.substr(enumName.find_last_of(':') + 1), value); } void LuaScriptInterface::registerFunctions() { //getPlayerFlagValue(cid, flag) lua_register(luaState, "getPlayerFlagValue", LuaScriptInterface::luaGetPlayerFlagValue); //getPlayerInstantSpellCount(cid) lua_register(luaState, "getPlayerInstantSpellCount", LuaScriptInterface::luaGetPlayerInstantSpellCount); //getPlayerInstantSpellInfo(cid, index) lua_register(luaState, "getPlayerInstantSpellInfo", LuaScriptInterface::luaGetPlayerInstantSpellInfo); //doPlayerAddItem(uid, itemid, <optional: default: 1> count/subtype) //doPlayerAddItem(cid, itemid, <optional: default: 1> count, <optional: default: 1> canDropOnMap, <optional: default: 1>subtype) //Returns uid of the created item lua_register(luaState, "doPlayerAddItem", LuaScriptInterface::luaDoPlayerAddItem); //doCreateItem(itemid, type/count, pos) //Returns uid of the created item, only works on tiles. 
lua_register(luaState, "doCreateItem", LuaScriptInterface::luaDoCreateItem); //doCreateItemEx(itemid, <optional> count/subtype) lua_register(luaState, "doCreateItemEx", LuaScriptInterface::luaDoCreateItemEx); //doTileAddItemEx(pos, uid) lua_register(luaState, "doTileAddItemEx", LuaScriptInterface::luaDoTileAddItemEx); //doMoveCreature(cid, direction) lua_register(luaState, "doMoveCreature", LuaScriptInterface::luaDoMoveCreature); //doSetCreatureLight(cid, lightLevel, lightColor, time) lua_register(luaState, "doSetCreatureLight", LuaScriptInterface::luaDoSetCreatureLight); //getCreatureCondition(cid, condition[, subId]) lua_register(luaState, "getCreatureCondition", LuaScriptInterface::luaGetCreatureCondition); //isValidUID(uid) lua_register(luaState, "isValidUID", LuaScriptInterface::luaIsValidUID); //isDepot(uid) lua_register(luaState, "isDepot", LuaScriptInterface::luaIsDepot); //isMovable(uid) lua_register(luaState, "isMovable", LuaScriptInterface::luaIsMoveable); //doAddContainerItem(uid, itemid, <optional> count/subtype) lua_register(luaState, "doAddContainerItem", LuaScriptInterface::luaDoAddContainerItem); //getDepotId(uid) lua_register(luaState, "getDepotId", LuaScriptInterface::luaGetDepotId); //getWorldTime() lua_register(luaState, "getWorldTime", LuaScriptInterface::luaGetWorldTime); //getWorldLight() lua_register(luaState, "getWorldLight", LuaScriptInterface::luaGetWorldLight); //getWorldUpTime() lua_register(luaState, "getWorldUpTime", LuaScriptInterface::luaGetWorldUpTime); //createCombatArea( {area}, <optional> {extArea} ) lua_register(luaState, "createCombatArea", LuaScriptInterface::luaCreateCombatArea); //doAreaCombatHealth(cid, type, pos, area, min, max, effect) lua_register(luaState, "doAreaCombatHealth", LuaScriptInterface::luaDoAreaCombatHealth); //doTargetCombatHealth(cid, target, type, min, max, effect) lua_register(luaState, "doTargetCombatHealth", LuaScriptInterface::luaDoTargetCombatHealth); //doAreaCombatMana(cid, pos, area, min, max, effect) lua_register(luaState, "doAreaCombatMana", LuaScriptInterface::luaDoAreaCombatMana); //doTargetCombatMana(cid, target, min, max, effect) lua_register(luaState, "doTargetCombatMana", LuaScriptInterface::luaDoTargetCombatMana); //doAreaCombatCondition(cid, pos, area, condition, effect) lua_register(luaState, "doAreaCombatCondition", LuaScriptInterface::luaDoAreaCombatCondition); //doTargetCombatCondition(cid, target, condition, effect) lua_register(luaState, "doTargetCombatCondition", LuaScriptInterface::luaDoTargetCombatCondition); //doAreaCombatDispel(cid, pos, area, type, effect) lua_register(luaState, "doAreaCombatDispel", LuaScriptInterface::luaDoAreaCombatDispel); //doTargetCombatDispel(cid, target, type, effect) lua_register(luaState, "doTargetCombatDispel", LuaScriptInterface::luaDoTargetCombatDispel); //doChallengeCreature(cid, target) lua_register(luaState, "doChallengeCreature", LuaScriptInterface::luaDoChallengeCreature); //doSetMonsterOutfit(cid, name, time) lua_register(luaState, "doSetMonsterOutfit", LuaScriptInterface::luaSetMonsterOutfit); //doSetItemOutfit(cid, item, time) lua_register(luaState, "doSetItemOutfit", LuaScriptInterface::luaSetItemOutfit); //doSetCreatureOutfit(cid, outfit, time) lua_register(luaState, "doSetCreatureOutfit", LuaScriptInterface::luaSetCreatureOutfit); //isInArray(array, value) lua_register(luaState, "isInArray", LuaScriptInterface::luaIsInArray); //addEvent(callback, delay, ...) 
lua_register(luaState, "addEvent", LuaScriptInterface::luaAddEvent); //stopEvent(eventid) lua_register(luaState, "stopEvent", LuaScriptInterface::luaStopEvent); //saveServer() lua_register(luaState, "saveServer", LuaScriptInterface::luaSaveServer); //cleanMap() lua_register(luaState, "cleanMap", LuaScriptInterface::luaCleanMap); //debugPrint(text) lua_register(luaState, "debugPrint", LuaScriptInterface::luaDebugPrint); //isInWar(cid, target) lua_register(luaState, "isInWar", LuaScriptInterface::luaIsInWar); //getWaypointPosition(name) lua_register(luaState, "getWaypointPositionByName", LuaScriptInterface::luaGetWaypointPositionByName); //sendChannelMessage(channelId, type, message) lua_register(luaState, "sendChannelMessage", LuaScriptInterface::luaSendChannelMessage); //sendGuildChannelMessage(guildId, type, message) lua_register(luaState, "sendGuildChannelMessage", LuaScriptInterface::luaSendGuildChannelMessage); #ifndef LUAJIT_VERSION //bit operations for Lua, based on bitlib project release 24 //bit.bnot, bit.band, bit.bor, bit.bxor, bit.lshift, bit.rshift luaL_register(luaState, "bit", LuaScriptInterface::luaBitReg); #endif //configManager table luaL_register(luaState, "configManager", LuaScriptInterface::luaConfigManagerTable); //db table luaL_register(luaState, "db", LuaScriptInterface::luaDatabaseTable); //result table luaL_register(luaState, "result", LuaScriptInterface::luaResultTable); /* New functions */ //registerClass(className, baseClass, newFunction) //registerTable(tableName) //registerMethod(className, functionName, function) //registerMetaMethod(className, functionName, function) //registerGlobalMethod(functionName, function) //registerVariable(tableName, name, value) //registerGlobalVariable(name, value) //registerEnum(value) //registerEnumIn(tableName, value) // Enums registerEnum(ACCOUNT_TYPE_NORMAL) registerEnum(ACCOUNT_TYPE_TUTOR) registerEnum(ACCOUNT_TYPE_SENIORTUTOR) registerEnum(ACCOUNT_TYPE_GAMEMASTER) registerEnum(ACCOUNT_TYPE_GOD) registerEnum(CALLBACK_PARAM_LEVELMAGICVALUE) registerEnum(CALLBACK_PARAM_SKILLVALUE) registerEnum(CALLBACK_PARAM_TARGETTILE) registerEnum(CALLBACK_PARAM_TARGETCREATURE) registerEnum(COMBAT_FORMULA_UNDEFINED) registerEnum(COMBAT_FORMULA_LEVELMAGIC) registerEnum(COMBAT_FORMULA_SKILL) registerEnum(COMBAT_FORMULA_DAMAGE) registerEnum(DIRECTION_NORTH) registerEnum(DIRECTION_EAST) registerEnum(DIRECTION_SOUTH) registerEnum(DIRECTION_WEST) registerEnum(DIRECTION_SOUTHWEST) registerEnum(DIRECTION_SOUTHEAST) registerEnum(DIRECTION_NORTHWEST) registerEnum(DIRECTION_NORTHEAST) registerEnum(COMBAT_NONE) registerEnum(COMBAT_PHYSICALDAMAGE) registerEnum(COMBAT_ENERGYDAMAGE) registerEnum(COMBAT_EARTHDAMAGE) registerEnum(COMBAT_FIREDAMAGE) registerEnum(COMBAT_UNDEFINEDDAMAGE) registerEnum(COMBAT_LIFEDRAIN) registerEnum(COMBAT_MANADRAIN) registerEnum(COMBAT_HEALING) registerEnum(COMBAT_DROWNDAMAGE) registerEnum(COMBAT_ICEDAMAGE) registerEnum(COMBAT_HOLYDAMAGE) registerEnum(COMBAT_DEATHDAMAGE) registerEnum(COMBAT_PARAM_TYPE) registerEnum(COMBAT_PARAM_EFFECT) registerEnum(COMBAT_PARAM_DISTANCEEFFECT) registerEnum(COMBAT_PARAM_BLOCKSHIELD) registerEnum(COMBAT_PARAM_BLOCKARMOR) registerEnum(COMBAT_PARAM_TARGETCASTERORTOPMOST) registerEnum(COMBAT_PARAM_CREATEITEM) registerEnum(COMBAT_PARAM_AGGRESSIVE) registerEnum(COMBAT_PARAM_DISPEL) registerEnum(COMBAT_PARAM_USECHARGES) registerEnum(CONDITION_NONE) registerEnum(CONDITION_POISON) registerEnum(CONDITION_FIRE) registerEnum(CONDITION_ENERGY) registerEnum(CONDITION_BLEEDING) registerEnum(CONDITION_HASTE) 
registerEnum(CONDITION_PARALYZE) registerEnum(CONDITION_OUTFIT) registerEnum(CONDITION_INVISIBLE) registerEnum(CONDITION_LIGHT) registerEnum(CONDITION_MANASHIELD) registerEnum(CONDITION_INFIGHT) registerEnum(CONDITION_DRUNK) registerEnum(CONDITION_EXHAUST_WEAPON) registerEnum(CONDITION_REGENERATION) registerEnum(CONDITION_SOUL) registerEnum(CONDITION_DROWN) registerEnum(CONDITION_MUTED) registerEnum(CONDITION_CHANNELMUTEDTICKS) registerEnum(CONDITION_YELLTICKS) registerEnum(CONDITION_ATTRIBUTES) registerEnum(CONDITION_FREEZING) registerEnum(CONDITION_DAZZLED) registerEnum(CONDITION_CURSED) registerEnum(CONDITION_EXHAUST_COMBAT) registerEnum(CONDITION_EXHAUST_HEAL) registerEnum(CONDITION_PACIFIED) registerEnum(CONDITION_SPELLCOOLDOWN) registerEnum(CONDITION_SPELLGROUPCOOLDOWN) registerEnum(CONDITIONID_DEFAULT) registerEnum(CONDITIONID_COMBAT) registerEnum(CONDITIONID_HEAD) registerEnum(CONDITIONID_NECKLACE) registerEnum(CONDITIONID_BACKPACK) registerEnum(CONDITIONID_ARMOR) registerEnum(CONDITIONID_RIGHT) registerEnum(CONDITIONID_LEFT) registerEnum(CONDITIONID_LEGS) registerEnum(CONDITIONID_FEET) registerEnum(CONDITIONID_RING) registerEnum(CONDITIONID_AMMO) registerEnum(CONDITION_PARAM_OWNER) registerEnum(CONDITION_PARAM_TICKS) registerEnum(CONDITION_PARAM_HEALTHGAIN) registerEnum(CONDITION_PARAM_HEALTHTICKS) registerEnum(CONDITION_PARAM_MANAGAIN) registerEnum(CONDITION_PARAM_MANATICKS) registerEnum(CONDITION_PARAM_DELAYED) registerEnum(CONDITION_PARAM_SPEED) registerEnum(CONDITION_PARAM_LIGHT_LEVEL) registerEnum(CONDITION_PARAM_LIGHT_COLOR) registerEnum(CONDITION_PARAM_SOULGAIN) registerEnum(CONDITION_PARAM_SOULTICKS) registerEnum(CONDITION_PARAM_MINVALUE) registerEnum(CONDITION_PARAM_MAXVALUE) registerEnum(CONDITION_PARAM_STARTVALUE) registerEnum(CONDITION_PARAM_TICKINTERVAL) registerEnum(CONDITION_PARAM_FORCEUPDATE) registerEnum(CONDITION_PARAM_SKILL_MELEE) registerEnum(CONDITION_PARAM_SKILL_FIST) registerEnum(CONDITION_PARAM_SKILL_CLUB) registerEnum(CONDITION_PARAM_SKILL_SWORD) registerEnum(CONDITION_PARAM_SKILL_AXE) registerEnum(CONDITION_PARAM_SKILL_DISTANCE) registerEnum(CONDITION_PARAM_SKILL_SHIELD) registerEnum(CONDITION_PARAM_SKILL_FISHING) registerEnum(CONDITION_PARAM_STAT_MAXHITPOINTS) registerEnum(CONDITION_PARAM_STAT_MAXMANAPOINTS) registerEnum(CONDITION_PARAM_STAT_MAGICPOINTS) registerEnum(CONDITION_PARAM_STAT_MAXHITPOINTSPERCENT) registerEnum(CONDITION_PARAM_STAT_MAXMANAPOINTSPERCENT) registerEnum(CONDITION_PARAM_STAT_MAGICPOINTSPERCENT) registerEnum(CONDITION_PARAM_PERIODICDAMAGE) registerEnum(CONDITION_PARAM_SKILL_MELEEPERCENT) registerEnum(CONDITION_PARAM_SKILL_FISTPERCENT) registerEnum(CONDITION_PARAM_SKILL_CLUBPERCENT) registerEnum(CONDITION_PARAM_SKILL_SWORDPERCENT) registerEnum(CONDITION_PARAM_SKILL_AXEPERCENT) registerEnum(CONDITION_PARAM_SKILL_DISTANCEPERCENT) registerEnum(CONDITION_PARAM_SKILL_SHIELDPERCENT) registerEnum(CONDITION_PARAM_SKILL_FISHINGPERCENT) registerEnum(CONDITION_PARAM_BUFF_SPELL) registerEnum(CONDITION_PARAM_SUBID) registerEnum(CONDITION_PARAM_FIELD) registerEnum(CONST_ME_NONE) registerEnum(CONST_ME_DRAWBLOOD) registerEnum(CONST_ME_LOSEENERGY) registerEnum(CONST_ME_POFF) registerEnum(CONST_ME_BLOCKHIT) registerEnum(CONST_ME_EXPLOSIONAREA) registerEnum(CONST_ME_EXPLOSIONHIT) registerEnum(CONST_ME_FIREAREA) registerEnum(CONST_ME_YELLOW_RINGS) registerEnum(CONST_ME_GREEN_RINGS) registerEnum(CONST_ME_HITAREA) registerEnum(CONST_ME_TELEPORT) registerEnum(CONST_ME_ENERGYHIT) registerEnum(CONST_ME_MAGIC_BLUE) registerEnum(CONST_ME_MAGIC_RED) 
registerEnum(CONST_ME_NONE)
registerEnum(CONST_ME_DRAWBLOOD)
registerEnum(CONST_ME_LOSEENERGY)
registerEnum(CONST_ME_POFF)
registerEnum(CONST_ME_BLOCKHIT)
registerEnum(CONST_ME_EXPLOSIONAREA)
registerEnum(CONST_ME_EXPLOSIONHIT)
registerEnum(CONST_ME_FIREAREA)
registerEnum(CONST_ME_YELLOW_RINGS)
registerEnum(CONST_ME_GREEN_RINGS)
registerEnum(CONST_ME_HITAREA)
registerEnum(CONST_ME_TELEPORT)
registerEnum(CONST_ME_ENERGYHIT)
registerEnum(CONST_ME_MAGIC_BLUE)
registerEnum(CONST_ME_MAGIC_RED)
registerEnum(CONST_ME_MAGIC_GREEN)
registerEnum(CONST_ME_HITBYFIRE)
registerEnum(CONST_ME_HITBYPOISON)
registerEnum(CONST_ME_MORTAREA)
registerEnum(CONST_ME_SOUND_GREEN)
registerEnum(CONST_ME_SOUND_RED)
registerEnum(CONST_ME_POISONAREA)
registerEnum(CONST_ME_SOUND_YELLOW)
registerEnum(CONST_ME_SOUND_PURPLE)
registerEnum(CONST_ME_SOUND_BLUE)
registerEnum(CONST_ME_SOUND_WHITE)
registerEnum(CONST_ME_BUBBLES)
registerEnum(CONST_ME_CRAPS)
registerEnum(CONST_ME_GIFT_WRAPS)
registerEnum(CONST_ME_FIREWORK_YELLOW)
registerEnum(CONST_ME_FIREWORK_RED)
registerEnum(CONST_ME_FIREWORK_BLUE)
registerEnum(CONST_ME_STUN)
registerEnum(CONST_ME_SLEEP)
registerEnum(CONST_ME_WATERCREATURE)
registerEnum(CONST_ME_GROUNDSHAKER)
registerEnum(CONST_ME_HEARTS)
registerEnum(CONST_ME_FIREATTACK)
registerEnum(CONST_ME_ENERGYAREA)
registerEnum(CONST_ME_SMALLCLOUDS)
registerEnum(CONST_ME_HOLYDAMAGE)
registerEnum(CONST_ME_BIGCLOUDS)
registerEnum(CONST_ME_ICEAREA)
registerEnum(CONST_ME_ICETORNADO)
registerEnum(CONST_ME_ICEATTACK)
registerEnum(CONST_ME_STONES)
registerEnum(CONST_ME_SMALLPLANTS)
registerEnum(CONST_ME_CARNIPHILA)
registerEnum(CONST_ME_PURPLEENERGY)
registerEnum(CONST_ME_YELLOWENERGY)
registerEnum(CONST_ME_HOLYAREA)
registerEnum(CONST_ME_BIGPLANTS)
registerEnum(CONST_ME_CAKE)
registerEnum(CONST_ME_GIANTICE)
registerEnum(CONST_ME_WATERSPLASH)
registerEnum(CONST_ME_PLANTATTACK)
registerEnum(CONST_ME_TUTORIALARROW)
registerEnum(CONST_ME_TUTORIALSQUARE)
registerEnum(CONST_ME_MIRRORHORIZONTAL)
registerEnum(CONST_ME_MIRRORVERTICAL)
registerEnum(CONST_ME_SKULLHORIZONTAL)
registerEnum(CONST_ME_SKULLVERTICAL)
registerEnum(CONST_ME_ASSASSIN)
registerEnum(CONST_ME_STEPSHORIZONTAL)
registerEnum(CONST_ME_BLOODYSTEPS)
registerEnum(CONST_ME_STEPSVERTICAL)
registerEnum(CONST_ME_YALAHARIGHOST)
registerEnum(CONST_ME_BATS)
registerEnum(CONST_ME_SMOKE)
registerEnum(CONST_ME_INSECTS)
registerEnum(CONST_ME_DRAGONHEAD)
registerEnum(CONST_ME_ORCSHAMAN)
registerEnum(CONST_ME_ORCSHAMAN_FIRE)
registerEnum(CONST_ME_THUNDER)
registerEnum(CONST_ME_FERUMBRAS)
registerEnum(CONST_ME_CONFETTI_HORIZONTAL)
registerEnum(CONST_ME_CONFETTI_VERTICAL)
registerEnum(CONST_ME_BLACKSMOKE)
registerEnum(CONST_ME_REDSMOKE)
registerEnum(CONST_ME_YELLOWSMOKE)
registerEnum(CONST_ME_GREENSMOKE)
registerEnum(CONST_ME_PURPLESMOKE)
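// Usage sketch (Lua): CONST_ME_* effects are consumed by Position.sendMagicEffect,
// registered further below:
//   Position(100, 100, 7):sendMagicEffect(CONST_ME_TELEPORT)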
registerEnum(CONST_ANI_NONE)
registerEnum(CONST_ANI_SPEAR)
registerEnum(CONST_ANI_BOLT)
registerEnum(CONST_ANI_ARROW)
registerEnum(CONST_ANI_FIRE)
registerEnum(CONST_ANI_ENERGY)
registerEnum(CONST_ANI_POISONARROW)
registerEnum(CONST_ANI_BURSTARROW)
registerEnum(CONST_ANI_THROWINGSTAR)
registerEnum(CONST_ANI_THROWINGKNIFE)
registerEnum(CONST_ANI_SMALLSTONE)
registerEnum(CONST_ANI_DEATH)
registerEnum(CONST_ANI_LARGEROCK)
registerEnum(CONST_ANI_SNOWBALL)
registerEnum(CONST_ANI_POWERBOLT)
registerEnum(CONST_ANI_POISON)
registerEnum(CONST_ANI_INFERNALBOLT)
registerEnum(CONST_ANI_HUNTINGSPEAR)
registerEnum(CONST_ANI_ENCHANTEDSPEAR)
registerEnum(CONST_ANI_REDSTAR)
registerEnum(CONST_ANI_GREENSTAR)
registerEnum(CONST_ANI_ROYALSPEAR)
registerEnum(CONST_ANI_SNIPERARROW)
registerEnum(CONST_ANI_ONYXARROW)
registerEnum(CONST_ANI_PIERCINGBOLT)
registerEnum(CONST_ANI_WHIRLWINDSWORD)
registerEnum(CONST_ANI_WHIRLWINDAXE)
registerEnum(CONST_ANI_WHIRLWINDCLUB)
registerEnum(CONST_ANI_ETHEREALSPEAR)
registerEnum(CONST_ANI_ICE)
registerEnum(CONST_ANI_EARTH)
registerEnum(CONST_ANI_HOLY)
registerEnum(CONST_ANI_SUDDENDEATH)
registerEnum(CONST_ANI_FLASHARROW)
registerEnum(CONST_ANI_FLAMMINGARROW)
registerEnum(CONST_ANI_SHIVERARROW)
registerEnum(CONST_ANI_ENERGYBALL)
registerEnum(CONST_ANI_SMALLICE)
registerEnum(CONST_ANI_SMALLHOLY)
registerEnum(CONST_ANI_SMALLEARTH)
registerEnum(CONST_ANI_EARTHARROW)
registerEnum(CONST_ANI_EXPLOSION)
registerEnum(CONST_ANI_CAKE)
registerEnum(CONST_ANI_TARSALARROW)
registerEnum(CONST_ANI_VORTEXBOLT)
registerEnum(CONST_ANI_PRISMATICBOLT)
registerEnum(CONST_ANI_CRYSTALLINEARROW)
registerEnum(CONST_ANI_DRILLBOLT)
registerEnum(CONST_ANI_ENVENOMEDARROW)
registerEnum(CONST_ANI_GLOOTHSPEAR)
registerEnum(CONST_ANI_SIMPLEARROW)
registerEnum(CONST_ANI_WEAPONTYPE)

registerEnum(CONST_PROP_BLOCKSOLID)
registerEnum(CONST_PROP_HASHEIGHT)
registerEnum(CONST_PROP_BLOCKPROJECTILE)
registerEnum(CONST_PROP_BLOCKPATH)
registerEnum(CONST_PROP_ISVERTICAL)
registerEnum(CONST_PROP_ISHORIZONTAL)
registerEnum(CONST_PROP_MOVEABLE)
registerEnum(CONST_PROP_IMMOVABLEBLOCKSOLID)
registerEnum(CONST_PROP_IMMOVABLEBLOCKPATH)
registerEnum(CONST_PROP_IMMOVABLENOFIELDBLOCKPATH)
registerEnum(CONST_PROP_NOFIELDBLOCKPATH)
registerEnum(CONST_PROP_SUPPORTHANGABLE)

registerEnum(CONST_SLOT_HEAD)
registerEnum(CONST_SLOT_NECKLACE)
registerEnum(CONST_SLOT_BACKPACK)
registerEnum(CONST_SLOT_ARMOR)
registerEnum(CONST_SLOT_RIGHT)
registerEnum(CONST_SLOT_LEFT)
registerEnum(CONST_SLOT_LEGS)
registerEnum(CONST_SLOT_FEET)
registerEnum(CONST_SLOT_RING)
registerEnum(CONST_SLOT_AMMO)

registerEnum(CREATURE_EVENT_NONE)
registerEnum(CREATURE_EVENT_LOGIN)
registerEnum(CREATURE_EVENT_LOGOUT)
registerEnum(CREATURE_EVENT_THINK)
registerEnum(CREATURE_EVENT_PREPAREDEATH)
registerEnum(CREATURE_EVENT_DEATH)
registerEnum(CREATURE_EVENT_KILL)
registerEnum(CREATURE_EVENT_ADVANCE)
registerEnum(CREATURE_EVENT_MODALWINDOW)
registerEnum(CREATURE_EVENT_TEXTEDIT)
registerEnum(CREATURE_EVENT_HEALTHCHANGE)
registerEnum(CREATURE_EVENT_MANACHANGE)
registerEnum(CREATURE_EVENT_EXTENDED_OPCODE)

registerEnum(GAME_STATE_STARTUP)
registerEnum(GAME_STATE_INIT)
registerEnum(GAME_STATE_NORMAL)
registerEnum(GAME_STATE_CLOSED)
registerEnum(GAME_STATE_SHUTDOWN)
registerEnum(GAME_STATE_CLOSING)
registerEnum(GAME_STATE_MAINTAIN)

registerEnum(MESSAGE_STATUS_CONSOLE_BLUE)
registerEnum(MESSAGE_STATUS_CONSOLE_RED)
registerEnum(MESSAGE_STATUS_DEFAULT)
registerEnum(MESSAGE_STATUS_WARNING)
registerEnum(MESSAGE_EVENT_ADVANCE)
registerEnum(MESSAGE_STATUS_SMALL)
registerEnum(MESSAGE_INFO_DESCR)
registerEnum(MESSAGE_DAMAGE_DEALT)
registerEnum(MESSAGE_DAMAGE_RECEIVED)
registerEnum(MESSAGE_HEALED)
registerEnum(MESSAGE_EXPERIENCE)
registerEnum(MESSAGE_DAMAGE_OTHERS)
registerEnum(MESSAGE_HEALED_OTHERS)
registerEnum(MESSAGE_EXPERIENCE_OTHERS)
registerEnum(MESSAGE_EVENT_DEFAULT)
registerEnum(MESSAGE_EVENT_ORANGE)
registerEnum(MESSAGE_STATUS_CONSOLE_ORANGE)

registerEnum(CREATURETYPE_PLAYER)
registerEnum(CREATURETYPE_MONSTER)
registerEnum(CREATURETYPE_NPC)
registerEnum(CREATURETYPE_SUMMON_OWN)
registerEnum(CREATURETYPE_SUMMON_OTHERS)

registerEnum(CLIENTOS_LINUX)
registerEnum(CLIENTOS_WINDOWS)
registerEnum(CLIENTOS_FLASH)
registerEnum(CLIENTOS_OTCLIENT_LINUX)
registerEnum(CLIENTOS_OTCLIENT_WINDOWS)
registerEnum(CLIENTOS_OTCLIENT_MAC)
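// Usage sketch (Lua) for the MESSAGE_* classes above; `player` is assumed to be
// a valid Player userdata:
//   player:sendTextMessage(MESSAGE_INFO_DESCR, "Welcome back!")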
registerEnum(ITEM_ATTRIBUTE_NONE)
registerEnum(ITEM_ATTRIBUTE_ACTIONID)
registerEnum(ITEM_ATTRIBUTE_UNIQUEID)
registerEnum(ITEM_ATTRIBUTE_DESCRIPTION)
registerEnum(ITEM_ATTRIBUTE_TEXT)
registerEnum(ITEM_ATTRIBUTE_DATE)
registerEnum(ITEM_ATTRIBUTE_WRITER)
registerEnum(ITEM_ATTRIBUTE_NAME)
registerEnum(ITEM_ATTRIBUTE_ARTICLE)
registerEnum(ITEM_ATTRIBUTE_PLURALNAME)
registerEnum(ITEM_ATTRIBUTE_WEIGHT)
registerEnum(ITEM_ATTRIBUTE_ATTACK)
registerEnum(ITEM_ATTRIBUTE_DEFENSE)
registerEnum(ITEM_ATTRIBUTE_EXTRADEFENSE)
registerEnum(ITEM_ATTRIBUTE_ARMOR)
registerEnum(ITEM_ATTRIBUTE_HITCHANCE)
registerEnum(ITEM_ATTRIBUTE_SHOOTRANGE)
registerEnum(ITEM_ATTRIBUTE_OWNER)
registerEnum(ITEM_ATTRIBUTE_DURATION)
registerEnum(ITEM_ATTRIBUTE_DECAYSTATE)
registerEnum(ITEM_ATTRIBUTE_CORPSEOWNER)
registerEnum(ITEM_ATTRIBUTE_CHARGES)
registerEnum(ITEM_ATTRIBUTE_FLUIDTYPE)
registerEnum(ITEM_ATTRIBUTE_DOORID)

registerEnum(ITEM_TYPE_DEPOT)
registerEnum(ITEM_TYPE_MAILBOX)
registerEnum(ITEM_TYPE_TRASHHOLDER)
registerEnum(ITEM_TYPE_CONTAINER)
registerEnum(ITEM_TYPE_DOOR)
registerEnum(ITEM_TYPE_MAGICFIELD)
registerEnum(ITEM_TYPE_TELEPORT)
registerEnum(ITEM_TYPE_BED)
registerEnum(ITEM_TYPE_KEY)
registerEnum(ITEM_TYPE_RUNE)

registerEnum(ITEM_BAG)
registerEnum(ITEM_GOLD_COIN)
registerEnum(ITEM_PLATINUM_COIN)
registerEnum(ITEM_CRYSTAL_COIN)
registerEnum(ITEM_AMULETOFLOSS)
registerEnum(ITEM_PARCEL)
registerEnum(ITEM_LABEL)
registerEnum(ITEM_FIREFIELD_PVP_FULL)
registerEnum(ITEM_FIREFIELD_PVP_MEDIUM)
registerEnum(ITEM_FIREFIELD_PVP_SMALL)
registerEnum(ITEM_FIREFIELD_PERSISTENT_FULL)
registerEnum(ITEM_FIREFIELD_PERSISTENT_MEDIUM)
registerEnum(ITEM_FIREFIELD_PERSISTENT_SMALL)
registerEnum(ITEM_FIREFIELD_NOPVP)
registerEnum(ITEM_POISONFIELD_PVP)
registerEnum(ITEM_POISONFIELD_PERSISTENT)
registerEnum(ITEM_POISONFIELD_NOPVP)
registerEnum(ITEM_ENERGYFIELD_PVP)
registerEnum(ITEM_ENERGYFIELD_PERSISTENT)
registerEnum(ITEM_ENERGYFIELD_NOPVP)
registerEnum(ITEM_MAGICWALL)
registerEnum(ITEM_MAGICWALL_PERSISTENT)
registerEnum(ITEM_MAGICWALL_SAFE)
registerEnum(ITEM_WILDGROWTH)
registerEnum(ITEM_WILDGROWTH_PERSISTENT)
registerEnum(ITEM_WILDGROWTH_SAFE)

registerEnum(PlayerFlag_CannotUseCombat)
registerEnum(PlayerFlag_CannotAttackPlayer)
registerEnum(PlayerFlag_CannotAttackMonster)
registerEnum(PlayerFlag_CannotBeAttacked)
registerEnum(PlayerFlag_CanConvinceAll)
registerEnum(PlayerFlag_CanSummonAll)
registerEnum(PlayerFlag_CanIllusionAll)
registerEnum(PlayerFlag_CanSenseInvisibility)
registerEnum(PlayerFlag_IgnoredByMonsters)
registerEnum(PlayerFlag_NotGainInFight)
registerEnum(PlayerFlag_HasInfiniteMana)
registerEnum(PlayerFlag_HasInfiniteSoul)
registerEnum(PlayerFlag_HasNoExhaustion)
registerEnum(PlayerFlag_CannotUseSpells)
registerEnum(PlayerFlag_CannotPickupItem)
registerEnum(PlayerFlag_CanAlwaysLogin)
registerEnum(PlayerFlag_CanBroadcast)
registerEnum(PlayerFlag_CanEditHouses)
registerEnum(PlayerFlag_CannotBeBanned)
registerEnum(PlayerFlag_CannotBePushed)
registerEnum(PlayerFlag_HasInfiniteCapacity)
registerEnum(PlayerFlag_CanPushAllCreatures)
registerEnum(PlayerFlag_CanTalkRedPrivate)
registerEnum(PlayerFlag_CanTalkRedChannel)
registerEnum(PlayerFlag_TalkOrangeHelpChannel)
registerEnum(PlayerFlag_NotGainExperience)
registerEnum(PlayerFlag_NotGainMana)
registerEnum(PlayerFlag_NotGainHealth)
registerEnum(PlayerFlag_NotGainSkill)
registerEnum(PlayerFlag_SetMaxSpeed)
registerEnum(PlayerFlag_SpecialVIP)
registerEnum(PlayerFlag_NotGenerateLoot)
registerEnum(PlayerFlag_CanTalkRedChannelAnonymous)
registerEnum(PlayerFlag_IgnoreProtectionZone)
registerEnum(PlayerFlag_IgnoreSpellCheck)
registerEnum(PlayerFlag_IgnoreWeaponCheck)
registerEnum(PlayerFlag_CannotBeMuted)
registerEnum(PlayerFlag_IsAlwaysPremium)

registerEnum(PLAYERSEX_FEMALE)
registerEnum(PLAYERSEX_MALE)

registerEnum(VOCATION_NONE)
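// Usage sketch (Lua) for the ITEM_ATTRIBUTE_* keys above, using the Item
// attribute accessors registered further below; `item` is assumed to be a
// valid Item userdata:
//   item:setAttribute(ITEM_ATTRIBUTE_ACTIONID, 2000)
//   local actionId = item:getAttribute(ITEM_ATTRIBUTE_ACTIONID)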
registerEnum(SKILL_FIST)
registerEnum(SKILL_CLUB)
registerEnum(SKILL_SWORD)
registerEnum(SKILL_AXE)
registerEnum(SKILL_DISTANCE)
registerEnum(SKILL_SHIELD)
registerEnum(SKILL_FISHING)
registerEnum(SKILL_MAGLEVEL)
registerEnum(SKILL_LEVEL)

registerEnum(SKULL_NONE)
registerEnum(SKULL_YELLOW)
registerEnum(SKULL_GREEN)
registerEnum(SKULL_WHITE)
registerEnum(SKULL_RED)
registerEnum(SKULL_BLACK)
registerEnum(SKULL_ORANGE)

registerEnum(TALKTYPE_SAY)
registerEnum(TALKTYPE_WHISPER)
registerEnum(TALKTYPE_YELL)
registerEnum(TALKTYPE_PRIVATE_FROM)
registerEnum(TALKTYPE_PRIVATE_TO)
registerEnum(TALKTYPE_CHANNEL_Y)
registerEnum(TALKTYPE_CHANNEL_O)
registerEnum(TALKTYPE_PRIVATE_NP)
registerEnum(TALKTYPE_PRIVATE_PN)
registerEnum(TALKTYPE_BROADCAST)
registerEnum(TALKTYPE_CHANNEL_R1)
registerEnum(TALKTYPE_PRIVATE_RED_FROM)
registerEnum(TALKTYPE_PRIVATE_RED_TO)
registerEnum(TALKTYPE_MONSTER_SAY)
registerEnum(TALKTYPE_MONSTER_YELL)
registerEnum(TALKTYPE_CHANNEL_R2)

registerEnum(TEXTCOLOR_BLUE)
registerEnum(TEXTCOLOR_LIGHTGREEN)
registerEnum(TEXTCOLOR_LIGHTBLUE)
registerEnum(TEXTCOLOR_MAYABLUE)
registerEnum(TEXTCOLOR_DARKRED)
registerEnum(TEXTCOLOR_LIGHTGREY)
registerEnum(TEXTCOLOR_SKYBLUE)
registerEnum(TEXTCOLOR_PURPLE)
registerEnum(TEXTCOLOR_RED)
registerEnum(TEXTCOLOR_ORANGE)
registerEnum(TEXTCOLOR_YELLOW)
registerEnum(TEXTCOLOR_WHITE_EXP)
registerEnum(TEXTCOLOR_NONE)

registerEnum(TILESTATE_NONE)
registerEnum(TILESTATE_PROTECTIONZONE)
registerEnum(TILESTATE_NOPVPZONE)
registerEnum(TILESTATE_NOLOGOUT)
registerEnum(TILESTATE_PVPZONE)
registerEnum(TILESTATE_FLOORCHANGE)
registerEnum(TILESTATE_FLOORCHANGE_DOWN)
registerEnum(TILESTATE_FLOORCHANGE_NORTH)
registerEnum(TILESTATE_FLOORCHANGE_SOUTH)
registerEnum(TILESTATE_FLOORCHANGE_EAST)
registerEnum(TILESTATE_FLOORCHANGE_WEST)
registerEnum(TILESTATE_TELEPORT)
registerEnum(TILESTATE_MAGICFIELD)
registerEnum(TILESTATE_MAILBOX)
registerEnum(TILESTATE_TRASHHOLDER)
registerEnum(TILESTATE_BED)
registerEnum(TILESTATE_DEPOT)
registerEnum(TILESTATE_BLOCKSOLID)
registerEnum(TILESTATE_BLOCKPATH)
registerEnum(TILESTATE_IMMOVABLEBLOCKSOLID)
registerEnum(TILESTATE_IMMOVABLEBLOCKPATH)
registerEnum(TILESTATE_IMMOVABLENOFIELDBLOCKPATH)
registerEnum(TILESTATE_NOFIELDBLOCKPATH)
registerEnum(TILESTATE_FLOORCHANGE_SOUTH_ALT)
registerEnum(TILESTATE_FLOORCHANGE_EAST_ALT)
registerEnum(TILESTATE_SUPPORTS_HANGABLE)

registerEnum(WEAPON_NONE)
registerEnum(WEAPON_SWORD)
registerEnum(WEAPON_CLUB)
registerEnum(WEAPON_AXE)
registerEnum(WEAPON_SHIELD)
registerEnum(WEAPON_DISTANCE)
registerEnum(WEAPON_WAND)
registerEnum(WEAPON_AMMO)

registerEnum(WORLD_TYPE_NO_PVP)
registerEnum(WORLD_TYPE_PVP)
registerEnum(WORLD_TYPE_PVP_ENFORCED)
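// Usage sketch (Lua) for the SKILL_* and TALKTYPE_* enums above, using methods
// registered on Player and Creature further below:
//   local swordLevel = player:getSkillLevel(SKILL_SWORD)
//   creature:say("Hello!", TALKTYPE_MONSTER_SAY)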
// Use with container:addItem, container:addItemEx and possibly other functions.
registerEnum(FLAG_NOLIMIT)
registerEnum(FLAG_IGNOREBLOCKITEM)
registerEnum(FLAG_IGNOREBLOCKCREATURE)
registerEnum(FLAG_CHILDISOWNER)
registerEnum(FLAG_PATHFINDING)
registerEnum(FLAG_IGNOREFIELDDAMAGE)
registerEnum(FLAG_IGNORENOTMOVEABLE)
registerEnum(FLAG_IGNOREAUTOSTACK)

// Use with itemType:getSlotPosition
registerEnum(SLOTP_WHEREEVER)
registerEnum(SLOTP_HEAD)
registerEnum(SLOTP_NECKLACE)
registerEnum(SLOTP_BACKPACK)
registerEnum(SLOTP_ARMOR)
registerEnum(SLOTP_RIGHT)
registerEnum(SLOTP_LEFT)
registerEnum(SLOTP_LEGS)
registerEnum(SLOTP_FEET)
registerEnum(SLOTP_RING)
registerEnum(SLOTP_AMMO)
registerEnum(SLOTP_DEPOT)
registerEnum(SLOTP_TWO_HAND)

// Use with combat functions
registerEnum(ORIGIN_NONE)
registerEnum(ORIGIN_CONDITION)
registerEnum(ORIGIN_SPELL)
registerEnum(ORIGIN_MELEE)
registerEnum(ORIGIN_RANGED)

// Use with house:getAccessList, house:setAccessList
registerEnum(GUEST_LIST)
registerEnum(SUBOWNER_LIST)

// Use with npc:setSpeechBubble
registerEnum(SPEECHBUBBLE_NONE)
registerEnum(SPEECHBUBBLE_NORMAL)
registerEnum(SPEECHBUBBLE_TRADE)
registerEnum(SPEECHBUBBLE_QUEST)
registerEnum(SPEECHBUBBLE_QUESTTRADER)

// Use with player:addMapMark
registerEnum(MAPMARK_TICK)
registerEnum(MAPMARK_QUESTION)
registerEnum(MAPMARK_EXCLAMATION)
registerEnum(MAPMARK_STAR)
registerEnum(MAPMARK_CROSS)
registerEnum(MAPMARK_TEMPLE)
registerEnum(MAPMARK_KISS)
registerEnum(MAPMARK_SHOVEL)
registerEnum(MAPMARK_SWORD)
registerEnum(MAPMARK_FLAG)
registerEnum(MAPMARK_LOCK)
registerEnum(MAPMARK_BAG)
registerEnum(MAPMARK_SKULL)
registerEnum(MAPMARK_DOLLAR)
registerEnum(MAPMARK_REDNORTH)
registerEnum(MAPMARK_REDSOUTH)
registerEnum(MAPMARK_REDEAST)
registerEnum(MAPMARK_REDWEST)
registerEnum(MAPMARK_GREENNORTH)
registerEnum(MAPMARK_GREENSOUTH)
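// Usage sketch (Lua), assuming the common (position, type, description)
// signature of player:addMapMark registered further below:
//   player:addMapMark(Position(100, 100, 7), MAPMARK_STAR, "Treasure here")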
// Use with Game.getReturnMessage
registerEnum(RETURNVALUE_NOERROR)
registerEnum(RETURNVALUE_NOTPOSSIBLE)
registerEnum(RETURNVALUE_NOTENOUGHROOM)
registerEnum(RETURNVALUE_PLAYERISPZLOCKED)
registerEnum(RETURNVALUE_PLAYERISNOTINVITED)
registerEnum(RETURNVALUE_CANNOTTHROW)
registerEnum(RETURNVALUE_THEREISNOWAY)
registerEnum(RETURNVALUE_DESTINATIONOUTOFREACH)
registerEnum(RETURNVALUE_CREATUREBLOCK)
registerEnum(RETURNVALUE_NOTMOVEABLE)
registerEnum(RETURNVALUE_DROPTWOHANDEDITEM)
registerEnum(RETURNVALUE_BOTHHANDSNEEDTOBEFREE)
registerEnum(RETURNVALUE_CANONLYUSEONEWEAPON)
registerEnum(RETURNVALUE_NEEDEXCHANGE)
registerEnum(RETURNVALUE_CANNOTBEDRESSED)
registerEnum(RETURNVALUE_PUTTHISOBJECTINYOURHAND)
registerEnum(RETURNVALUE_PUTTHISOBJECTINBOTHHANDS)
registerEnum(RETURNVALUE_TOOFARAWAY)
registerEnum(RETURNVALUE_FIRSTGODOWNSTAIRS)
registerEnum(RETURNVALUE_FIRSTGOUPSTAIRS)
registerEnum(RETURNVALUE_CONTAINERNOTENOUGHROOM)
registerEnum(RETURNVALUE_NOTENOUGHCAPACITY)
registerEnum(RETURNVALUE_CANNOTPICKUP)
registerEnum(RETURNVALUE_THISISIMPOSSIBLE)
registerEnum(RETURNVALUE_DEPOTISFULL)
registerEnum(RETURNVALUE_CREATUREDOESNOTEXIST)
registerEnum(RETURNVALUE_CANNOTUSETHISOBJECT)
registerEnum(RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE)
registerEnum(RETURNVALUE_NOTREQUIREDLEVELTOUSERUNE)
registerEnum(RETURNVALUE_YOUAREALREADYTRADING)
registerEnum(RETURNVALUE_THISPLAYERISALREADYTRADING)
registerEnum(RETURNVALUE_YOUMAYNOTLOGOUTDURINGAFIGHT)
registerEnum(RETURNVALUE_DIRECTPLAYERSHOOT)
registerEnum(RETURNVALUE_NOTENOUGHLEVEL)
registerEnum(RETURNVALUE_NOTENOUGHMAGICLEVEL)
registerEnum(RETURNVALUE_NOTENOUGHMANA)
registerEnum(RETURNVALUE_NOTENOUGHSOUL)
registerEnum(RETURNVALUE_YOUAREEXHAUSTED)
registerEnum(RETURNVALUE_PLAYERISNOTREACHABLE)
registerEnum(RETURNVALUE_CANONLYUSETHISRUNEONCREATURES)
registerEnum(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKTHISPLAYER)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKAPERSONINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKAPERSONWHILEINPROTECTIONZONE)
registerEnum(RETURNVALUE_YOUMAYNOTATTACKTHISCREATURE)
registerEnum(RETURNVALUE_YOUCANONLYUSEITONCREATURES)
registerEnum(RETURNVALUE_CREATUREISNOTREACHABLE)
registerEnum(RETURNVALUE_TURNSECUREMODETOATTACKUNMARKEDPLAYERS)
registerEnum(RETURNVALUE_YOUNEEDPREMIUMACCOUNT)
registerEnum(RETURNVALUE_YOUNEEDTOLEARNTHISSPELL)
registerEnum(RETURNVALUE_YOURVOCATIONCANNOTUSETHISSPELL)
registerEnum(RETURNVALUE_YOUNEEDAWEAPONTOUSETHISSPELL)
registerEnum(RETURNVALUE_PLAYERISPZLOCKEDLEAVEPVPZONE)
registerEnum(RETURNVALUE_PLAYERISPZLOCKEDENTERPVPZONE)
registerEnum(RETURNVALUE_ACTIONNOTPERMITTEDINANOPVPZONE)
registerEnum(RETURNVALUE_YOUCANNOTLOGOUTHERE)
registerEnum(RETURNVALUE_YOUNEEDAMAGICITEMTOCASTSPELL)
registerEnum(RETURNVALUE_CANNOTCONJUREITEMHERE)
registerEnum(RETURNVALUE_YOUNEEDTOSPLITYOURSPEARS)
registerEnum(RETURNVALUE_NAMEISTOOAMBIGUOUS)
registerEnum(RETURNVALUE_CANONLYUSEONESHIELD)
registerEnum(RETURNVALUE_NOPARTYMEMBERSINRANGE)
registerEnum(RETURNVALUE_YOUARENOTTHEOWNER)
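// Usage sketch (Lua): RETURNVALUE_* constants map to client-visible text through
// Game.getReturnMessage, registered further below; `player` is assumed valid:
//   player:sendTextMessage(MESSAGE_STATUS_SMALL, Game.getReturnMessage(RETURNVALUE_NOTPOSSIBLE))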
registerEnumIn("configKeys", ConfigManager::MAX_PLAYERS) registerEnumIn("configKeys", ConfigManager::PZ_LOCKED) registerEnumIn("configKeys", ConfigManager::DEFAULT_DESPAWNRANGE) registerEnumIn("configKeys", ConfigManager::DEFAULT_DESPAWNRADIUS) registerEnumIn("configKeys", ConfigManager::RATE_EXPERIENCE) registerEnumIn("configKeys", ConfigManager::RATE_SKILL) registerEnumIn("configKeys", ConfigManager::RATE_LOOT) registerEnumIn("configKeys", ConfigManager::RATE_MAGIC) registerEnumIn("configKeys", ConfigManager::RATE_SPAWN) registerEnumIn("configKeys", ConfigManager::HOUSE_PRICE) registerEnumIn("configKeys", ConfigManager::KILLS_TO_RED) registerEnumIn("configKeys", ConfigManager::KILLS_TO_BLACK) registerEnumIn("configKeys", ConfigManager::MAX_MESSAGEBUFFER) registerEnumIn("configKeys", ConfigManager::ACTIONS_DELAY_INTERVAL) registerEnumIn("configKeys", ConfigManager::EX_ACTIONS_DELAY_INTERVAL) registerEnumIn("configKeys", ConfigManager::KICK_AFTER_MINUTES) registerEnumIn("configKeys", ConfigManager::PROTECTION_LEVEL) registerEnumIn("configKeys", ConfigManager::DEATH_LOSE_PERCENT) registerEnumIn("configKeys", ConfigManager::STATUSQUERY_TIMEOUT) registerEnumIn("configKeys", ConfigManager::FRAG_TIME) registerEnumIn("configKeys", ConfigManager::WHITE_SKULL_TIME) registerEnumIn("configKeys", ConfigManager::GAME_PORT) registerEnumIn("configKeys", ConfigManager::LOGIN_PORT) registerEnumIn("configKeys", ConfigManager::STATUS_PORT) registerEnumIn("configKeys", ConfigManager::STAIRHOP_DELAY) registerEnumIn("configKeys", ConfigManager::MARKET_OFFER_DURATION) registerEnumIn("configKeys", ConfigManager::CHECK_EXPIRED_MARKET_OFFERS_EACH_MINUTES) registerEnumIn("configKeys", ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER) registerEnumIn("configKeys", ConfigManager::EXP_FROM_PLAYERS_LEVEL_RANGE) registerEnumIn("configKeys", ConfigManager::MAX_PACKETS_PER_SECOND) // os registerMethod("os", "mtime", LuaScriptInterface::luaSystemTime); // table registerMethod("table", "create", LuaScriptInterface::luaTableCreate); // Game registerTable("Game"); registerMethod("Game", "getSpectators", LuaScriptInterface::luaGameGetSpectators); registerMethod("Game", "getPlayers", LuaScriptInterface::luaGameGetPlayers); registerMethod("Game", "loadMap", LuaScriptInterface::luaGameLoadMap); registerMethod("Game", "getExperienceStage", LuaScriptInterface::luaGameGetExperienceStage); registerMethod("Game", "getMonsterCount", LuaScriptInterface::luaGameGetMonsterCount); registerMethod("Game", "getPlayerCount", LuaScriptInterface::luaGameGetPlayerCount); registerMethod("Game", "getNpcCount", LuaScriptInterface::luaGameGetNpcCount); registerMethod("Game", "getTowns", LuaScriptInterface::luaGameGetTowns); registerMethod("Game", "getHouses", LuaScriptInterface::luaGameGetHouses); registerMethod("Game", "getGameState", LuaScriptInterface::luaGameGetGameState); registerMethod("Game", "setGameState", LuaScriptInterface::luaGameSetGameState); registerMethod("Game", "getWorldType", LuaScriptInterface::luaGameGetWorldType); registerMethod("Game", "setWorldType", LuaScriptInterface::luaGameSetWorldType); registerMethod("Game", "getReturnMessage", LuaScriptInterface::luaGameGetReturnMessage); registerMethod("Game", "createItem", LuaScriptInterface::luaGameCreateItem); registerMethod("Game", "createContainer", LuaScriptInterface::luaGameCreateContainer); registerMethod("Game", "createMonster", LuaScriptInterface::luaGameCreateMonster); registerMethod("Game", "createNpc", LuaScriptInterface::luaGameCreateNpc); registerMethod("Game", 
"createTile", LuaScriptInterface::luaGameCreateTile); registerMethod("Game", "startRaid", LuaScriptInterface::luaGameStartRaid); // Variant registerClass("Variant", "", LuaScriptInterface::luaVariantCreate); registerMethod("Variant", "getNumber", LuaScriptInterface::luaVariantGetNumber); registerMethod("Variant", "getString", LuaScriptInterface::luaVariantGetString); registerMethod("Variant", "getPosition", LuaScriptInterface::luaVariantGetPosition); // Position registerClass("Position", "", LuaScriptInterface::luaPositionCreate); registerMetaMethod("Position", "__add", LuaScriptInterface::luaPositionAdd); registerMetaMethod("Position", "__sub", LuaScriptInterface::luaPositionSub); registerMetaMethod("Position", "__eq", LuaScriptInterface::luaPositionCompare); registerMethod("Position", "getDistance", LuaScriptInterface::luaPositionGetDistance); registerMethod("Position", "isSightClear", LuaScriptInterface::luaPositionIsSightClear); registerMethod("Position", "sendMagicEffect", LuaScriptInterface::luaPositionSendMagicEffect); registerMethod("Position", "sendDistanceEffect", LuaScriptInterface::luaPositionSendDistanceEffect); // Tile registerClass("Tile", "", LuaScriptInterface::luaTileCreate); registerMetaMethod("Tile", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Tile", "getPosition", LuaScriptInterface::luaTileGetPosition); registerMethod("Tile", "getGround", LuaScriptInterface::luaTileGetGround); registerMethod("Tile", "getThing", LuaScriptInterface::luaTileGetThing); registerMethod("Tile", "getThingCount", LuaScriptInterface::luaTileGetThingCount); registerMethod("Tile", "getTopVisibleThing", LuaScriptInterface::luaTileGetTopVisibleThing); registerMethod("Tile", "getTopTopItem", LuaScriptInterface::luaTileGetTopTopItem); registerMethod("Tile", "getTopDownItem", LuaScriptInterface::luaTileGetTopDownItem); registerMethod("Tile", "getFieldItem", LuaScriptInterface::luaTileGetFieldItem); registerMethod("Tile", "getItemById", LuaScriptInterface::luaTileGetItemById); registerMethod("Tile", "getItemByType", LuaScriptInterface::luaTileGetItemByType); registerMethod("Tile", "getItemByTopOrder", LuaScriptInterface::luaTileGetItemByTopOrder); registerMethod("Tile", "getItemCountById", LuaScriptInterface::luaTileGetItemCountById); registerMethod("Tile", "getBottomCreature", LuaScriptInterface::luaTileGetBottomCreature); registerMethod("Tile", "getTopCreature", LuaScriptInterface::luaTileGetTopCreature); registerMethod("Tile", "getBottomVisibleCreature", LuaScriptInterface::luaTileGetBottomVisibleCreature); registerMethod("Tile", "getTopVisibleCreature", LuaScriptInterface::luaTileGetTopVisibleCreature); registerMethod("Tile", "getItems", LuaScriptInterface::luaTileGetItems); registerMethod("Tile", "getItemCount", LuaScriptInterface::luaTileGetItemCount); registerMethod("Tile", "getDownItemCount", LuaScriptInterface::luaTileGetDownItemCount); registerMethod("Tile", "getTopItemCount", LuaScriptInterface::luaTileGetTopItemCount); registerMethod("Tile", "getCreatures", LuaScriptInterface::luaTileGetCreatures); registerMethod("Tile", "getCreatureCount", LuaScriptInterface::luaTileGetCreatureCount); registerMethod("Tile", "getThingIndex", LuaScriptInterface::luaTileGetThingIndex); registerMethod("Tile", "hasProperty", LuaScriptInterface::luaTileHasProperty); registerMethod("Tile", "hasFlag", LuaScriptInterface::luaTileHasFlag); registerMethod("Tile", "queryAdd", LuaScriptInterface::luaTileQueryAdd); registerMethod("Tile", "getHouse", LuaScriptInterface::luaTileGetHouse); // 
// NetworkMessage
registerClass("NetworkMessage", "", LuaScriptInterface::luaNetworkMessageCreate);
registerMetaMethod("NetworkMessage", "__eq", LuaScriptInterface::luaUserdataCompare);
registerMetaMethod("NetworkMessage", "__gc", LuaScriptInterface::luaNetworkMessageDelete);
registerMethod("NetworkMessage", "delete", LuaScriptInterface::luaNetworkMessageDelete);

registerMethod("NetworkMessage", "getByte", LuaScriptInterface::luaNetworkMessageGetByte);
registerMethod("NetworkMessage", "getU16", LuaScriptInterface::luaNetworkMessageGetU16);
registerMethod("NetworkMessage", "getU32", LuaScriptInterface::luaNetworkMessageGetU32);
registerMethod("NetworkMessage", "getU64", LuaScriptInterface::luaNetworkMessageGetU64);
registerMethod("NetworkMessage", "getString", LuaScriptInterface::luaNetworkMessageGetString);
registerMethod("NetworkMessage", "getPosition", LuaScriptInterface::luaNetworkMessageGetPosition);

registerMethod("NetworkMessage", "addByte", LuaScriptInterface::luaNetworkMessageAddByte);
registerMethod("NetworkMessage", "addU16", LuaScriptInterface::luaNetworkMessageAddU16);
registerMethod("NetworkMessage", "addU32", LuaScriptInterface::luaNetworkMessageAddU32);
registerMethod("NetworkMessage", "addU64", LuaScriptInterface::luaNetworkMessageAddU64);
registerMethod("NetworkMessage", "addString", LuaScriptInterface::luaNetworkMessageAddString);
registerMethod("NetworkMessage", "addPosition", LuaScriptInterface::luaNetworkMessageAddPosition);
registerMethod("NetworkMessage", "addDouble", LuaScriptInterface::luaNetworkMessageAddDouble);
registerMethod("NetworkMessage", "addItem", LuaScriptInterface::luaNetworkMessageAddItem);
registerMethod("NetworkMessage", "addItemId", LuaScriptInterface::luaNetworkMessageAddItemId);

registerMethod("NetworkMessage", "reset", LuaScriptInterface::luaNetworkMessageReset);
registerMethod("NetworkMessage", "skipBytes", LuaScriptInterface::luaNetworkMessageSkipBytes);
registerMethod("NetworkMessage", "sendToPlayer", LuaScriptInterface::luaNetworkMessageSendToPlayer);
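// Usage sketch (Lua): building and sending a raw packet; 0x32 is an arbitrary
// example opcode and `player` is assumed to be a valid Player userdata:
//   local msg = NetworkMessage()
//   msg:addByte(0x32)
//   msg:addString("hello")
//   msg:sendToPlayer(player)
//   msg:delete()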
registerMethod("ModalWindow", "setDefaultEscapeButton", LuaScriptInterface::luaModalWindowSetDefaultEscapeButton); registerMethod("ModalWindow", "hasPriority", LuaScriptInterface::luaModalWindowHasPriority); registerMethod("ModalWindow", "setPriority", LuaScriptInterface::luaModalWindowSetPriority); registerMethod("ModalWindow", "sendToPlayer", LuaScriptInterface::luaModalWindowSendToPlayer); // Item registerClass("Item", "", LuaScriptInterface::luaItemCreate); registerMetaMethod("Item", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Item", "isItem", LuaScriptInterface::luaItemIsItem); registerMethod("Item", "getParent", LuaScriptInterface::luaItemGetParent); registerMethod("Item", "getTopParent", LuaScriptInterface::luaItemGetTopParent); registerMethod("Item", "getId", LuaScriptInterface::luaItemGetId); registerMethod("Item", "clone", LuaScriptInterface::luaItemClone); registerMethod("Item", "split", LuaScriptInterface::luaItemSplit); registerMethod("Item", "remove", LuaScriptInterface::luaItemRemove); registerMethod("Item", "getUniqueId", LuaScriptInterface::luaItemGetUniqueId); registerMethod("Item", "getActionId", LuaScriptInterface::luaItemGetActionId); registerMethod("Item", "setActionId", LuaScriptInterface::luaItemSetActionId); registerMethod("Item", "getCount", LuaScriptInterface::luaItemGetCount); registerMethod("Item", "getCharges", LuaScriptInterface::luaItemGetCharges); registerMethod("Item", "getFluidType", LuaScriptInterface::luaItemGetFluidType); registerMethod("Item", "getWeight", LuaScriptInterface::luaItemGetWeight); registerMethod("Item", "getSubType", LuaScriptInterface::luaItemGetSubType); registerMethod("Item", "getName", LuaScriptInterface::luaItemGetName); registerMethod("Item", "getPluralName", LuaScriptInterface::luaItemGetPluralName); registerMethod("Item", "getArticle", LuaScriptInterface::luaItemGetArticle); registerMethod("Item", "getPosition", LuaScriptInterface::luaItemGetPosition); registerMethod("Item", "getTile", LuaScriptInterface::luaItemGetTile); registerMethod("Item", "hasAttribute", LuaScriptInterface::luaItemHasAttribute); registerMethod("Item", "getAttribute", LuaScriptInterface::luaItemGetAttribute); registerMethod("Item", "setAttribute", LuaScriptInterface::luaItemSetAttribute); registerMethod("Item", "removeAttribute", LuaScriptInterface::luaItemRemoveAttribute); registerMethod("Item", "moveTo", LuaScriptInterface::luaItemMoveTo); registerMethod("Item", "transform", LuaScriptInterface::luaItemTransform); registerMethod("Item", "decay", LuaScriptInterface::luaItemDecay); registerMethod("Item", "getDescription", LuaScriptInterface::luaItemGetDescription); registerMethod("Item", "hasProperty", LuaScriptInterface::luaItemHasProperty); // Container registerClass("Container", "Item", LuaScriptInterface::luaContainerCreate); registerMetaMethod("Container", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Container", "getSize", LuaScriptInterface::luaContainerGetSize); registerMethod("Container", "getCapacity", LuaScriptInterface::luaContainerGetCapacity); registerMethod("Container", "getEmptySlots", LuaScriptInterface::luaContainerGetEmptySlots); registerMethod("Container", "getItemHoldingCount", LuaScriptInterface::luaContainerGetItemHoldingCount); registerMethod("Container", "getItemCountById", LuaScriptInterface::luaContainerGetItemCountById); registerMethod("Container", "getItem", LuaScriptInterface::luaContainerGetItem); registerMethod("Container", "hasItem", LuaScriptInterface::luaContainerHasItem); 
registerMethod("Container", "addItem", LuaScriptInterface::luaContainerAddItem); registerMethod("Container", "addItemEx", LuaScriptInterface::luaContainerAddItemEx); // Teleport registerClass("Teleport", "Item", LuaScriptInterface::luaTeleportCreate); registerMetaMethod("Teleport", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Teleport", "getDestination", LuaScriptInterface::luaTeleportGetDestination); registerMethod("Teleport", "setDestination", LuaScriptInterface::luaTeleportSetDestination); // Creature registerClass("Creature", "", LuaScriptInterface::luaCreatureCreate); registerMetaMethod("Creature", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Creature", "getEvents", LuaScriptInterface::luaCreatureGetEvents); registerMethod("Creature", "registerEvent", LuaScriptInterface::luaCreatureRegisterEvent); registerMethod("Creature", "unregisterEvent", LuaScriptInterface::luaCreatureUnregisterEvent); registerMethod("Creature", "isRemoved", LuaScriptInterface::luaCreatureIsRemoved); registerMethod("Creature", "isCreature", LuaScriptInterface::luaCreatureIsCreature); registerMethod("Creature", "isInGhostMode", LuaScriptInterface::luaCreatureIsInGhostMode); registerMethod("Creature", "isHealthHidden", LuaScriptInterface::luaCreatureIsHealthHidden); registerMethod("Creature", "canSee", LuaScriptInterface::luaCreatureCanSee); registerMethod("Creature", "canSeeCreature", LuaScriptInterface::luaCreatureCanSeeCreature); registerMethod("Creature", "getParent", LuaScriptInterface::luaCreatureGetParent); registerMethod("Creature", "getId", LuaScriptInterface::luaCreatureGetId); registerMethod("Creature", "getName", LuaScriptInterface::luaCreatureGetName); registerMethod("Creature", "getTarget", LuaScriptInterface::luaCreatureGetTarget); registerMethod("Creature", "setTarget", LuaScriptInterface::luaCreatureSetTarget); registerMethod("Creature", "getFollowCreature", LuaScriptInterface::luaCreatureGetFollowCreature); registerMethod("Creature", "setFollowCreature", LuaScriptInterface::luaCreatureSetFollowCreature); registerMethod("Creature", "getMaster", LuaScriptInterface::luaCreatureGetMaster); registerMethod("Creature", "setMaster", LuaScriptInterface::luaCreatureSetMaster); registerMethod("Creature", "getLight", LuaScriptInterface::luaCreatureGetLight); registerMethod("Creature", "setLight", LuaScriptInterface::luaCreatureSetLight); registerMethod("Creature", "getSpeed", LuaScriptInterface::luaCreatureGetSpeed); registerMethod("Creature", "getBaseSpeed", LuaScriptInterface::luaCreatureGetBaseSpeed); registerMethod("Creature", "changeSpeed", LuaScriptInterface::luaCreatureChangeSpeed); registerMethod("Creature", "setDropLoot", LuaScriptInterface::luaCreatureSetDropLoot); registerMethod("Creature", "getPosition", LuaScriptInterface::luaCreatureGetPosition); registerMethod("Creature", "getTile", LuaScriptInterface::luaCreatureGetTile); registerMethod("Creature", "getDirection", LuaScriptInterface::luaCreatureGetDirection); registerMethod("Creature", "setDirection", LuaScriptInterface::luaCreatureSetDirection); registerMethod("Creature", "getHealth", LuaScriptInterface::luaCreatureGetHealth); registerMethod("Creature", "addHealth", LuaScriptInterface::luaCreatureAddHealth); registerMethod("Creature", "getMaxHealth", LuaScriptInterface::luaCreatureGetMaxHealth); registerMethod("Creature", "setMaxHealth", LuaScriptInterface::luaCreatureSetMaxHealth); registerMethod("Creature", "setHiddenHealth", LuaScriptInterface::luaCreatureSetHiddenHealth); registerMethod("Creature", 
"getMana", LuaScriptInterface::luaCreatureGetMana); registerMethod("Creature", "addMana", LuaScriptInterface::luaCreatureAddMana); registerMethod("Creature", "getMaxMana", LuaScriptInterface::luaCreatureGetMaxMana); registerMethod("Creature", "getSkull", LuaScriptInterface::luaCreatureGetSkull); registerMethod("Creature", "setSkull", LuaScriptInterface::luaCreatureSetSkull); registerMethod("Creature", "getOutfit", LuaScriptInterface::luaCreatureGetOutfit); registerMethod("Creature", "setOutfit", LuaScriptInterface::luaCreatureSetOutfit); registerMethod("Creature", "getCondition", LuaScriptInterface::luaCreatureGetCondition); registerMethod("Creature", "addCondition", LuaScriptInterface::luaCreatureAddCondition); registerMethod("Creature", "removeCondition", LuaScriptInterface::luaCreatureRemoveCondition); registerMethod("Creature", "remove", LuaScriptInterface::luaCreatureRemove); registerMethod("Creature", "teleportTo", LuaScriptInterface::luaCreatureTeleportTo); registerMethod("Creature", "say", LuaScriptInterface::luaCreatureSay); registerMethod("Creature", "getDamageMap", LuaScriptInterface::luaCreatureGetDamageMap); registerMethod("Creature", "getSummons", LuaScriptInterface::luaCreatureGetSummons); registerMethod("Creature", "getDescription", LuaScriptInterface::luaCreatureGetDescription); registerMethod("Creature", "getPathTo", LuaScriptInterface::luaCreatureGetPathTo); // Player registerClass("Player", "Creature", LuaScriptInterface::luaPlayerCreate); registerMetaMethod("Player", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Player", "isPlayer", LuaScriptInterface::luaPlayerIsPlayer); registerMethod("Player", "getGuid", LuaScriptInterface::luaPlayerGetGuid); registerMethod("Player", "getIp", LuaScriptInterface::luaPlayerGetIp); registerMethod("Player", "getAccountId", LuaScriptInterface::luaPlayerGetAccountId); registerMethod("Player", "getLastLoginSaved", LuaScriptInterface::luaPlayerGetLastLoginSaved); registerMethod("Player", "getLastLogout", LuaScriptInterface::luaPlayerGetLastLogout); registerMethod("Player", "getAccountType", LuaScriptInterface::luaPlayerGetAccountType); registerMethod("Player", "setAccountType", LuaScriptInterface::luaPlayerSetAccountType); registerMethod("Player", "getCapacity", LuaScriptInterface::luaPlayerGetCapacity); registerMethod("Player", "setCapacity", LuaScriptInterface::luaPlayerSetCapacity); registerMethod("Player", "getFreeCapacity", LuaScriptInterface::luaPlayerGetFreeCapacity); registerMethod("Player", "getDepotChest", LuaScriptInterface::luaPlayerGetDepotChest); registerMethod("Player", "getInbox", LuaScriptInterface::luaPlayerGetInbox); registerMethod("Player", "getSkullTime", LuaScriptInterface::luaPlayerGetSkullTime); registerMethod("Player", "setSkullTime", LuaScriptInterface::luaPlayerSetSkullTime); registerMethod("Player", "getDeathPenalty", LuaScriptInterface::luaPlayerGetDeathPenalty); registerMethod("Player", "getExperience", LuaScriptInterface::luaPlayerGetExperience); registerMethod("Player", "addExperience", LuaScriptInterface::luaPlayerAddExperience); registerMethod("Player", "removeExperience", LuaScriptInterface::luaPlayerRemoveExperience); registerMethod("Player", "getLevel", LuaScriptInterface::luaPlayerGetLevel); registerMethod("Player", "getMagicLevel", LuaScriptInterface::luaPlayerGetMagicLevel); registerMethod("Player", "getBaseMagicLevel", LuaScriptInterface::luaPlayerGetBaseMagicLevel); registerMethod("Player", "setMaxMana", LuaScriptInterface::luaPlayerSetMaxMana); registerMethod("Player", 
"getManaSpent", LuaScriptInterface::luaPlayerGetManaSpent); registerMethod("Player", "addManaSpent", LuaScriptInterface::luaPlayerAddManaSpent); registerMethod("Player", "getBaseMaxHealth", LuaScriptInterface::luaPlayerGetBaseMaxHealth); registerMethod("Player", "getBaseMaxMana", LuaScriptInterface::luaPlayerGetBaseMaxMana); registerMethod("Player", "getSkillLevel", LuaScriptInterface::luaPlayerGetSkillLevel); registerMethod("Player", "getEffectiveSkillLevel", LuaScriptInterface::luaPlayerGetEffectiveSkillLevel); registerMethod("Player", "getSkillPercent", LuaScriptInterface::luaPlayerGetSkillPercent); registerMethod("Player", "getSkillTries", LuaScriptInterface::luaPlayerGetSkillTries); registerMethod("Player", "addSkillTries", LuaScriptInterface::luaPlayerAddSkillTries); registerMethod("Player", "addOfflineTrainingTime", LuaScriptInterface::luaPlayerAddOfflineTrainingTime); registerMethod("Player", "getOfflineTrainingTime", LuaScriptInterface::luaPlayerGetOfflineTrainingTime); registerMethod("Player", "removeOfflineTrainingTime", LuaScriptInterface::luaPlayerRemoveOfflineTrainingTime); registerMethod("Player", "addOfflineTrainingTries", LuaScriptInterface::luaPlayerAddOfflineTrainingTries); registerMethod("Player", "getOfflineTrainingSkill", LuaScriptInterface::luaPlayerGetOfflineTrainingSkill); registerMethod("Player", "setOfflineTrainingSkill", LuaScriptInterface::luaPlayerSetOfflineTrainingSkill); registerMethod("Player", "getItemCount", LuaScriptInterface::luaPlayerGetItemCount); registerMethod("Player", "getItemById", LuaScriptInterface::luaPlayerGetItemById); registerMethod("Player", "getVocation", LuaScriptInterface::luaPlayerGetVocation); registerMethod("Player", "setVocation", LuaScriptInterface::luaPlayerSetVocation); registerMethod("Player", "getSex", LuaScriptInterface::luaPlayerGetSex); registerMethod("Player", "setSex", LuaScriptInterface::luaPlayerSetSex); registerMethod("Player", "getTown", LuaScriptInterface::luaPlayerGetTown); registerMethod("Player", "setTown", LuaScriptInterface::luaPlayerSetTown); registerMethod("Player", "getGuild", LuaScriptInterface::luaPlayerGetGuild); registerMethod("Player", "setGuild", LuaScriptInterface::luaPlayerSetGuild); registerMethod("Player", "getGuildLevel", LuaScriptInterface::luaPlayerGetGuildLevel); registerMethod("Player", "setGuildLevel", LuaScriptInterface::luaPlayerSetGuildLevel); registerMethod("Player", "getGuildNick", LuaScriptInterface::luaPlayerGetGuildNick); registerMethod("Player", "setGuildNick", LuaScriptInterface::luaPlayerSetGuildNick); registerMethod("Player", "getGroup", LuaScriptInterface::luaPlayerGetGroup); registerMethod("Player", "setGroup", LuaScriptInterface::luaPlayerSetGroup); registerMethod("Player", "getStamina", LuaScriptInterface::luaPlayerGetStamina); registerMethod("Player", "setStamina", LuaScriptInterface::luaPlayerSetStamina); registerMethod("Player", "getSoul", LuaScriptInterface::luaPlayerGetSoul); registerMethod("Player", "addSoul", LuaScriptInterface::luaPlayerAddSoul); registerMethod("Player", "getMaxSoul", LuaScriptInterface::luaPlayerGetMaxSoul); registerMethod("Player", "getBankBalance", LuaScriptInterface::luaPlayerGetBankBalance); registerMethod("Player", "setBankBalance", LuaScriptInterface::luaPlayerSetBankBalance); registerMethod("Player", "getStorageValue", LuaScriptInterface::luaPlayerGetStorageValue); registerMethod("Player", "setStorageValue", LuaScriptInterface::luaPlayerSetStorageValue); registerMethod("Player", "addItem", LuaScriptInterface::luaPlayerAddItem); 
registerMethod("Player", "addItemEx", LuaScriptInterface::luaPlayerAddItemEx); registerMethod("Player", "removeItem", LuaScriptInterface::luaPlayerRemoveItem); registerMethod("Player", "getMoney", LuaScriptInterface::luaPlayerGetMoney); registerMethod("Player", "addMoney", LuaScriptInterface::luaPlayerAddMoney); registerMethod("Player", "removeMoney", LuaScriptInterface::luaPlayerRemoveMoney); registerMethod("Player", "showTextDialog", LuaScriptInterface::luaPlayerShowTextDialog); registerMethod("Player", "sendTextMessage", LuaScriptInterface::luaPlayerSendTextMessage); registerMethod("Player", "sendChannelMessage", LuaScriptInterface::luaPlayerSendChannelMessage); registerMethod("Player", "sendPrivateMessage", LuaScriptInterface::luaPlayerSendPrivateMessage); registerMethod("Player", "channelSay", LuaScriptInterface::luaPlayerChannelSay); registerMethod("Player", "openChannel", LuaScriptInterface::luaPlayerOpenChannel); registerMethod("Player", "getSlotItem", LuaScriptInterface::luaPlayerGetSlotItem); registerMethod("Player", "getParty", LuaScriptInterface::luaPlayerGetParty); registerMethod("Player", "addOutfit", LuaScriptInterface::luaPlayerAddOutfit); registerMethod("Player", "addOutfitAddon", LuaScriptInterface::luaPlayerAddOutfitAddon); registerMethod("Player", "removeOutfit", LuaScriptInterface::luaPlayerRemoveOutfit); registerMethod("Player", "removeOutfitAddon", LuaScriptInterface::luaPlayerRemoveOutfitAddon); registerMethod("Player", "hasOutfit", LuaScriptInterface::luaPlayerHasOutfit); registerMethod("Player", "sendOutfitWindow", LuaScriptInterface::luaPlayerSendOutfitWindow); registerMethod("Player", "addMount", LuaScriptInterface::luaPlayerAddMount); registerMethod("Player", "removeMount", LuaScriptInterface::luaPlayerRemoveMount); registerMethod("Player", "hasMount", LuaScriptInterface::luaPlayerHasMount); registerMethod("Player", "getPremiumDays", LuaScriptInterface::luaPlayerGetPremiumDays); registerMethod("Player", "addPremiumDays", LuaScriptInterface::luaPlayerAddPremiumDays); registerMethod("Player", "removePremiumDays", LuaScriptInterface::luaPlayerRemovePremiumDays); registerMethod("Player", "hasBlessing", LuaScriptInterface::luaPlayerHasBlessing); registerMethod("Player", "addBlessing", LuaScriptInterface::luaPlayerAddBlessing); registerMethod("Player", "removeBlessing", LuaScriptInterface::luaPlayerRemoveBlessing); registerMethod("Player", "canLearnSpell", LuaScriptInterface::luaPlayerCanLearnSpell); registerMethod("Player", "learnSpell", LuaScriptInterface::luaPlayerLearnSpell); registerMethod("Player", "forgetSpell", LuaScriptInterface::luaPlayerForgetSpell); registerMethod("Player", "hasLearnedSpell", LuaScriptInterface::luaPlayerHasLearnedSpell); registerMethod("Player", "sendTutorial", LuaScriptInterface::luaPlayerSendTutorial); registerMethod("Player", "addMapMark", LuaScriptInterface::luaPlayerAddMapMark); registerMethod("Player", "save", LuaScriptInterface::luaPlayerSave); registerMethod("Player", "popupFYI", LuaScriptInterface::luaPlayerPopupFYI); registerMethod("Player", "isPzLocked", LuaScriptInterface::luaPlayerIsPzLocked); registerMethod("Player", "getClient", LuaScriptInterface::luaPlayerGetClient); registerMethod("Player", "getHouse", LuaScriptInterface::luaPlayerGetHouse); registerMethod("Player", "setGhostMode", LuaScriptInterface::luaPlayerSetGhostMode); registerMethod("Player", "getContainerId", LuaScriptInterface::luaPlayerGetContainerId); registerMethod("Player", "getContainerById", LuaScriptInterface::luaPlayerGetContainerById); 
registerMethod("Player", "getContainerIndex", LuaScriptInterface::luaPlayerGetContainerIndex); // Monster registerClass("Monster", "Creature", LuaScriptInterface::luaMonsterCreate); registerMetaMethod("Monster", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Monster", "isMonster", LuaScriptInterface::luaMonsterIsMonster); registerMethod("Monster", "getType", LuaScriptInterface::luaMonsterGetType); registerMethod("Monster", "getSpawnPosition", LuaScriptInterface::luaMonsterGetSpawnPosition); registerMethod("Monster", "isInSpawnRange", LuaScriptInterface::luaMonsterIsInSpawnRange); registerMethod("Monster", "isIdle", LuaScriptInterface::luaMonsterIsIdle); registerMethod("Monster", "setIdle", LuaScriptInterface::luaMonsterSetIdle); registerMethod("Monster", "isTarget", LuaScriptInterface::luaMonsterIsTarget); registerMethod("Monster", "isOpponent", LuaScriptInterface::luaMonsterIsOpponent); registerMethod("Monster", "isFriend", LuaScriptInterface::luaMonsterIsFriend); registerMethod("Monster", "addFriend", LuaScriptInterface::luaMonsterAddFriend); registerMethod("Monster", "removeFriend", LuaScriptInterface::luaMonsterRemoveFriend); registerMethod("Monster", "getFriendList", LuaScriptInterface::luaMonsterGetFriendList); registerMethod("Monster", "getFriendCount", LuaScriptInterface::luaMonsterGetFriendCount); registerMethod("Monster", "addTarget", LuaScriptInterface::luaMonsterAddTarget); registerMethod("Monster", "removeTarget", LuaScriptInterface::luaMonsterRemoveTarget); registerMethod("Monster", "getTargetList", LuaScriptInterface::luaMonsterGetTargetList); registerMethod("Monster", "getTargetCount", LuaScriptInterface::luaMonsterGetTargetCount); registerMethod("Monster", "selectTarget", LuaScriptInterface::luaMonsterSelectTarget); registerMethod("Monster", "searchTarget", LuaScriptInterface::luaMonsterSearchTarget); // Npc registerClass("Npc", "Creature", LuaScriptInterface::luaNpcCreate); registerMetaMethod("Npc", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Npc", "isNpc", LuaScriptInterface::luaNpcIsNpc); registerMethod("Npc", "setMasterPos", LuaScriptInterface::luaNpcSetMasterPos); registerMethod("Npc", "getSpeechBubble", LuaScriptInterface::luaNpcGetSpeechBubble); registerMethod("Npc", "setSpeechBubble", LuaScriptInterface::luaNpcSetSpeechBubble); // Guild registerClass("Guild", "", LuaScriptInterface::luaGuildCreate); registerMetaMethod("Guild", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Guild", "getId", LuaScriptInterface::luaGuildGetId); registerMethod("Guild", "getName", LuaScriptInterface::luaGuildGetName); registerMethod("Guild", "getMembersOnline", LuaScriptInterface::luaGuildGetMembersOnline); registerMethod("Guild", "addRank", LuaScriptInterface::luaGuildAddRank); registerMethod("Guild", "getRankById", LuaScriptInterface::luaGuildGetRankById); registerMethod("Guild", "getRankByLevel", LuaScriptInterface::luaGuildGetRankByLevel); registerMethod("Guild", "getMotd", LuaScriptInterface::luaGuildGetMotd); registerMethod("Guild", "setMotd", LuaScriptInterface::luaGuildSetMotd); // Group registerClass("Group", "", LuaScriptInterface::luaGroupCreate); registerMetaMethod("Group", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Group", "getId", LuaScriptInterface::luaGroupGetId); registerMethod("Group", "getName", LuaScriptInterface::luaGroupGetName); registerMethod("Group", "getFlags", LuaScriptInterface::luaGroupGetFlags); registerMethod("Group", "getAccess", LuaScriptInterface::luaGroupGetAccess); 
registerMethod("Group", "getMaxDepotItems", LuaScriptInterface::luaGroupGetMaxDepotItems); registerMethod("Group", "getMaxVipEntries", LuaScriptInterface::luaGroupGetMaxVipEntries); // Vocation registerClass("Vocation", "", LuaScriptInterface::luaVocationCreate); registerMetaMethod("Vocation", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Vocation", "getId", LuaScriptInterface::luaVocationGetId); registerMethod("Vocation", "getClientId", LuaScriptInterface::luaVocationGetClientId); registerMethod("Vocation", "getName", LuaScriptInterface::luaVocationGetName); registerMethod("Vocation", "getDescription", LuaScriptInterface::luaVocationGetDescription); registerMethod("Vocation", "getRequiredSkillTries", LuaScriptInterface::luaVocationGetRequiredSkillTries); registerMethod("Vocation", "getRequiredManaSpent", LuaScriptInterface::luaVocationGetRequiredManaSpent); registerMethod("Vocation", "getCapacityGain", LuaScriptInterface::luaVocationGetCapacityGain); registerMethod("Vocation", "getHealthGain", LuaScriptInterface::luaVocationGetHealthGain); registerMethod("Vocation", "getHealthGainTicks", LuaScriptInterface::luaVocationGetHealthGainTicks); registerMethod("Vocation", "getHealthGainAmount", LuaScriptInterface::luaVocationGetHealthGainAmount); registerMethod("Vocation", "getManaGain", LuaScriptInterface::luaVocationGetManaGain); registerMethod("Vocation", "getManaGainTicks", LuaScriptInterface::luaVocationGetManaGainTicks); registerMethod("Vocation", "getManaGainAmount", LuaScriptInterface::luaVocationGetManaGainAmount); registerMethod("Vocation", "getMaxSoul", LuaScriptInterface::luaVocationGetMaxSoul); registerMethod("Vocation", "getSoulGainTicks", LuaScriptInterface::luaVocationGetSoulGainTicks); registerMethod("Vocation", "getAttackSpeed", LuaScriptInterface::luaVocationGetAttackSpeed); registerMethod("Vocation", "getBaseSpeed", LuaScriptInterface::luaVocationGetBaseSpeed); registerMethod("Vocation", "getDemotion", LuaScriptInterface::luaVocationGetDemotion); registerMethod("Vocation", "getPromotion", LuaScriptInterface::luaVocationGetPromotion); // Town registerClass("Town", "", LuaScriptInterface::luaTownCreate); registerMetaMethod("Town", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Town", "getId", LuaScriptInterface::luaTownGetId); registerMethod("Town", "getName", LuaScriptInterface::luaTownGetName); registerMethod("Town", "getTemplePosition", LuaScriptInterface::luaTownGetTemplePosition); // House registerClass("House", "", LuaScriptInterface::luaHouseCreate); registerMetaMethod("House", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("House", "getId", LuaScriptInterface::luaHouseGetId); registerMethod("House", "getName", LuaScriptInterface::luaHouseGetName); registerMethod("House", "getTown", LuaScriptInterface::luaHouseGetTown); registerMethod("House", "getExitPosition", LuaScriptInterface::luaHouseGetExitPosition); registerMethod("House", "getRent", LuaScriptInterface::luaHouseGetRent); registerMethod("House", "getOwnerGuid", LuaScriptInterface::luaHouseGetOwnerGuid); registerMethod("House", "setOwnerGuid", LuaScriptInterface::luaHouseSetOwnerGuid); registerMethod("House", "getBeds", LuaScriptInterface::luaHouseGetBeds); registerMethod("House", "getBedCount", LuaScriptInterface::luaHouseGetBedCount); registerMethod("House", "getDoors", LuaScriptInterface::luaHouseGetDoors); registerMethod("House", "getDoorCount", LuaScriptInterface::luaHouseGetDoorCount); registerMethod("House", "getTiles", 
// House
registerClass("House", "", LuaScriptInterface::luaHouseCreate);
registerMetaMethod("House", "__eq", LuaScriptInterface::luaUserdataCompare);

registerMethod("House", "getId", LuaScriptInterface::luaHouseGetId);
registerMethod("House", "getName", LuaScriptInterface::luaHouseGetName);
registerMethod("House", "getTown", LuaScriptInterface::luaHouseGetTown);
registerMethod("House", "getExitPosition", LuaScriptInterface::luaHouseGetExitPosition);
registerMethod("House", "getRent", LuaScriptInterface::luaHouseGetRent);

registerMethod("House", "getOwnerGuid", LuaScriptInterface::luaHouseGetOwnerGuid);
registerMethod("House", "setOwnerGuid", LuaScriptInterface::luaHouseSetOwnerGuid);

registerMethod("House", "getBeds", LuaScriptInterface::luaHouseGetBeds);
registerMethod("House", "getBedCount", LuaScriptInterface::luaHouseGetBedCount);

registerMethod("House", "getDoors", LuaScriptInterface::luaHouseGetDoors);
registerMethod("House", "getDoorCount", LuaScriptInterface::luaHouseGetDoorCount);

registerMethod("House", "getTiles", LuaScriptInterface::luaHouseGetTiles);
registerMethod("House", "getTileCount", LuaScriptInterface::luaHouseGetTileCount);

registerMethod("House", "getAccessList", LuaScriptInterface::luaHouseGetAccessList);
registerMethod("House", "setAccessList", LuaScriptInterface::luaHouseSetAccessList);
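// Usage sketch (Lua), assuming the access list is a newline-separated list of
// player names, as is conventional for house access lists:
//   house:setAccessList(GUEST_LIST, "Alice\nBob")
//   local list = house:getAccessList(GUEST_LIST)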
registerMethod("ItemType", "hasSubType", LuaScriptInterface::luaItemTypeHasSubType); // Combat registerClass("Combat", "", LuaScriptInterface::luaCombatCreate); registerMetaMethod("Combat", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Combat", "setParameter", LuaScriptInterface::luaCombatSetParameter); registerMethod("Combat", "setFormula", LuaScriptInterface::luaCombatSetFormula); registerMethod("Combat", "setArea", LuaScriptInterface::luaCombatSetArea); registerMethod("Combat", "setCondition", LuaScriptInterface::luaCombatSetCondition); registerMethod("Combat", "setCallback", LuaScriptInterface::luaCombatSetCallback); registerMethod("Combat", "setOrigin", LuaScriptInterface::luaCombatSetOrigin); registerMethod("Combat", "execute", LuaScriptInterface::luaCombatExecute); // Condition registerClass("Condition", "", LuaScriptInterface::luaConditionCreate); registerMetaMethod("Condition", "__eq", LuaScriptInterface::luaUserdataCompare); registerMetaMethod("Condition", "__gc", LuaScriptInterface::luaConditionDelete); registerMethod("Condition", "delete", LuaScriptInterface::luaConditionDelete); registerMethod("Condition", "getId", LuaScriptInterface::luaConditionGetId); registerMethod("Condition", "getSubId", LuaScriptInterface::luaConditionGetSubId); registerMethod("Condition", "getType", LuaScriptInterface::luaConditionGetType); registerMethod("Condition", "getIcons", LuaScriptInterface::luaConditionGetIcons); registerMethod("Condition", "getEndTime", LuaScriptInterface::luaConditionGetEndTime); registerMethod("Condition", "clone", LuaScriptInterface::luaConditionClone); registerMethod("Condition", "getTicks", LuaScriptInterface::luaConditionGetTicks); registerMethod("Condition", "setTicks", LuaScriptInterface::luaConditionSetTicks); registerMethod("Condition", "setParameter", LuaScriptInterface::luaConditionSetParameter); registerMethod("Condition", "setFormula", LuaScriptInterface::luaConditionSetFormula); registerMethod("Condition", "setOutfit", LuaScriptInterface::luaConditionSetOutfit); registerMethod("Condition", "addDamage", LuaScriptInterface::luaConditionAddDamage); // MonsterType registerClass("MonsterType", "", LuaScriptInterface::luaMonsterTypeCreate); registerMetaMethod("MonsterType", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("MonsterType", "isAttackable", LuaScriptInterface::luaMonsterTypeIsAttackable); registerMethod("MonsterType", "isConvinceable", LuaScriptInterface::luaMonsterTypeIsConvinceable); registerMethod("MonsterType", "isSummonable", LuaScriptInterface::luaMonsterTypeIsSummonable); registerMethod("MonsterType", "isIllusionable", LuaScriptInterface::luaMonsterTypeIsIllusionable); registerMethod("MonsterType", "isHostile", LuaScriptInterface::luaMonsterTypeIsHostile); registerMethod("MonsterType", "isPushable", LuaScriptInterface::luaMonsterTypeIsPushable); registerMethod("MonsterType", "isHealthShown", LuaScriptInterface::luaMonsterTypeIsHealthShown); registerMethod("MonsterType", "canPushItems", LuaScriptInterface::luaMonsterTypeCanPushItems); registerMethod("MonsterType", "canPushCreatures", LuaScriptInterface::luaMonsterTypeCanPushCreatures); registerMethod("MonsterType", "getName", LuaScriptInterface::luaMonsterTypeGetName); registerMethod("MonsterType", "getNameDescription", LuaScriptInterface::luaMonsterTypeGetNameDescription); registerMethod("MonsterType", "getHealth", LuaScriptInterface::luaMonsterTypeGetHealth); registerMethod("MonsterType", "getMaxHealth", LuaScriptInterface::luaMonsterTypeGetMaxHealth); 
registerMethod("MonsterType", "getRunHealth", LuaScriptInterface::luaMonsterTypeGetRunHealth); registerMethod("MonsterType", "getExperience", LuaScriptInterface::luaMonsterTypeGetExperience); registerMethod("MonsterType", "getCombatImmunities", LuaScriptInterface::luaMonsterTypeGetCombatImmunities); registerMethod("MonsterType", "getConditionImmunities", LuaScriptInterface::luaMonsterTypeGetConditionImmunities); registerMethod("MonsterType", "getAttackList", LuaScriptInterface::luaMonsterTypeGetAttackList); registerMethod("MonsterType", "getDefenseList", LuaScriptInterface::luaMonsterTypeGetDefenseList); registerMethod("MonsterType", "getElementList", LuaScriptInterface::luaMonsterTypeGetElementList); registerMethod("MonsterType", "getVoices", LuaScriptInterface::luaMonsterTypeGetVoices); registerMethod("MonsterType", "getLoot", LuaScriptInterface::luaMonsterTypeGetLoot); registerMethod("MonsterType", "getCreatureEvents", LuaScriptInterface::luaMonsterTypeGetCreatureEvents); registerMethod("MonsterType", "getSummonList", LuaScriptInterface::luaMonsterTypeGetSummonList); registerMethod("MonsterType", "getMaxSummons", LuaScriptInterface::luaMonsterTypeGetMaxSummons); registerMethod("MonsterType", "getArmor", LuaScriptInterface::luaMonsterTypeGetArmor); registerMethod("MonsterType", "getDefense", LuaScriptInterface::luaMonsterTypeGetDefense); registerMethod("MonsterType", "getOutfit", LuaScriptInterface::luaMonsterTypeGetOutfit); registerMethod("MonsterType", "getRace", LuaScriptInterface::luaMonsterTypeGetRace); registerMethod("MonsterType", "getCorpseId", LuaScriptInterface::luaMonsterTypeGetCorpseId); registerMethod("MonsterType", "getManaCost", LuaScriptInterface::luaMonsterTypeGetManaCost); registerMethod("MonsterType", "getBaseSpeed", LuaScriptInterface::luaMonsterTypeGetBaseSpeed); registerMethod("MonsterType", "getLight", LuaScriptInterface::luaMonsterTypeGetLight); registerMethod("MonsterType", "getStaticAttackChance", LuaScriptInterface::luaMonsterTypeGetStaticAttackChance); registerMethod("MonsterType", "getTargetDistance", LuaScriptInterface::luaMonsterTypeGetTargetDistance); registerMethod("MonsterType", "getYellChance", LuaScriptInterface::luaMonsterTypeGetYellChance); registerMethod("MonsterType", "getYellSpeedTicks", LuaScriptInterface::luaMonsterTypeGetYellSpeedTicks); registerMethod("MonsterType", "getChangeTargetChance", LuaScriptInterface::luaMonsterTypeGetChangeTargetChance); registerMethod("MonsterType", "getChangeTargetSpeed", LuaScriptInterface::luaMonsterTypeGetChangeTargetSpeed); // Party registerClass("Party", "", nullptr); registerMetaMethod("Party", "__eq", LuaScriptInterface::luaUserdataCompare); registerMethod("Party", "disband", LuaScriptInterface::luaPartyDisband); registerMethod("Party", "getLeader", LuaScriptInterface::luaPartyGetLeader); registerMethod("Party", "setLeader", LuaScriptInterface::luaPartySetLeader); registerMethod("Party", "getMembers", LuaScriptInterface::luaPartyGetMembers); registerMethod("Party", "getMemberCount", LuaScriptInterface::luaPartyGetMemberCount); registerMethod("Party", "getInvitees", LuaScriptInterface::luaPartyGetInvitees); registerMethod("Party", "getInviteeCount", LuaScriptInterface::luaPartyGetInviteeCount); registerMethod("Party", "addInvite", LuaScriptInterface::luaPartyAddInvite); registerMethod("Party", "removeInvite", LuaScriptInterface::luaPartyRemoveInvite); registerMethod("Party", "addMember", LuaScriptInterface::luaPartyAddMember); registerMethod("Party", "removeMember", LuaScriptInterface::luaPartyRemoveMember); 
registerMethod("Party", "isSharedExperienceActive", LuaScriptInterface::luaPartyIsSharedExperienceActive); registerMethod("Party", "isSharedExperienceEnabled", LuaScriptInterface::luaPartyIsSharedExperienceEnabled); registerMethod("Party", "shareExperience", LuaScriptInterface::luaPartyShareExperience); registerMethod("Party", "setSharedExperience", LuaScriptInterface::luaPartySetSharedExperience); } #undef registerEnum #undef registerEnumIn void LuaScriptInterface::registerClass(const std::string& className, const std::string& baseClass, lua_CFunction newFunction/* = nullptr*/) { // className = {} lua_newtable(luaState); lua_pushvalue(luaState, -1); lua_setglobal(luaState, className.c_str()); int methods = lua_gettop(luaState); // methodsTable = {} lua_newtable(luaState); int methodsTable = lua_gettop(luaState); if (newFunction) { // className.__call = newFunction lua_pushcfunction(luaState, newFunction); lua_setfield(luaState, methodsTable, "__call"); } uint32_t parents = 0; if (!baseClass.empty()) { lua_getglobal(luaState, baseClass.c_str()); lua_rawgeti(luaState, -1, 'p'); parents = getNumber<uint32_t>(luaState, -1) + 1; lua_pop(luaState, 1); lua_setfield(luaState, methodsTable, "__index"); } // setmetatable(className, methodsTable) lua_setmetatable(luaState, methods); // className.metatable = {} luaL_newmetatable(luaState, className.c_str()); int metatable = lua_gettop(luaState); // className.metatable.__metatable = className lua_pushvalue(luaState, methods); lua_setfield(luaState, metatable, "__metatable"); // className.metatable.__index = className lua_pushvalue(luaState, methods); lua_setfield(luaState, metatable, "__index"); // className.metatable['h'] = hash lua_pushnumber(luaState, std::hash<std::string>()(className)); lua_rawseti(luaState, metatable, 'h'); // className.metatable['p'] = parents lua_pushnumber(luaState, parents); lua_rawseti(luaState, metatable, 'p'); // className.metatable['t'] = type if (className == "Item") { lua_pushnumber(luaState, LuaData_Item); } else if (className == "Container") { lua_pushnumber(luaState, LuaData_Container); } else if (className == "Teleport") { lua_pushnumber(luaState, LuaData_Teleport); } else if (className == "Player") { lua_pushnumber(luaState, LuaData_Player); } else if (className == "Monster") { lua_pushnumber(luaState, LuaData_Monster); } else if (className == "Npc") { lua_pushnumber(luaState, LuaData_Npc); } else if (className == "Tile") { lua_pushnumber(luaState, LuaData_Tile); } else { lua_pushnumber(luaState, LuaData_Unknown); } lua_rawseti(luaState, metatable, 't'); // pop className, className.metatable lua_pop(luaState, 2); } void LuaScriptInterface::registerTable(const std::string& tableName) { // _G[tableName] = {} lua_newtable(luaState); lua_setglobal(luaState, tableName.c_str()); } void LuaScriptInterface::registerMethod(const std::string& globalName, const std::string& methodName, lua_CFunction func) { // globalName.methodName = func lua_getglobal(luaState, globalName.c_str()); lua_pushcfunction(luaState, func); lua_setfield(luaState, -2, methodName.c_str()); // pop globalName lua_pop(luaState, 1); } void LuaScriptInterface::registerMetaMethod(const std::string& className, const std::string& methodName, lua_CFunction func) { // className.metatable.methodName = func luaL_getmetatable(luaState, className.c_str()); lua_pushcfunction(luaState, func); lua_setfield(luaState, -2, methodName.c_str()); // pop className.metatable lua_pop(luaState, 1); } void LuaScriptInterface::registerGlobalMethod(const std::string& 
void LuaScriptInterface::registerGlobalMethod(const std::string& functionName, lua_CFunction func)
{
    // _G[functionName] = func
    lua_pushcfunction(luaState, func);
    lua_setglobal(luaState, functionName.c_str());
}

void LuaScriptInterface::registerVariable(const std::string& tableName, const std::string& name, lua_Number value)
{
    // tableName.name = value
    lua_getglobal(luaState, tableName.c_str());
    setField(luaState, name.c_str(), value);

    // pop tableName
    lua_pop(luaState, 1);
}

void LuaScriptInterface::registerGlobalVariable(const std::string& name, lua_Number value)
{
    // _G[name] = value
    lua_pushnumber(luaState, value);
    lua_setglobal(luaState, name.c_str());
}

void LuaScriptInterface::registerGlobalBoolean(const std::string& name, bool value)
{
    // _G[name] = value
    pushBoolean(luaState, value);
    lua_setglobal(luaState, name.c_str());
}

int LuaScriptInterface::luaGetPlayerFlagValue(lua_State* L)
{
    //getPlayerFlagValue(cid, flag)
    Player* player = getPlayer(L, 1);
    if (player) {
        PlayerFlags flag = getNumber<PlayerFlags>(L, 2);
        pushBoolean(L, player->hasFlag(flag));
    } else {
        reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
        pushBoolean(L, false);
    }
    return 1;
}

int LuaScriptInterface::luaGetPlayerInstantSpellCount(lua_State* L)
{
    //getPlayerInstantSpellCount(cid)
    Player* player = getPlayer(L, 1);
    if (player) {
        lua_pushnumber(L, g_spells->getInstantSpellCount(player));
    } else {
        reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
        pushBoolean(L, false);
    }
    return 1;
}

int LuaScriptInterface::luaGetPlayerInstantSpellInfo(lua_State* L)
{
    //getPlayerInstantSpellInfo(cid, index)
    Player* player = getPlayer(L, 1);
    if (!player) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    uint32_t index = getNumber<uint32_t>(L, 2);
    InstantSpell* spell = g_spells->getInstantSpellByIndex(player, index);
    if (!spell) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_SPELL_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    lua_createtable(L, 0, 6);
    setField(L, "name", spell->getName());
    setField(L, "words", spell->getWords());
    setField(L, "level", spell->getLevel());
    setField(L, "mlevel", spell->getMagicLevel());
    setField(L, "mana", spell->getManaCost(player));
    setField(L, "manapercent", spell->getManaPercent());
    return 1;
}
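/*
 * A minimal Lua-side sketch (hypothetical script code, editor's own) of the
 * legacy spell accessors above:
 *
 *   for i = 0, getPlayerInstantSpellCount(cid) - 1 do
 *       local spell = getPlayerInstantSpellInfo(cid, i)
 *       print(spell.name, spell.words, spell.mana)
 *   end
 */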
int LuaScriptInterface::luaDoPlayerAddItem(lua_State* L)
{
    //doPlayerAddItem(cid, itemid, <optional: default: 1> count/subtype, <optional: default: 1> canDropOnMap)
    //doPlayerAddItem(cid, itemid, <optional: default: 1> count, <optional: default: 1> canDropOnMap, <optional: default: 1> subtype)
    Player* player = getPlayer(L, 1);
    if (!player) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    uint16_t itemId = getNumber<uint16_t>(L, 2);
    int32_t count = getNumber<int32_t>(L, 3, 1);
    bool canDropOnMap = getBoolean(L, 4, true);
    uint16_t subType = getNumber<uint16_t>(L, 5, 1);

    const ItemType& it = Item::items[itemId];
    int32_t itemCount;

    auto parameters = lua_gettop(L);
    if (parameters > 4) {
        //subtype already supplied, count then is the amount
        itemCount = std::max<int32_t>(1, count);
    } else if (it.hasSubType()) {
        if (it.stackable) {
            itemCount = static_cast<int32_t>(std::ceil(static_cast<float>(count) / 100));
        } else {
            itemCount = 1;
        }
        subType = count;
    } else {
        itemCount = std::max<int32_t>(1, count);
    }

    while (itemCount > 0) {
        uint16_t stackCount = subType;
        if (it.stackable && stackCount > 100) {
            stackCount = 100;
        }

        Item* newItem = Item::CreateItem(itemId, stackCount);
        if (!newItem) {
            reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
            pushBoolean(L, false);
            return 1;
        }

        if (it.stackable) {
            subType -= stackCount;
        }

        ReturnValue ret = g_game.internalPlayerAddItem(player, newItem, canDropOnMap);
        if (ret != RETURNVALUE_NOERROR) {
            delete newItem;
            pushBoolean(L, false);
            return 1;
        }

        if (--itemCount == 0) {
            if (newItem->getParent()) {
                uint32_t uid = getScriptEnv()->addThing(newItem);
                lua_pushnumber(L, uid);
                return 1;
            } else {
                //stackable item stacked with existing object, newItem will be released
                pushBoolean(L, false);
                return 1;
            }
        }
    }

    pushBoolean(L, false);
    return 1;
}

int LuaScriptInterface::luaDoTileAddItemEx(lua_State* L)
{
    //doTileAddItemEx(pos, uid)
    const Position& pos = getPosition(L, 1);
    Tile* tile = g_game.map.getTile(pos);
    if (!tile) {
        std::ostringstream ss;
        ss << pos << ' ' << getErrorDesc(LUA_ERROR_TILE_NOT_FOUND);
        reportErrorFunc(ss.str());
        pushBoolean(L, false);
        return 1;
    }

    uint32_t uid = getNumber<uint32_t>(L, 2);
    Item* item = getScriptEnv()->getItemByUID(uid);
    if (!item) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    if (item->getParent() != VirtualCylinder::virtualCylinder) {
        reportErrorFunc("Item already has a parent");
        pushBoolean(L, false);
        return 1;
    }

    lua_pushnumber(L, g_game.internalAddItem(tile, item));
    return 1;
}

int LuaScriptInterface::luaDoCreateItem(lua_State* L)
{
    //doCreateItem(itemid, <optional> type/count, pos)
    //Returns uid of the created item, only works on tiles.
    const Position& pos = getPosition(L, 3);
    Tile* tile = g_game.map.getTile(pos);
    if (!tile) {
        std::ostringstream ss;
        ss << pos << ' ' << getErrorDesc(LUA_ERROR_TILE_NOT_FOUND);
        reportErrorFunc(ss.str());
        pushBoolean(L, false);
        return 1;
    }

    ScriptEnvironment* env = getScriptEnv();

    int32_t itemCount = 1;
    int32_t subType = 1;

    uint16_t itemId = getNumber<uint16_t>(L, 1);
    uint32_t count = getNumber<uint32_t>(L, 2, 1);

    const ItemType& it = Item::items[itemId];
    if (it.hasSubType()) {
        if (it.stackable) {
            itemCount = static_cast<int32_t>(std::ceil(static_cast<float>(count) / 100));
        }
        subType = count;
    } else {
        itemCount = std::max<int32_t>(1, count);
    }

    while (itemCount > 0) {
        int32_t stackCount = std::min<int32_t>(100, subType);
        Item* newItem = Item::CreateItem(itemId, stackCount);
        if (!newItem) {
            reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
            pushBoolean(L, false);
            return 1;
        }

        if (it.stackable) {
            subType -= stackCount;
        }

        ReturnValue ret = g_game.internalAddItem(tile, newItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
        if (ret != RETURNVALUE_NOERROR) {
            delete newItem;
            pushBoolean(L, false);
            return 1;
        }

        if (--itemCount == 0) {
            if (newItem->getParent()) {
                uint32_t uid = env->addThing(newItem);
                lua_pushnumber(L, uid);
                return 1;
            } else {
                //stackable item stacked with existing object, newItem will be released
                pushBoolean(L, false);
                return 1;
            }
        }
    }

    pushBoolean(L, false);
    return 1;
}

int LuaScriptInterface::luaDoCreateItemEx(lua_State* L)
{
    //doCreateItemEx(itemid, <optional> count/subtype)
    //Returns uid of the created item
    uint16_t itemId = getNumber<uint16_t>(L, 1);
    uint32_t count = getNumber<uint32_t>(L, 2, 1);

    const ItemType& it = Item::items[itemId];
    if (it.stackable && count > 100) {
        reportErrorFunc("Stack count cannot be higher than 100.");
        count = 100;
    }

    Item* newItem = Item::CreateItem(itemId, count);
    if (!newItem) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    newItem->setParent(VirtualCylinder::virtualCylinder);

    ScriptEnvironment* env = getScriptEnv();
    env->addTempItem(newItem);

    uint32_t uid = env->addThing(newItem);
    lua_pushnumber(L, uid);
    return 1;
}
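/*
 * Worked example (editor's illustration) of the splitting logic shared by the
 * item-creation helpers above: a stackable item requested with count = 250
 * yields itemCount = ceil(250 / 100) = 3 and subType = 250, so the loop
 * creates stacks of 100, 100 and 50, decrementing subType by each stack.
 */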
int LuaScriptInterface::luaDebugPrint(lua_State* L)
{
    //debugPrint(text)
    reportErrorFunc(getString(L, -1));
    return 0;
}

int LuaScriptInterface::luaGetWorldTime(lua_State* L)
{
    //getWorldTime()
    uint32_t time = g_game.getLightHour();
    lua_pushnumber(L, time);
    return 1;
}

int LuaScriptInterface::luaGetWorldLight(lua_State* L)
{
    //getWorldLight()
    LightInfo lightInfo;
    g_game.getWorldLightInfo(lightInfo);
    lua_pushnumber(L, lightInfo.level);
    lua_pushnumber(L, lightInfo.color);
    return 2;
}

int LuaScriptInterface::luaGetWorldUpTime(lua_State* L)
{
    //getWorldUpTime()
    uint64_t uptime = (OTSYS_TIME() - ProtocolStatus::start) / 1000;
    lua_pushnumber(L, uptime);
    return 1;
}

bool LuaScriptInterface::getArea(lua_State* L, std::list<uint32_t>& list, uint32_t& rows)
{
    lua_pushnil(L);
    for (rows = 0; lua_next(L, -2) != 0; ++rows) {
        if (!isTable(L, -1)) {
            return false;
        }

        lua_pushnil(L);
        while (lua_next(L, -2) != 0) {
            if (!isNumber(L, -1)) {
                return false;
            }
            list.push_back(getNumber<uint32_t>(L, -1));
            lua_pop(L, 1);
        }

        lua_pop(L, 1);
    }

    lua_pop(L, 1);
    return (rows != 0);
}

int LuaScriptInterface::luaCreateCombatArea(lua_State* L)
{
    //createCombatArea( {area}, <optional> {extArea} )
    ScriptEnvironment* env = getScriptEnv();
    if (env->getScriptId() != EVENT_ID_LOADING) {
        reportErrorFunc("This function can only be used while loading the script.");
        pushBoolean(L, false);
        return 1;
    }

    uint32_t areaId = g_luaEnvironment.createAreaObject(env->getScriptInterface());
    AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);

    int parameters = lua_gettop(L);
    if (parameters >= 2) {
        uint32_t rowsExtArea;
        std::list<uint32_t> listExtArea;
        if (!isTable(L, 2) || !getArea(L, listExtArea, rowsExtArea)) {
            reportErrorFunc("Invalid extended area table.");
            pushBoolean(L, false);
            return 1;
        }
        area->setupExtArea(listExtArea, rowsExtArea);
    }

    uint32_t rowsArea = 0;
    std::list<uint32_t> listArea;
    if (!isTable(L, 1) || !getArea(L, listArea, rowsArea)) {
        reportErrorFunc("Invalid area table.");
        pushBoolean(L, false);
        return 1;
    }

    area->setupArea(listArea, rowsArea);
    lua_pushnumber(L, areaId);
    return 1;
}

int LuaScriptInterface::luaDoAreaCombatHealth(lua_State* L)
{
    //doAreaCombatHealth(cid, type, pos, area, min, max, effect[, origin = ORIGIN_SPELL])
    Creature* creature = getCreature(L, 1);
    if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    uint32_t areaId = getNumber<uint32_t>(L, 4);
    const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
    if (area || areaId == 0) {
        CombatType_t combatType = getNumber<CombatType_t>(L, 2);

        CombatParams params;
        params.combatType = combatType;
        params.impactEffect = getNumber<uint8_t>(L, 7);

        CombatDamage damage;
        damage.origin = getNumber<CombatOrigin>(L, 8, ORIGIN_SPELL);
        damage.primary.type = combatType;
        damage.primary.value = normal_random(getNumber<int32_t>(L, 6), getNumber<int32_t>(L, 5));

        Combat::doCombatHealth(creature, getPosition(L, 3), area, damage, params);
        pushBoolean(L, true);
    } else {
        reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
        pushBoolean(L, false);
    }
    return 1;
}
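/*
 * Hypothetical Lua usage of the area helpers above (the area table, values
 * and spell callback are the editor's own sketch; a 3 conventionally marks
 * the caster's position in these area arrays):
 *
 *   local arr = {
 *       {1, 1, 1},
 *       {1, 3, 1},
 *       {1, 1, 1},
 *   }
 *   local area = createCombatArea(arr)  -- only valid while the script loads
 *   function onCastSpell(cid, var)
 *       return doAreaCombatHealth(cid, COMBAT_FIREDAMAGE,
 *           getCreaturePosition(cid), area, -50, -100, CONST_ME_FIREAREA)
 *   end
 */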
int LuaScriptInterface::luaDoTargetCombatHealth(lua_State* L)
{
    //doTargetCombatHealth(cid, target, type, min, max, effect[, origin = ORIGIN_SPELL])
    Creature* creature = getCreature(L, 1);
    if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    Creature* target = getCreature(L, 2);
    if (!target) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    CombatType_t combatType = getNumber<CombatType_t>(L, 3);

    CombatParams params;
    params.combatType = combatType;
    params.impactEffect = getNumber<uint8_t>(L, 6);

    CombatDamage damage;
    damage.origin = getNumber<CombatOrigin>(L, 7, ORIGIN_SPELL);
    damage.primary.type = combatType;
    damage.primary.value = normal_random(getNumber<int32_t>(L, 4), getNumber<int32_t>(L, 5));

    Combat::doCombatHealth(creature, target, damage, params);
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaDoAreaCombatMana(lua_State* L)
{
    //doAreaCombatMana(cid, pos, area, min, max, effect[, origin = ORIGIN_SPELL])
    Creature* creature = getCreature(L, 1);
    if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    uint32_t areaId = getNumber<uint32_t>(L, 3);
    const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
    if (area || areaId == 0) {
        CombatParams params;
        params.impactEffect = getNumber<uint8_t>(L, 6);

        CombatDamage damage;
        damage.origin = getNumber<CombatOrigin>(L, 7, ORIGIN_SPELL);
        damage.primary.type = COMBAT_MANADRAIN;
        damage.primary.value = normal_random(getNumber<int32_t>(L, 4), getNumber<int32_t>(L, 5));

        Position pos = getPosition(L, 2);
        Combat::doCombatMana(creature, pos, area, damage, params);
        pushBoolean(L, true);
    } else {
        reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
        pushBoolean(L, false);
    }
    return 1;
}

int LuaScriptInterface::luaDoTargetCombatMana(lua_State* L)
{
    //doTargetCombatMana(cid, target, min, max, effect[, origin = ORIGIN_SPELL])
    Creature* creature = getCreature(L, 1);
    if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    Creature* target = getCreature(L, 2);
    if (!target) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    CombatParams params;
    params.impactEffect = getNumber<uint8_t>(L, 5);

    CombatDamage damage;
    damage.origin = getNumber<CombatOrigin>(L, 6, ORIGIN_SPELL);
    damage.primary.type = COMBAT_MANADRAIN;
    damage.primary.value = normal_random(getNumber<int32_t>(L, 3), getNumber<int32_t>(L, 4));

    Combat::doCombatMana(creature, target, damage, params);
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaDoAreaCombatCondition(lua_State* L)
{
    //doAreaCombatCondition(cid, pos, area, condition, effect)
    Creature* creature = getCreature(L, 1);
    if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    const Condition* condition = getUserdata<Condition>(L, 4);
    if (!condition) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CONDITION_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    uint32_t areaId = getNumber<uint32_t>(L, 3);
    const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
    if (area || areaId == 0) {
        CombatParams params;
        params.impactEffect = getNumber<uint8_t>(L, 5);
        params.conditionList.emplace_front(condition);
        Combat::doCombatCondition(creature, getPosition(L, 2), area, params);
        pushBoolean(L, true);
    } else {
        reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
        pushBoolean(L, false);
    }
    return 1;
}
int LuaScriptInterface::luaDoTargetCombatCondition(lua_State* L)
{
    //doTargetCombatCondition(cid, target, condition, effect)
    Creature* creature = getCreature(L, 1);
    if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    Creature* target = getCreature(L, 2);
    if (!target) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    const Condition* condition = getUserdata<Condition>(L, 3);
    if (!condition) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CONDITION_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    CombatParams params;
    params.impactEffect = getNumber<uint8_t>(L, 4);
    params.conditionList.emplace_front(condition);
    Combat::doCombatCondition(creature, target, params);
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaDoAreaCombatDispel(lua_State* L)
{
    //doAreaCombatDispel(cid, pos, area, type, effect)
    Creature* creature = getCreature(L, 1);
    if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    uint32_t areaId = getNumber<uint32_t>(L, 3);
    const AreaCombat* area = g_luaEnvironment.getAreaObject(areaId);
    if (area || areaId == 0) {
        CombatParams params;
        params.impactEffect = getNumber<uint8_t>(L, 5);
        params.dispelType = getNumber<ConditionType_t>(L, 4);
        Combat::doCombatDispel(creature, getPosition(L, 2), area, params);
        pushBoolean(L, true);
    } else {
        reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND));
        pushBoolean(L, false);
    }
    return 1;
}

int LuaScriptInterface::luaDoTargetCombatDispel(lua_State* L)
{
    //doTargetCombatDispel(cid, target, type, effect)
    Creature* creature = getCreature(L, 1);
    if (!creature && (!isNumber(L, 1) || getNumber<uint32_t>(L, 1) != 0)) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    Creature* target = getCreature(L, 2);
    if (!target) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    CombatParams params;
    params.dispelType = getNumber<ConditionType_t>(L, 3);
    params.impactEffect = getNumber<uint8_t>(L, 4);
    Combat::doCombatDispel(creature, target, params);
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaDoChallengeCreature(lua_State* L)
{
    //doChallengeCreature(cid, target)
    Creature* creature = getCreature(L, 1);
    if (!creature) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    Creature* target = getCreature(L, 2);
    if (!target) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    target->challengeCreature(creature);
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaSetCreatureOutfit(lua_State* L)
{
    //doSetCreatureOutfit(cid, outfit, time)
    Creature* creature = getCreature(L, 1);
    if (!creature) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    Outfit_t outfit = getOutfit(L, 2);
    int32_t time = getNumber<int32_t>(L, 3);
    pushBoolean(L, Spell::CreateIllusion(creature, outfit, time) == RETURNVALUE_NOERROR);
    return 1;
}

int LuaScriptInterface::luaSetMonsterOutfit(lua_State* L)
{
    //doSetMonsterOutfit(cid, name, time)
    Creature* creature = getCreature(L, 1);
    if (!creature) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    std::string name = getString(L, 2);
    int32_t time = getNumber<int32_t>(L, 3);
    pushBoolean(L, Spell::CreateIllusion(creature, name, time) == RETURNVALUE_NOERROR);
    return 1;
}
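/*
 * Editor's note: the outfit helpers above and below map onto three
 * Spell::CreateIllusion overloads -- by outfit table, by monster name, and by
 * item id -- each applying a temporary appearance for the given time.
 */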
int LuaScriptInterface::luaSetItemOutfit(lua_State* L)
{
    //doSetItemOutfit(cid, item, time)
    Creature* creature = getCreature(L, 1);
    if (!creature) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    uint32_t item = getNumber<uint32_t>(L, 2);
    int32_t time = getNumber<int32_t>(L, 3);
    pushBoolean(L, Spell::CreateIllusion(creature, item, time) == RETURNVALUE_NOERROR);
    return 1;
}

int LuaScriptInterface::luaDoMoveCreature(lua_State* L)
{
    //doMoveCreature(cid, direction)
    Creature* creature = getCreature(L, 1);
    if (!creature) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    Direction direction = getNumber<Direction>(L, 2);
    if (direction > DIRECTION_LAST) {
        reportErrorFunc("No valid direction");
        pushBoolean(L, false);
        return 1;
    }

    ReturnValue ret = g_game.internalMoveCreature(creature, direction, FLAG_NOLIMIT);
    lua_pushnumber(L, ret);
    return 1;
}

int LuaScriptInterface::luaIsValidUID(lua_State* L)
{
    //isValidUID(uid)
    pushBoolean(L, getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, -1)) != nullptr);
    return 1;
}

int LuaScriptInterface::luaIsDepot(lua_State* L)
{
    //isDepot(uid)
    Container* container = getScriptEnv()->getContainerByUID(getNumber<uint32_t>(L, -1));
    pushBoolean(L, container && container->getDepotLocker());
    return 1;
}

int LuaScriptInterface::luaIsMoveable(lua_State* L)
{
    //isMoveable(uid)
    //isMovable(uid)
    Thing* thing = getScriptEnv()->getThingByUID(getNumber<uint32_t>(L, -1));
    pushBoolean(L, thing && thing->isPushable());
    return 1;
}

int LuaScriptInterface::luaDoAddContainerItem(lua_State* L)
{
    //doAddContainerItem(uid, itemid, <optional> count/subtype)
    uint32_t uid = getNumber<uint32_t>(L, 1);

    ScriptEnvironment* env = getScriptEnv();
    Container* container = env->getContainerByUID(uid);
    if (!container) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CONTAINER_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    uint16_t itemId = getNumber<uint16_t>(L, 2);
    const ItemType& it = Item::items[itemId];

    int32_t itemCount = 1;
    int32_t subType = 1;
    uint32_t count = getNumber<uint32_t>(L, 3, 1);

    if (it.hasSubType()) {
        if (it.stackable) {
            itemCount = static_cast<int32_t>(std::ceil(static_cast<float>(count) / 100));
        }
        subType = count;
    } else {
        itemCount = std::max<int32_t>(1, count);
    }

    while (itemCount > 0) {
        int32_t stackCount = std::min<int32_t>(100, subType);
        Item* newItem = Item::CreateItem(itemId, stackCount);
        if (!newItem) {
            reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
            pushBoolean(L, false);
            return 1;
        }

        if (it.stackable) {
            subType -= stackCount;
        }

        ReturnValue ret = g_game.internalAddItem(container, newItem);
        if (ret != RETURNVALUE_NOERROR) {
            delete newItem;
            pushBoolean(L, false);
            return 1;
        }

        if (--itemCount == 0) {
            if (newItem->getParent()) {
                lua_pushnumber(L, env->addThing(newItem));
            } else {
                //stackable item stacked with existing object, newItem will be released
                pushBoolean(L, false);
            }
            return 1;
        }
    }

    pushBoolean(L, false);
    return 1;
}

int LuaScriptInterface::luaGetDepotId(lua_State* L)
{
    //getDepotId(uid)
    uint32_t uid = getNumber<uint32_t>(L, -1);

    Container* container = getScriptEnv()->getContainerByUID(uid);
    if (!container) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CONTAINER_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    DepotLocker* depotLocker = container->getDepotLocker();
    if (!depotLocker) {
        reportErrorFunc("Depot not found");
        pushBoolean(L, false);
        return 1;
    }

    lua_pushnumber(L, depotLocker->getDepotId());
    return 1;
}
int LuaScriptInterface::luaIsInArray(lua_State* L)
{
    //isInArray(array, value)
    if (!isTable(L, 1)) {
        pushBoolean(L, false);
        return 1;
    }

    lua_pushnil(L);
    while (lua_next(L, 1)) {
        if (lua_equal(L, 2, -1) != 0) {
            pushBoolean(L, true);
            return 1;
        }
        lua_pop(L, 1);
    }

    pushBoolean(L, false);
    return 1;
}

int LuaScriptInterface::luaDoSetCreatureLight(lua_State* L)
{
    //doSetCreatureLight(cid, lightLevel, lightColor, time)
    Creature* creature = getCreature(L, 1);
    if (!creature) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    uint16_t level = getNumber<uint16_t>(L, 2);
    uint16_t color = getNumber<uint16_t>(L, 3);
    uint32_t time = getNumber<uint32_t>(L, 4);
    // the condition parameter packs the light level into the low byte and the color into the next byte
    Condition* condition = Condition::createCondition(CONDITIONID_COMBAT, CONDITION_LIGHT, time, level | (color << 8));
    creature->addCondition(condition);
    pushBoolean(L, true);
    return 1;
}
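/*
 * Hypothetical Lua usage of the timer API below (the callback body is the
 * editor's own):
 *
 *   local eventId = addEvent(function(text)
 *       print(text)
 *   end, 1000, "fired after ~1s")   -- delay is clamped to >= 100 ms
 *   stopEvent(eventId)              -- cancels it again
 *
 * Passing userdata (an Item, Player, ...) as a timer parameter is "unsafe":
 * the object may be freed before the timer fires, which is what the
 * WARN_UNSAFE_SCRIPTS / CONVERT_UNSAFE_SCRIPTS handling below is about.
 */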
int LuaScriptInterface::luaAddEvent(lua_State* L)
{
    //addEvent(callback, delay, ...)
    lua_State* globalState = g_luaEnvironment.getLuaState();
    if (!globalState) {
        reportErrorFunc("No valid script interface!");
        pushBoolean(L, false);
        return 1;
    } else if (globalState != L) {
        lua_xmove(L, globalState, lua_gettop(L));
    }

    int parameters = lua_gettop(globalState);
    if (!isFunction(globalState, -parameters)) { //-parameters means the first parameter from left to right
        reportErrorFunc("callback parameter should be a function.");
        pushBoolean(L, false);
        return 1;
    }

    if (g_config.getBoolean(ConfigManager::WARN_UNSAFE_SCRIPTS) || g_config.getBoolean(ConfigManager::CONVERT_UNSAFE_SCRIPTS)) {
        std::vector<std::pair<int32_t, LuaDataType>> indexes;
        for (int i = 3; i <= parameters; ++i) {
            if (lua_getmetatable(globalState, i) == 0) {
                continue;
            }

            // read the 't' (type) slot from globalState, where the arguments now live
            lua_rawgeti(globalState, -1, 't');
            LuaDataType type = getNumber<LuaDataType>(globalState, -1);
            if (type != LuaData_Unknown && type != LuaData_Tile) {
                indexes.push_back({i, type});
            }
            lua_pop(globalState, 2);
        }

        if (!indexes.empty()) {
            if (g_config.getBoolean(ConfigManager::WARN_UNSAFE_SCRIPTS)) {
                bool plural = indexes.size() > 1;

                std::string warningString = "Argument";
                if (plural) {
                    warningString += 's';
                }

                for (const auto& entry : indexes) {
                    if (entry == indexes.front()) {
                        warningString += ' ';
                    } else if (entry == indexes.back()) {
                        warningString += " and ";
                    } else {
                        warningString += ", ";
                    }
                    warningString += '#';
                    warningString += std::to_string(entry.first);
                }

                if (plural) {
                    warningString += " are unsafe";
                } else {
                    warningString += " is unsafe";
                }

                reportErrorFunc(warningString);
            }

            if (g_config.getBoolean(ConfigManager::CONVERT_UNSAFE_SCRIPTS)) {
                for (const auto& entry : indexes) {
                    switch (entry.second) {
                        case LuaData_Item:
                        case LuaData_Container:
                        case LuaData_Teleport: {
                            lua_getglobal(globalState, "Item");
                            lua_getfield(globalState, -1, "getUniqueId");
                            break;
                        }
                        case LuaData_Player:
                        case LuaData_Monster:
                        case LuaData_Npc: {
                            lua_getglobal(globalState, "Creature");
                            lua_getfield(globalState, -1, "getId");
                            break;
                        }
                        default:
                            break;
                    }
                    lua_replace(globalState, -2);
                    lua_pushvalue(globalState, entry.first);
                    lua_call(globalState, 1, 1);
                    lua_replace(globalState, entry.first);
                }
            }
        }
    }

    LuaTimerEventDesc eventDesc;
    for (int i = 0; i < parameters - 2; ++i) { //-2 because addEvent needs at least two parameters
        eventDesc.parameters.push_back(luaL_ref(globalState, LUA_REGISTRYINDEX));
    }

    uint32_t delay = std::max<uint32_t>(100, getNumber<uint32_t>(globalState, 2));
    lua_pop(globalState, 1);

    eventDesc.function = luaL_ref(globalState, LUA_REGISTRYINDEX);
    eventDesc.scriptId = getScriptEnv()->getScriptId();

    auto& lastTimerEventId = g_luaEnvironment.lastEventTimerId;
    eventDesc.eventId = g_scheduler.addEvent(createSchedulerTask(
        delay, std::bind(&LuaEnvironment::executeTimerEvent, &g_luaEnvironment, lastTimerEventId)
    ));

    g_luaEnvironment.timerEvents.emplace(lastTimerEventId, std::move(eventDesc));
    lua_pushnumber(L, lastTimerEventId++);
    return 1;
}

int LuaScriptInterface::luaStopEvent(lua_State* L)
{
    //stopEvent(eventid)
    lua_State* globalState = g_luaEnvironment.getLuaState();
    if (!globalState) {
        reportErrorFunc("No valid script interface!");
        pushBoolean(L, false);
        return 1;
    }

    uint32_t eventId = getNumber<uint32_t>(L, 1);

    auto& timerEvents = g_luaEnvironment.timerEvents;
    auto it = timerEvents.find(eventId);
    if (it == timerEvents.end()) {
        pushBoolean(L, false);
        return 1;
    }

    LuaTimerEventDesc timerEventDesc = std::move(it->second);
    timerEvents.erase(it);

    g_scheduler.stopEvent(timerEventDesc.eventId);
    luaL_unref(globalState, LUA_REGISTRYINDEX, timerEventDesc.function);

    for (auto parameter : timerEventDesc.parameters) {
        luaL_unref(globalState, LUA_REGISTRYINDEX, parameter);
    }

    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaGetCreatureCondition(lua_State* L)
{
    //getCreatureCondition(cid, condition[, subId = 0])
    Creature* creature = getCreature(L, 1);
    if (!creature) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    ConditionType_t condition = getNumber<ConditionType_t>(L, 2);
    uint32_t subId = getNumber<uint32_t>(L, 3, 0);
    pushBoolean(L, creature->hasCondition(condition, subId));
    return 1;
}

int LuaScriptInterface::luaSaveServer(lua_State* L)
{
    //saveServer()
    g_game.saveGameState();
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaCleanMap(lua_State* L)
{
    //cleanMap()
    lua_pushnumber(L, g_game.map.clean());
    return 1;
}

int LuaScriptInterface::luaIsInWar(lua_State* L)
{
    //isInWar(cid, target)
    Player* player = getPlayer(L, 1);
    if (!player) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    Player* targetPlayer = getPlayer(L, 2);
    if (!targetPlayer) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    pushBoolean(L, player->isInWar(targetPlayer));
    return 1;
}

int LuaScriptInterface::luaGetWaypointPositionByName(lua_State* L)
{
    //getWaypointPositionByName(name)
    auto& waypoints = g_game.map.waypoints;

    auto it = waypoints.find(getString(L, -1));
    if (it != waypoints.end()) {
        pushPosition(L, it->second);
    } else {
        pushBoolean(L, false);
    }
    return 1;
}

int LuaScriptInterface::luaSendChannelMessage(lua_State* L)
{
    //sendChannelMessage(channelId, type, message)
    uint32_t channelId = getNumber<uint32_t>(L, 1);
    ChatChannel* channel = g_chat->getChannelById(channelId);
    if (!channel) {
        pushBoolean(L, false);
        return 1;
    }

    SpeakClasses type = getNumber<SpeakClasses>(L, 2);
    std::string message = getString(L, 3);
    channel->sendToAll(message, type);
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaSendGuildChannelMessage(lua_State* L)
{
    //sendGuildChannelMessage(guildId, type, message)
    uint32_t guildId = getNumber<uint32_t>(L, 1);
    ChatChannel* channel = g_chat->getGuildChannelById(guildId);
    if (!channel) {
        pushBoolean(L, false);
        return 1;
    }

    SpeakClasses type = getNumber<SpeakClasses>(L, 2);
    std::string message = getString(L, 3);
    channel->sendToAll(message, type);
    pushBoolean(L, true);
    return 1;
}

std::string LuaScriptInterface::escapeString(const std::string& string)
{
    std::string s = string;
    replaceString(s, "\\", "\\\\");
    replaceString(s, "\"", "\\\"");
    replaceString(s, "'", "\\'");
    replaceString(s, "[[", "\\[[");
    return s;
}
LuaScriptInterface::luaBitOr}, {"bxor", LuaScriptInterface::luaBitXor}, {"lshift", LuaScriptInterface::luaBitLeftShift}, {"rshift", LuaScriptInterface::luaBitRightShift}, //{"arshift", LuaScriptInterface::luaBitArithmeticalRightShift}, //{"rol", LuaScriptInterface::luaBitRotateLeft}, //{"ror", LuaScriptInterface::luaBitRotateRight}, //{"bswap", LuaScriptInterface::luaBitSwapEndian}, //{"tohex", LuaScriptInterface::luaBitToHex}, {nullptr, nullptr} }; int LuaScriptInterface::luaBitNot(lua_State* L) { lua_pushnumber(L, ~getNumber<uint32_t>(L, -1)); return 1; } #define MULTIOP(name, op) \ int LuaScriptInterface::luaBit##name(lua_State* L) \ { \ int n = lua_gettop(L); \ uint32_t w = getNumber<uint32_t>(L, -1); \ for (int i = 1; i < n; ++i) \ w op getNumber<uint32_t>(L, i); \ lua_pushnumber(L, w); \ return 1; \ } MULTIOP(And, &= ) MULTIOP(Or, |= ) MULTIOP(Xor, ^= ) #define SHIFTOP(name, op) \ int LuaScriptInterface::luaBit##name(lua_State* L) \ { \ uint32_t n1 = getNumber<uint32_t>(L, 1), n2 = getNumber<uint32_t>(L, 2); \ lua_pushnumber(L, (n1 op n2)); \ return 1; \ } SHIFTOP(LeftShift, << ) SHIFTOP(RightShift, >> ) #endif const luaL_Reg LuaScriptInterface::luaConfigManagerTable[] = { {"getString", LuaScriptInterface::luaConfigManagerGetString}, {"getNumber", LuaScriptInterface::luaConfigManagerGetNumber}, {"getBoolean", LuaScriptInterface::luaConfigManagerGetBoolean}, {nullptr, nullptr} }; int LuaScriptInterface::luaConfigManagerGetString(lua_State* L) { pushString(L, g_config.getString(getNumber<ConfigManager::string_config_t>(L, -1))); return 1; } int LuaScriptInterface::luaConfigManagerGetNumber(lua_State* L) { lua_pushnumber(L, g_config.getNumber(getNumber<ConfigManager::integer_config_t>(L, -1))); return 1; } int LuaScriptInterface::luaConfigManagerGetBoolean(lua_State* L) { pushBoolean(L, g_config.getBoolean(getNumber<ConfigManager::boolean_config_t>(L, -1))); return 1; } const luaL_Reg LuaScriptInterface::luaDatabaseTable[] = { {"query", LuaScriptInterface::luaDatabaseExecute}, {"asyncQuery", LuaScriptInterface::luaDatabaseAsyncExecute}, {"storeQuery", LuaScriptInterface::luaDatabaseStoreQuery}, {"asyncStoreQuery", LuaScriptInterface::luaDatabaseAsyncStoreQuery}, {"escapeString", LuaScriptInterface::luaDatabaseEscapeString}, {"escapeBlob", LuaScriptInterface::luaDatabaseEscapeBlob}, {"lastInsertId", LuaScriptInterface::luaDatabaseLastInsertId}, {"tableExists", LuaScriptInterface::luaDatabaseTableExists}, {nullptr, nullptr} }; int LuaScriptInterface::luaDatabaseExecute(lua_State* L) { pushBoolean(L, Database::getInstance().executeQuery(getString(L, -1))); return 1; } int LuaScriptInterface::luaDatabaseAsyncExecute(lua_State* L) { std::function<void(DBResult_ptr, bool)> callback; if (lua_gettop(L) > 1) { int32_t ref = luaL_ref(L, LUA_REGISTRYINDEX); auto scriptId = getScriptEnv()->getScriptId(); callback = [ref, scriptId](DBResult_ptr, bool success) { lua_State* luaState = g_luaEnvironment.getLuaState(); if (!luaState) { return; } if (!LuaScriptInterface::reserveScriptEnv()) { luaL_unref(luaState, LUA_REGISTRYINDEX, ref); return; } lua_rawgeti(luaState, LUA_REGISTRYINDEX, ref); pushBoolean(luaState, success); auto env = getScriptEnv(); env->setScriptId(scriptId, &g_luaEnvironment); g_luaEnvironment.callFunction(1); luaL_unref(luaState, LUA_REGISTRYINDEX, ref); }; } g_databaseTasks.addTask(getString(L, -1), callback); return 0; } int LuaScriptInterface::luaDatabaseStoreQuery(lua_State* L) { if (DBResult_ptr res = Database::getInstance().storeQuery(getString(L, -1))) { lua_pushnumber(L, 
int LuaScriptInterface::luaDatabaseStoreQuery(lua_State* L)
{
    if (DBResult_ptr res = Database::getInstance().storeQuery(getString(L, -1))) {
        lua_pushnumber(L, ScriptEnvironment::addResult(res));
    } else {
        pushBoolean(L, false);
    }
    return 1;
}

int LuaScriptInterface::luaDatabaseAsyncStoreQuery(lua_State* L)
{
    std::function<void(DBResult_ptr, bool)> callback;
    if (lua_gettop(L) > 1) {
        int32_t ref = luaL_ref(L, LUA_REGISTRYINDEX);
        auto scriptId = getScriptEnv()->getScriptId();
        callback = [ref, scriptId](DBResult_ptr result, bool) {
            lua_State* luaState = g_luaEnvironment.getLuaState();
            if (!luaState) {
                return;
            }

            if (!LuaScriptInterface::reserveScriptEnv()) {
                luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
                return;
            }

            lua_rawgeti(luaState, LUA_REGISTRYINDEX, ref);
            if (result) {
                lua_pushnumber(luaState, ScriptEnvironment::addResult(result));
            } else {
                pushBoolean(luaState, false);
            }
            auto env = getScriptEnv();
            env->setScriptId(scriptId, &g_luaEnvironment);
            g_luaEnvironment.callFunction(1);

            luaL_unref(luaState, LUA_REGISTRYINDEX, ref);
        };
    }
    g_databaseTasks.addTask(getString(L, -1), callback, true);
    return 0;
}

int LuaScriptInterface::luaDatabaseEscapeString(lua_State* L)
{
    pushString(L, Database::getInstance().escapeString(getString(L, -1)));
    return 1;
}

int LuaScriptInterface::luaDatabaseEscapeBlob(lua_State* L)
{
    uint32_t length = getNumber<uint32_t>(L, 2);
    pushString(L, Database::getInstance().escapeBlob(getString(L, 1).c_str(), length));
    return 1;
}

int LuaScriptInterface::luaDatabaseLastInsertId(lua_State* L)
{
    lua_pushnumber(L, Database::getInstance().getLastInsertId());
    return 1;
}

int LuaScriptInterface::luaDatabaseTableExists(lua_State* L)
{
    pushBoolean(L, DatabaseManager::tableExists(getString(L, -1)));
    return 1;
}

const luaL_Reg LuaScriptInterface::luaResultTable[] = {
    {"getNumber", LuaScriptInterface::luaResultGetNumber},
    {"getString", LuaScriptInterface::luaResultGetString},
    {"getStream", LuaScriptInterface::luaResultGetStream},
    {"next", LuaScriptInterface::luaResultNext},
    {"free", LuaScriptInterface::luaResultFree},
    {nullptr, nullptr}
};

int LuaScriptInterface::luaResultGetNumber(lua_State* L)
{
    DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
    if (!res) {
        pushBoolean(L, false);
        return 1;
    }

    const std::string& s = getString(L, 2);
    lua_pushnumber(L, res->getNumber<int64_t>(s));
    return 1;
}

int LuaScriptInterface::luaResultGetString(lua_State* L)
{
    DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
    if (!res) {
        pushBoolean(L, false);
        return 1;
    }

    const std::string& s = getString(L, 2);
    pushString(L, res->getString(s));
    return 1;
}

int LuaScriptInterface::luaResultGetStream(lua_State* L)
{
    DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, 1));
    if (!res) {
        pushBoolean(L, false);
        return 1;
    }

    unsigned long length;
    const char* stream = res->getStream(getString(L, 2), length);
    lua_pushlstring(L, stream, length);
    lua_pushnumber(L, length);
    return 2;
}

int LuaScriptInterface::luaResultNext(lua_State* L)
{
    DBResult_ptr res = ScriptEnvironment::getResultByID(getNumber<uint32_t>(L, -1));
    if (!res) {
        pushBoolean(L, false);
        return 1;
    }

    pushBoolean(L, res->next());
    return 1;
}

int LuaScriptInterface::luaResultFree(lua_State* L)
{
    pushBoolean(L, ScriptEnvironment::removeResult(getNumber<uint32_t>(L, -1)));
    return 1;
}

// Userdata
int LuaScriptInterface::luaUserdataCompare(lua_State* L)
{
    // userdataA == userdataB
    pushBoolean(L, getUserdata<void>(L, 1) == getUserdata<void>(L, 2));
    return 1;
}
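/*
 * Editor's note on isType() below: the intent is to compare the 'h'
 * (class-name hash) slots stored by registerClass() in the two metatables,
 * first walking the deeper class up through __index by the difference of the
 * 'p' (parent count) slots, so that e.g. a Player also counts as a Creature.
 */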
// _G
int LuaScriptInterface::luaIsType(lua_State* L)
{
    // isType(derived, base)
    lua_getmetatable(L, -2);
    lua_getmetatable(L, -2);

    lua_rawgeti(L, -2, 'p');
    uint_fast8_t parentsB = getNumber<uint_fast8_t>(L, 1);

    lua_rawgeti(L, -3, 'h');
    size_t hashB = getNumber<size_t>(L, 1);

    lua_rawgeti(L, -3, 'p');
    uint_fast8_t parentsA = getNumber<uint_fast8_t>(L, 1);

    for (uint_fast8_t i = parentsA; i < parentsB; ++i) {
        lua_getfield(L, -3, "__index");
        lua_replace(L, -4);
    }

    lua_rawgeti(L, -4, 'h');
    size_t hashA = getNumber<size_t>(L, 1);

    pushBoolean(L, hashA == hashB);
    return 1;
}

int LuaScriptInterface::luaRawGetMetatable(lua_State* L)
{
    // rawgetmetatable(metatableName)
    luaL_getmetatable(L, getString(L, 1).c_str());
    return 1;
}

// os
int LuaScriptInterface::luaSystemTime(lua_State* L)
{
    // os.mtime()
    lua_pushnumber(L, OTSYS_TIME());
    return 1;
}

// table
int LuaScriptInterface::luaTableCreate(lua_State* L)
{
    // table.create(arrayLength, keyLength)
    lua_createtable(L, getNumber<int32_t>(L, 1), getNumber<int32_t>(L, 2));
    return 1;
}

// Game
int LuaScriptInterface::luaGameGetSpectators(lua_State* L)
{
    // Game.getSpectators(position[, multifloor = false[, onlyPlayer = false[, minRangeX = 0[, maxRangeX = 0[, minRangeY = 0[, maxRangeY = 0]]]]]])
    const Position& position = getPosition(L, 1);
    bool multifloor = getBoolean(L, 2, false);
    bool onlyPlayers = getBoolean(L, 3, false);
    int32_t minRangeX = getNumber<int32_t>(L, 4, 0);
    int32_t maxRangeX = getNumber<int32_t>(L, 5, 0);
    int32_t minRangeY = getNumber<int32_t>(L, 6, 0);
    int32_t maxRangeY = getNumber<int32_t>(L, 7, 0);

    SpectatorHashSet spectators;
    g_game.map.getSpectators(spectators, position, multifloor, onlyPlayers, minRangeX, maxRangeX, minRangeY, maxRangeY);

    lua_createtable(L, spectators.size(), 0);

    int index = 0;
    for (Creature* creature : spectators) {
        pushUserdata<Creature>(L, creature);
        setCreatureMetatable(L, -1, creature);
        lua_rawseti(L, -2, ++index);
    }
    return 1;
}

int LuaScriptInterface::luaGameGetPlayers(lua_State* L)
{
    // Game.getPlayers()
    lua_createtable(L, g_game.getPlayersOnline(), 0);

    int index = 0;
    for (const auto& playerEntry : g_game.getPlayers()) {
        pushUserdata<Player>(L, playerEntry.second);
        setMetatable(L, -1, "Player");
        lua_rawseti(L, -2, ++index);
    }
    return 1;
}

int LuaScriptInterface::luaGameLoadMap(lua_State* L)
{
    // Game.loadMap(path)
    const std::string& path = getString(L, 1);
    g_dispatcher.addTask(createTask(std::bind(&Game::loadMap, &g_game, path)));
    return 0;
}

int LuaScriptInterface::luaGameGetExperienceStage(lua_State* L)
{
    // Game.getExperienceStage(level)
    uint32_t level = getNumber<uint32_t>(L, 1);
    lua_pushnumber(L, g_game.getExperienceStage(level));
    return 1;
}

int LuaScriptInterface::luaGameGetMonsterCount(lua_State* L)
{
    // Game.getMonsterCount()
    lua_pushnumber(L, g_game.getMonstersOnline());
    return 1;
}

int LuaScriptInterface::luaGameGetPlayerCount(lua_State* L)
{
    // Game.getPlayerCount()
    lua_pushnumber(L, g_game.getPlayersOnline());
    return 1;
}

int LuaScriptInterface::luaGameGetNpcCount(lua_State* L)
{
    // Game.getNpcCount()
    lua_pushnumber(L, g_game.getNpcsOnline());
    return 1;
}

int LuaScriptInterface::luaGameGetTowns(lua_State* L)
{
    // Game.getTowns()
    const auto& towns = g_game.map.towns.getTowns();
    lua_createtable(L, towns.size(), 0);

    int index = 0;
    for (auto townEntry : towns) {
        pushUserdata<Town>(L, townEntry.second);
        setMetatable(L, -1, "Town");
        lua_rawseti(L, -2, ++index);
    }
    return 1;
}
int LuaScriptInterface::luaGameGetHouses(lua_State* L)
{
    // Game.getHouses()
    const auto& houses = g_game.map.houses.getHouses();
    lua_createtable(L, houses.size(), 0);

    int index = 0;
    for (auto houseEntry : houses) {
        pushUserdata<House>(L, houseEntry.second);
        setMetatable(L, -1, "House");
        lua_rawseti(L, -2, ++index);
    }
    return 1;
}

int LuaScriptInterface::luaGameGetGameState(lua_State* L)
{
    // Game.getGameState()
    lua_pushnumber(L, g_game.getGameState());
    return 1;
}

int LuaScriptInterface::luaGameSetGameState(lua_State* L)
{
    // Game.setGameState(state)
    GameState_t state = getNumber<GameState_t>(L, 1);
    g_game.setGameState(state);
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaGameGetWorldType(lua_State* L)
{
    // Game.getWorldType()
    lua_pushnumber(L, g_game.getWorldType());
    return 1;
}

int LuaScriptInterface::luaGameSetWorldType(lua_State* L)
{
    // Game.setWorldType(type)
    WorldType_t type = getNumber<WorldType_t>(L, 1);
    g_game.setWorldType(type);
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaGameGetReturnMessage(lua_State* L)
{
    // Game.getReturnMessage(value)
    ReturnValue value = getNumber<ReturnValue>(L, 1);
    pushString(L, getReturnMessage(value));
    return 1;
}

int LuaScriptInterface::luaGameCreateItem(lua_State* L)
{
    // Game.createItem(itemId[, count[, position]])
    uint16_t count = getNumber<uint16_t>(L, 2, 1);
    uint16_t id;
    if (isNumber(L, 1)) {
        id = getNumber<uint16_t>(L, 1);
    } else {
        id = Item::items.getItemIdByName(getString(L, 1));
        if (id == 0) {
            lua_pushnil(L);
            return 1;
        }
    }

    const ItemType& it = Item::items[id];
    if (it.stackable) {
        count = std::min<uint16_t>(count, 100);
    }

    Item* item = Item::CreateItem(id, count);
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    if (lua_gettop(L) >= 3) {
        const Position& position = getPosition(L, 3);
        Tile* tile = g_game.map.getTile(position);
        if (!tile) {
            delete item;
            lua_pushnil(L);
            return 1;
        }

        g_game.internalAddItem(tile, item, INDEX_WHEREEVER, FLAG_NOLIMIT);
    } else {
        getScriptEnv()->addTempItem(item);
        item->setParent(VirtualCylinder::virtualCylinder);
    }

    pushUserdata<Item>(L, item);
    setItemMetatable(L, -1, item);
    return 1;
}

int LuaScriptInterface::luaGameCreateContainer(lua_State* L)
{
    // Game.createContainer(itemId, size[, position])
    uint16_t size = getNumber<uint16_t>(L, 2);
    uint16_t id;
    if (isNumber(L, 1)) {
        id = getNumber<uint16_t>(L, 1);
    } else {
        id = Item::items.getItemIdByName(getString(L, 1));
        if (id == 0) {
            lua_pushnil(L);
            return 1;
        }
    }

    Container* container = Item::CreateItemAsContainer(id, size);
    if (!container) {
        lua_pushnil(L);
        return 1;
    }

    if (lua_gettop(L) >= 3) {
        const Position& position = getPosition(L, 3);
        Tile* tile = g_game.map.getTile(position);
        if (!tile) {
            delete container;
            lua_pushnil(L);
            return 1;
        }

        g_game.internalAddItem(tile, container, INDEX_WHEREEVER, FLAG_NOLIMIT);
    } else {
        getScriptEnv()->addTempItem(container);
        container->setParent(VirtualCylinder::virtualCylinder);
    }

    pushUserdata<Container>(L, container);
    setMetatable(L, -1, "Container");
    return 1;
}

int LuaScriptInterface::luaGameCreateMonster(lua_State* L)
{
    // Game.createMonster(monsterName, position[, extended = false[, force = false]])
    Monster* monster = Monster::createMonster(getString(L, 1));
    if (!monster) {
        lua_pushnil(L);
        return 1;
    }

    const Position& position = getPosition(L, 2);
    bool extended = getBoolean(L, 3, false);
    bool force = getBoolean(L, 4, false);
    if (g_game.placeCreature(monster, position, extended, force)) {
        pushUserdata<Monster>(L, monster);
        setMetatable(L, -1, "Monster");
    } else {
        delete monster;
        lua_pushnil(L);
    }
    return 1;
}
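/*
 * Hypothetical Lua usage of the Game.* factory functions above and below
 * (item id, monster name and coordinates are illustrative only):
 *
 *   local item = Game.createItem(2160, 3, Position(100, 100, 7))
 *   local monster = Game.createMonster("Rat", Position(100, 101, 7))
 *   if not monster then
 *       print("spawn blocked")
 *   end
 */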
int LuaScriptInterface::luaGameCreateNpc(lua_State* L)
{
    // Game.createNpc(npcName, position[, extended = false[, force = false]])
    Npc* npc = Npc::createNpc(getString(L, 1));
    if (!npc) {
        lua_pushnil(L);
        return 1;
    }

    const Position& position = getPosition(L, 2);
    bool extended = getBoolean(L, 3, false);
    bool force = getBoolean(L, 4, false);
    if (g_game.placeCreature(npc, position, extended, force)) {
        pushUserdata<Npc>(L, npc);
        setMetatable(L, -1, "Npc");
    } else {
        delete npc;
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaGameCreateTile(lua_State* L)
{
    // Game.createTile(x, y, z[, isDynamic = false])
    // Game.createTile(position[, isDynamic = false])
    Position position;
    bool isDynamic;
    if (isTable(L, 1)) {
        position = getPosition(L, 1);
        isDynamic = getBoolean(L, 2, false);
    } else {
        position.x = getNumber<uint16_t>(L, 1);
        position.y = getNumber<uint16_t>(L, 2);
        position.z = getNumber<uint16_t>(L, 3);
        isDynamic = getBoolean(L, 4, false);
    }

    Tile* tile = g_game.map.getTile(position);
    if (!tile) {
        if (isDynamic) {
            tile = new DynamicTile(position.x, position.y, position.z);
        } else {
            tile = new StaticTile(position.x, position.y, position.z);
        }

        g_game.map.setTile(position, tile);
    }

    pushUserdata(L, tile);
    setMetatable(L, -1, "Tile");
    return 1;
}

int LuaScriptInterface::luaGameStartRaid(lua_State* L)
{
    // Game.startRaid(raidName)
    const std::string& raidName = getString(L, 1);

    Raid* raid = g_game.raids.getRaidByName(raidName);
    if (raid) {
        raid->startRaid();
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

// Variant
int LuaScriptInterface::luaVariantCreate(lua_State* L)
{
    // Variant(number or string or position or thing)
    LuaVariant variant;
    if (isUserdata(L, 2)) {
        if (Thing* thing = getThing(L, 2)) {
            variant.type = VARIANT_TARGETPOSITION;
            variant.pos = thing->getPosition();
        }
    } else if (isTable(L, 2)) {
        variant.type = VARIANT_POSITION;
        variant.pos = getPosition(L, 2);
    } else if (isNumber(L, 2)) {
        variant.type = VARIANT_NUMBER;
        variant.number = getNumber<uint32_t>(L, 2);
    } else if (isString(L, 2)) {
        variant.type = VARIANT_STRING;
        variant.text = getString(L, 2);
    }
    pushVariant(L, variant);
    return 1;
}

int LuaScriptInterface::luaVariantGetNumber(lua_State* L)
{
    // Variant:getNumber()
    const LuaVariant& variant = getVariant(L, 1);
    if (variant.type == VARIANT_NUMBER) {
        lua_pushnumber(L, variant.number);
    } else {
        lua_pushnumber(L, 0);
    }
    return 1;
}

int LuaScriptInterface::luaVariantGetString(lua_State* L)
{
    // Variant:getString()
    const LuaVariant& variant = getVariant(L, 1);
    if (variant.type == VARIANT_STRING) {
        pushString(L, variant.text);
    } else {
        pushString(L, std::string());
    }
    return 1;
}

int LuaScriptInterface::luaVariantGetPosition(lua_State* L)
{
    // Variant:getPosition()
    const LuaVariant& variant = getVariant(L, 1);
    if (variant.type == VARIANT_POSITION || variant.type == VARIANT_TARGETPOSITION) {
        pushPosition(L, variant.pos);
    } else {
        pushPosition(L, Position());
    }
    return 1;
}

// Position
int LuaScriptInterface::luaPositionCreate(lua_State* L)
{
    // Position([x = 0[, y = 0[, z = 0[, stackpos = 0]]]])
    // Position([position])
    if (lua_gettop(L) <= 1) {
        pushPosition(L, Position());
        return 1;
    }

    int32_t stackpos;
    if (isTable(L, 2)) {
        const Position& position = getPosition(L, 2, stackpos);
        pushPosition(L, position, stackpos);
    } else {
        uint16_t x = getNumber<uint16_t>(L, 2, 0);
        uint16_t y = getNumber<uint16_t>(L, 3, 0);
        uint8_t z = getNumber<uint8_t>(L, 4, 0);
        stackpos = getNumber<int32_t>(L, 5, 0);

        pushPosition(L, Position(x, y, z), stackpos);
    }
    return 1;
}
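/*
 * Illustrative Lua for the Position bindings above and below (coordinates are
 * arbitrary; __add/__sub/__eq are wired to luaPositionAdd/Sub/Compare):
 *
 *   local pos = Position(100, 100, 7)
 *   local east = pos + Position(1, 0, 0)
 *   print(pos == east)            -- false
 *   print(pos:getDistance(east))  -- 1
 */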
int LuaScriptInterface::luaPositionAdd(lua_State* L)
{
    // positionValue = position + positionEx
    int32_t stackpos;
    const Position& position = getPosition(L, 1, stackpos);

    Position positionEx;
    if (stackpos == 0) {
        positionEx = getPosition(L, 2, stackpos);
    } else {
        positionEx = getPosition(L, 2);
    }

    pushPosition(L, position + positionEx, stackpos);
    return 1;
}

int LuaScriptInterface::luaPositionSub(lua_State* L)
{
    // positionValue = position - positionEx
    int32_t stackpos;
    const Position& position = getPosition(L, 1, stackpos);

    Position positionEx;
    if (stackpos == 0) {
        positionEx = getPosition(L, 2, stackpos);
    } else {
        positionEx = getPosition(L, 2);
    }

    pushPosition(L, position - positionEx, stackpos);
    return 1;
}

int LuaScriptInterface::luaPositionCompare(lua_State* L)
{
    // position == positionEx
    const Position& positionEx = getPosition(L, 2);
    const Position& position = getPosition(L, 1);
    pushBoolean(L, position == positionEx);
    return 1;
}

int LuaScriptInterface::luaPositionGetDistance(lua_State* L)
{
    // position:getDistance(positionEx)
    const Position& positionEx = getPosition(L, 2);
    const Position& position = getPosition(L, 1);
    lua_pushnumber(L, std::max<int32_t>(
        std::max<int32_t>(
            std::abs(Position::getDistanceX(position, positionEx)),
            std::abs(Position::getDistanceY(position, positionEx))
        ),
        std::abs(Position::getDistanceZ(position, positionEx))
    ));
    return 1;
}

int LuaScriptInterface::luaPositionIsSightClear(lua_State* L)
{
    // position:isSightClear(positionEx[, sameFloor = true])
    bool sameFloor = getBoolean(L, 3, true);
    const Position& positionEx = getPosition(L, 2);
    const Position& position = getPosition(L, 1);
    pushBoolean(L, g_game.isSightClear(position, positionEx, sameFloor));
    return 1;
}

int LuaScriptInterface::luaPositionSendMagicEffect(lua_State* L)
{
    // position:sendMagicEffect(magicEffect[, player = nullptr])
    SpectatorHashSet spectators;
    if (lua_gettop(L) >= 3) {
        Player* player = getPlayer(L, 3);
        if (player) {
            spectators.insert(player);
        }
    }

    MagicEffectClasses magicEffect = getNumber<MagicEffectClasses>(L, 2);
    const Position& position = getPosition(L, 1);
    if (!spectators.empty()) {
        Game::addMagicEffect(spectators, position, magicEffect);
    } else {
        g_game.addMagicEffect(position, magicEffect);
    }

    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaPositionSendDistanceEffect(lua_State* L)
{
    // position:sendDistanceEffect(positionEx, distanceEffect[, player = nullptr])
    SpectatorHashSet spectators;
    if (lua_gettop(L) >= 4) {
        Player* player = getPlayer(L, 4);
        if (player) {
            spectators.insert(player);
        }
    }

    ShootType_t distanceEffect = getNumber<ShootType_t>(L, 3);
    const Position& positionEx = getPosition(L, 2);
    const Position& position = getPosition(L, 1);
    if (!spectators.empty()) {
        Game::addDistanceEffect(spectators, position, positionEx, distanceEffect);
    } else {
        g_game.addDistanceEffect(position, positionEx, distanceEffect);
    }

    pushBoolean(L, true);
    return 1;
}

// Tile
int LuaScriptInterface::luaTileCreate(lua_State* L)
{
    // Tile(x, y, z)
    // Tile(position)
    Tile* tile;
    if (isTable(L, 2)) {
        tile = g_game.map.getTile(getPosition(L, 2));
    } else {
        uint8_t z = getNumber<uint8_t>(L, 4);
        uint16_t y = getNumber<uint16_t>(L, 3);
        uint16_t x = getNumber<uint16_t>(L, 2);
        tile = g_game.map.getTile(x, y, z);
    }

    if (tile) {
        pushUserdata<Tile>(L, tile);
        setMetatable(L, -1, "Tile");
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetPosition(lua_State* L)
{
    // tile:getPosition()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (tile) {
        pushPosition(L, tile->getPosition());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetGround(lua_State* L)
{
    // tile:getGround()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (tile && tile->getGround()) {
        pushUserdata<Item>(L, tile->getGround());
        setItemMetatable(L, -1, tile->getGround());
    } else {
        lua_pushnil(L);
    }
    return 1;
}
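/*
 * Hypothetical Lua usage of the Tile accessors above and below (the position
 * and the Item:getId() call are the editor's sketch):
 *
 *   local tile = Tile(Position(100, 100, 7))
 *   if tile then
 *       local ground = tile:getGround()
 *       print(tile:getThingCount(), ground and ground:getId())
 *   end
 */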
// Tile
int LuaScriptInterface::luaTileCreate(lua_State* L)
{
    // Tile(x, y, z)
    // Tile(position)
    Tile* tile;
    if (isTable(L, 2)) {
        tile = g_game.map.getTile(getPosition(L, 2));
    } else {
        uint8_t z = getNumber<uint8_t>(L, 4);
        uint16_t y = getNumber<uint16_t>(L, 3);
        uint16_t x = getNumber<uint16_t>(L, 2);
        tile = g_game.map.getTile(x, y, z);
    }

    if (tile) {
        pushUserdata<Tile>(L, tile);
        setMetatable(L, -1, "Tile");
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetPosition(lua_State* L)
{
    // tile:getPosition()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (tile) {
        pushPosition(L, tile->getPosition());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetGround(lua_State* L)
{
    // tile:getGround()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (tile && tile->getGround()) {
        pushUserdata<Item>(L, tile->getGround());
        setItemMetatable(L, -1, tile->getGround());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetThing(lua_State* L)
{
    // tile:getThing(index)
    int32_t index = getNumber<int32_t>(L, 2);
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    Thing* thing = tile->getThing(index);
    if (!thing) {
        lua_pushnil(L);
        return 1;
    }

    if (Creature* creature = thing->getCreature()) {
        pushUserdata<Creature>(L, creature);
        setCreatureMetatable(L, -1, creature);
    } else if (Item* item = thing->getItem()) {
        pushUserdata<Item>(L, item);
        setItemMetatable(L, -1, item);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetThingCount(lua_State* L)
{
    // tile:getThingCount()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (tile) {
        lua_pushnumber(L, tile->getThingCount());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetTopVisibleThing(lua_State* L)
{
    // tile:getTopVisibleThing(creature)
    Creature* creature = getCreature(L, 2);
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    Thing* thing = tile->getTopVisibleThing(creature);
    if (!thing) {
        lua_pushnil(L);
        return 1;
    }

    if (Creature* visibleCreature = thing->getCreature()) {
        pushUserdata<Creature>(L, visibleCreature);
        setCreatureMetatable(L, -1, visibleCreature);
    } else if (Item* visibleItem = thing->getItem()) {
        pushUserdata<Item>(L, visibleItem);
        setItemMetatable(L, -1, visibleItem);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetTopTopItem(lua_State* L)
{
    // tile:getTopTopItem()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    Item* item = tile->getTopTopItem();
    if (item) {
        pushUserdata<Item>(L, item);
        setItemMetatable(L, -1, item);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetTopDownItem(lua_State* L)
{
    // tile:getTopDownItem()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    Item* item = tile->getTopDownItem();
    if (item) {
        pushUserdata<Item>(L, item);
        setItemMetatable(L, -1, item);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetFieldItem(lua_State* L)
{
    // tile:getFieldItem()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    Item* item = tile->getFieldItem();
    if (item) {
        pushUserdata<Item>(L, item);
        setItemMetatable(L, -1, item);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetItemById(lua_State* L)
{
    // tile:getItemById(itemId[, subType = -1])
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    uint16_t itemId;
    if (isNumber(L, 2)) {
        itemId = getNumber<uint16_t>(L, 2);
    } else {
        itemId = Item::items.getItemIdByName(getString(L, 2));
        if (itemId == 0) {
            lua_pushnil(L);
            return 1;
        }
    }
    int32_t subType = getNumber<int32_t>(L, 3, -1);

    Item* item = g_game.findItemOfType(tile, itemId, false, subType);
    if (item) {
        pushUserdata<Item>(L, item);
        setItemMetatable(L, -1, item);
    } else {
        lua_pushnil(L);
    }
    return 1;
}
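// Item lookups here accept either a numeric id or an item name; names are
// resolved through Item::items.getItemIdByName() and yield nil when unknown.
// Illustrative only (the name is an example):
//   local field = tile:getItemById("fire field")
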
int LuaScriptInterface::luaTileGetItemByType(lua_State* L)
{
    // tile:getItemByType(itemType)
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    bool found;
    ItemTypes_t itemType = getNumber<ItemTypes_t>(L, 2);
    switch (itemType) {
        case ITEM_TYPE_TELEPORT:
            found = tile->hasFlag(TILESTATE_TELEPORT);
            break;
        case ITEM_TYPE_MAGICFIELD:
            found = tile->hasFlag(TILESTATE_MAGICFIELD);
            break;
        case ITEM_TYPE_MAILBOX:
            found = tile->hasFlag(TILESTATE_MAILBOX);
            break;
        case ITEM_TYPE_TRASHHOLDER:
            found = tile->hasFlag(TILESTATE_TRASHHOLDER);
            break;
        case ITEM_TYPE_BED:
            found = tile->hasFlag(TILESTATE_BED);
            break;
        case ITEM_TYPE_DEPOT:
            found = tile->hasFlag(TILESTATE_DEPOT);
            break;
        default:
            found = true;
            break;
    }

    if (!found) {
        lua_pushnil(L);
        return 1;
    }

    if (Item* item = tile->getGround()) {
        const ItemType& it = Item::items[item->getID()];
        if (it.type == itemType) {
            pushUserdata<Item>(L, item);
            setItemMetatable(L, -1, item);
            return 1;
        }
    }

    if (const TileItemVector* items = tile->getItemList()) {
        for (Item* item : *items) {
            const ItemType& it = Item::items[item->getID()];
            if (it.type == itemType) {
                pushUserdata<Item>(L, item);
                setItemMetatable(L, -1, item);
                return 1;
            }
        }
    }

    lua_pushnil(L);
    return 1;
}

int LuaScriptInterface::luaTileGetItemByTopOrder(lua_State* L)
{
    // tile:getItemByTopOrder(topOrder)
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    int32_t topOrder = getNumber<int32_t>(L, 2);
    Item* item = tile->getItemByTopOrder(topOrder);
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    pushUserdata<Item>(L, item);
    setItemMetatable(L, -1, item);
    return 1;
}

int LuaScriptInterface::luaTileGetItemCountById(lua_State* L)
{
    // tile:getItemCountById(itemId[, subType = -1])
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    int32_t subType = getNumber<int32_t>(L, 3, -1);

    uint16_t itemId;
    if (isNumber(L, 2)) {
        itemId = getNumber<uint16_t>(L, 2);
    } else {
        itemId = Item::items.getItemIdByName(getString(L, 2));
        if (itemId == 0) {
            lua_pushnil(L);
            return 1;
        }
    }

    lua_pushnumber(L, tile->getItemTypeCount(itemId, subType));
    return 1;
}

int LuaScriptInterface::luaTileGetBottomCreature(lua_State* L)
{
    // tile:getBottomCreature()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    const Creature* creature = tile->getBottomCreature();
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    pushUserdata<const Creature>(L, creature);
    setCreatureMetatable(L, -1, creature);
    return 1;
}

int LuaScriptInterface::luaTileGetTopCreature(lua_State* L)
{
    // tile:getTopCreature()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    Creature* creature = tile->getTopCreature();
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    pushUserdata<Creature>(L, creature);
    setCreatureMetatable(L, -1, creature);
    return 1;
}

int LuaScriptInterface::luaTileGetBottomVisibleCreature(lua_State* L)
{
    // tile:getBottomVisibleCreature(creature)
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    Creature* creature = getCreature(L, 2);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    const Creature* visibleCreature = tile->getBottomVisibleCreature(creature);
    if (visibleCreature) {
        pushUserdata<const Creature>(L, visibleCreature);
        setCreatureMetatable(L, -1, visibleCreature);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetTopVisibleCreature(lua_State* L)
{
    // tile:getTopVisibleCreature(creature)
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    Creature* creature = getCreature(L, 2);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    Creature* visibleCreature = tile->getTopVisibleCreature(creature);
    if (visibleCreature) {
        pushUserdata<Creature>(L, visibleCreature);
        setCreatureMetatable(L, -1, visibleCreature);
    } else {
        lua_pushnil(L);
    }
    return 1;
}
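// The "visible" creature getters above differ from getBottomCreature()/
// getTopCreature() in that they filter the stack through what the given
// spectator can actually see (ghost mode, invisibility and the like).
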
int LuaScriptInterface::luaTileGetItems(lua_State* L)
{
    // tile:getItems()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    TileItemVector* itemVector = tile->getItemList();
    if (!itemVector) {
        lua_pushnil(L);
        return 1;
    }

    lua_createtable(L, itemVector->size(), 0);

    int index = 0;
    for (Item* item : *itemVector) {
        pushUserdata<Item>(L, item);
        setItemMetatable(L, -1, item);
        lua_rawseti(L, -2, ++index);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetItemCount(lua_State* L)
{
    // tile:getItemCount()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    lua_pushnumber(L, tile->getItemCount());
    return 1;
}

int LuaScriptInterface::luaTileGetDownItemCount(lua_State* L)
{
    // tile:getDownItemCount()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (tile) {
        lua_pushnumber(L, tile->getDownItemCount());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetTopItemCount(lua_State* L)
{
    // tile:getTopItemCount()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    lua_pushnumber(L, tile->getTopItemCount());
    return 1;
}

int LuaScriptInterface::luaTileGetCreatures(lua_State* L)
{
    // tile:getCreatures()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    CreatureVector* creatureVector = tile->getCreatures();
    if (!creatureVector) {
        lua_pushnil(L);
        return 1;
    }

    lua_createtable(L, creatureVector->size(), 0);

    int index = 0;
    for (Creature* creature : *creatureVector) {
        pushUserdata<Creature>(L, creature);
        setCreatureMetatable(L, -1, creature);
        lua_rawseti(L, -2, ++index);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetCreatureCount(lua_State* L)
{
    // tile:getCreatureCount()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    lua_pushnumber(L, tile->getCreatureCount());
    return 1;
}

int LuaScriptInterface::luaTileHasProperty(lua_State* L)
{
    // tile:hasProperty(property[, item])
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    Item* item;
    if (lua_gettop(L) >= 3) {
        item = getUserdata<Item>(L, 3);
    } else {
        item = nullptr;
    }

    ITEMPROPERTY property = getNumber<ITEMPROPERTY>(L, 2);
    if (item) {
        pushBoolean(L, tile->hasProperty(item, property));
    } else {
        pushBoolean(L, tile->hasProperty(property));
    }
    return 1;
}

int LuaScriptInterface::luaTileGetThingIndex(lua_State* L)
{
    // tile:getThingIndex(thing)
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    Thing* thing = getThing(L, 2);
    if (thing) {
        lua_pushnumber(L, tile->getThingIndex(thing));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileHasFlag(lua_State* L)
{
    // tile:hasFlag(flag)
    Tile* tile = getUserdata<Tile>(L, 1);
    if (tile) {
        tileflags_t flag = getNumber<tileflags_t>(L, 2);
        pushBoolean(L, tile->hasFlag(flag));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileQueryAdd(lua_State* L)
{
    // tile:queryAdd(thing[, flags])
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    Thing* thing = getThing(L, 2);
    if (thing) {
        uint32_t flags = getNumber<uint32_t>(L, 3, 0);
        lua_pushnumber(L, tile->queryAdd(0, *thing, 1, flags));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTileGetHouse(lua_State* L)
{
    // tile:getHouse()
    Tile* tile = getUserdata<Tile>(L, 1);
    if (!tile) {
        lua_pushnil(L);
        return 1;
    }

    if (HouseTile* houseTile = dynamic_cast<HouseTile*>(tile)) {
        pushUserdata<House>(L, houseTile->getHouse());
        setMetatable(L, -1, "House");
    } else {
        lua_pushnil(L);
    }
    return 1;
}

// NetworkMessage
int LuaScriptInterface::luaNetworkMessageCreate(lua_State* L)
{
    // NetworkMessage()
    pushUserdata<NetworkMessage>(L, new NetworkMessage);
    setMetatable(L, -1, "NetworkMessage");
    return 1;
}

int LuaScriptInterface::luaNetworkMessageDelete(lua_State* L)
{
    NetworkMessage** messagePtr = getRawUserdata<NetworkMessage>(L, 1);
    if (messagePtr && *messagePtr) {
        delete *messagePtr;
        *messagePtr = nullptr;
    }
    return 0;
}
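// NetworkMessage userdata created from Lua owns its message: the delete
// handler above frees it and clears the pointer, so garbage collection
// (which is expected to be wired to luaNetworkMessageDelete) cannot free
// the same message twice.
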
int LuaScriptInterface::luaNetworkMessageGetByte(lua_State* L)
{
    // networkMessage:getByte()
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        lua_pushnumber(L, message->getByte());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageGetU16(lua_State* L)
{
    // networkMessage:getU16()
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        lua_pushnumber(L, message->get<uint16_t>());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageGetU32(lua_State* L)
{
    // networkMessage:getU32()
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        lua_pushnumber(L, message->get<uint32_t>());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageGetU64(lua_State* L)
{
    // networkMessage:getU64()
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        lua_pushnumber(L, message->get<uint64_t>());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageGetString(lua_State* L)
{
    // networkMessage:getString()
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        pushString(L, message->getString());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageGetPosition(lua_State* L)
{
    // networkMessage:getPosition()
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        pushPosition(L, message->getPosition());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageAddByte(lua_State* L)
{
    // networkMessage:addByte(number)
    uint8_t number = getNumber<uint8_t>(L, 2);
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        message->addByte(number);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageAddU16(lua_State* L)
{
    // networkMessage:addU16(number)
    uint16_t number = getNumber<uint16_t>(L, 2);
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        message->add<uint16_t>(number);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageAddU32(lua_State* L)
{
    // networkMessage:addU32(number)
    uint32_t number = getNumber<uint32_t>(L, 2);
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        message->add<uint32_t>(number);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageAddU64(lua_State* L)
{
    // networkMessage:addU64(number)
    uint64_t number = getNumber<uint64_t>(L, 2);
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        message->add<uint64_t>(number);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageAddString(lua_State* L)
{
    // networkMessage:addString(string)
    const std::string& string = getString(L, 2);
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        message->addString(string);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageAddPosition(lua_State* L)
{
    // networkMessage:addPosition(position)
    const Position& position = getPosition(L, 2);
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        message->addPosition(position);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}
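// Reads and writes are strictly positional: a reader must consume fields in
// exactly the order the writer appended them. Illustrative only (the opcode
// is made up):
//   local msg = NetworkMessage()
//   msg:addByte(0x32)
//   msg:addString("ping")
//   -- the receiving side would call msg:getByte() and then msg:getString()
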
int LuaScriptInterface::luaNetworkMessageAddDouble(lua_State* L)
{
    // networkMessage:addDouble(number)
    double number = getNumber<double>(L, 2);
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        message->addDouble(number);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageAddItem(lua_State* L)
{
    // networkMessage:addItem(item)
    Item* item = getUserdata<Item>(L, 2);
    if (!item) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND));
        lua_pushnil(L);
        return 1;
    }

    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        message->addItem(item);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageAddItemId(lua_State* L)
{
    // networkMessage:addItemId(itemId)
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (!message) {
        lua_pushnil(L);
        return 1;
    }

    uint16_t itemId;
    if (isNumber(L, 2)) {
        itemId = getNumber<uint16_t>(L, 2);
    } else {
        itemId = Item::items.getItemIdByName(getString(L, 2));
        if (itemId == 0) {
            lua_pushnil(L);
            return 1;
        }
    }

    message->addItemId(itemId);
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaNetworkMessageReset(lua_State* L)
{
    // networkMessage:reset()
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        message->reset();
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageSkipBytes(lua_State* L)
{
    // networkMessage:skipBytes(number)
    int16_t number = getNumber<int16_t>(L, 2);
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (message) {
        message->skipBytes(number);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaNetworkMessageSendToPlayer(lua_State* L)
{
    // networkMessage:sendToPlayer(player)
    NetworkMessage* message = getUserdata<NetworkMessage>(L, 1);
    if (!message) {
        lua_pushnil(L);
        return 1;
    }

    Player* player = getPlayer(L, 2);
    if (player) {
        player->sendNetworkMessage(*message);
        pushBoolean(L, true);
    } else {
        reportErrorFunc(getErrorDesc(LUA_ERROR_PLAYER_NOT_FOUND));
        lua_pushnil(L);
    }
    return 1;
}
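// End-to-end sketch for the message bindings (illustrative only): build a
// message, fill it in write order, then hand it off with
// msg:sendToPlayer(player).
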
// ModalWindow
int LuaScriptInterface::luaModalWindowCreate(lua_State* L)
{
    // ModalWindow(id, title, message)
    const std::string& message = getString(L, 4);
    const std::string& title = getString(L, 3);
    uint32_t id = getNumber<uint32_t>(L, 2);

    pushUserdata<ModalWindow>(L, new ModalWindow(id, title, message));
    setMetatable(L, -1, "ModalWindow");
    return 1;
}

int LuaScriptInterface::luaModalWindowDelete(lua_State* L)
{
    ModalWindow** windowPtr = getRawUserdata<ModalWindow>(L, 1);
    if (windowPtr && *windowPtr) {
        delete *windowPtr;
        *windowPtr = nullptr;
    }
    return 0;
}

int LuaScriptInterface::luaModalWindowGetId(lua_State* L)
{
    // modalWindow:getId()
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        lua_pushnumber(L, window->id);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowGetTitle(lua_State* L)
{
    // modalWindow:getTitle()
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        pushString(L, window->title);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowGetMessage(lua_State* L)
{
    // modalWindow:getMessage()
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        pushString(L, window->message);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowSetTitle(lua_State* L)
{
    // modalWindow:setTitle(text)
    const std::string& text = getString(L, 2);
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        window->title = text;
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowSetMessage(lua_State* L)
{
    // modalWindow:setMessage(text)
    const std::string& text = getString(L, 2);
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        window->message = text;
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowGetButtonCount(lua_State* L)
{
    // modalWindow:getButtonCount()
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        lua_pushnumber(L, window->buttons.size());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowGetChoiceCount(lua_State* L)
{
    // modalWindow:getChoiceCount()
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        lua_pushnumber(L, window->choices.size());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowAddButton(lua_State* L)
{
    // modalWindow:addButton(id, text)
    const std::string& text = getString(L, 3);
    uint8_t id = getNumber<uint8_t>(L, 2);
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        window->buttons.emplace_back(text, id);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowAddChoice(lua_State* L)
{
    // modalWindow:addChoice(id, text)
    const std::string& text = getString(L, 3);
    uint8_t id = getNumber<uint8_t>(L, 2);
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        window->choices.emplace_back(text, id);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowGetDefaultEnterButton(lua_State* L)
{
    // modalWindow:getDefaultEnterButton()
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        lua_pushnumber(L, window->defaultEnterButton);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowSetDefaultEnterButton(lua_State* L)
{
    // modalWindow:setDefaultEnterButton(buttonId)
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        window->defaultEnterButton = getNumber<uint8_t>(L, 2);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowGetDefaultEscapeButton(lua_State* L)
{
    // modalWindow:getDefaultEscapeButton()
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        lua_pushnumber(L, window->defaultEscapeButton);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowSetDefaultEscapeButton(lua_State* L)
{
    // modalWindow:setDefaultEscapeButton(buttonId)
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        window->defaultEscapeButton = getNumber<uint8_t>(L, 2);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowHasPriority(lua_State* L)
{
    // modalWindow:hasPriority()
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        pushBoolean(L, window->priority);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowSetPriority(lua_State* L)
{
    // modalWindow:setPriority(priority)
    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        window->priority = getBoolean(L, 2);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaModalWindowSendToPlayer(lua_State* L)
{
    // modalWindow:sendToPlayer(player)
    Player* player = getPlayer(L, 2);
    if (!player) {
        lua_pushnil(L);
        return 1;
    }

    ModalWindow* window = getUserdata<ModalWindow>(L, 1);
    if (window) {
        if (!player->hasModalWindowOpen(window->id)) {
            player->sendModalWindow(*window);
        }
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}
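// sendToPlayer() checks hasModalWindowOpen() first, so re-sending an id that
// is already on screen is a no-op instead of stacking duplicate dialogs.
// Typical assembly (illustrative only; ids and texts are examples):
//   local window = ModalWindow(1, "Offer", "Accept the trade?")
//   window:addButton(1, "Yes")
//   window:addButton(2, "No")
//   window:setDefaultEnterButton(1)
//   window:sendToPlayer(player)
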
// Item
int LuaScriptInterface::luaItemCreate(lua_State* L)
{
    // Item(uid)
    uint32_t id = getNumber<uint32_t>(L, 2);

    Item* item = getScriptEnv()->getItemByUID(id);
    if (item) {
        pushUserdata<Item>(L, item);
        setItemMetatable(L, -1, item);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemIsItem(lua_State* L)
{
    // item:isItem()
    pushBoolean(L, getUserdata<const Item>(L, 1) != nullptr);
    return 1;
}

int LuaScriptInterface::luaItemGetParent(lua_State* L)
{
    // item:getParent()
    Item* item = getUserdata<Item>(L, 1);
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    Cylinder* parent = item->getParent();
    if (!parent) {
        lua_pushnil(L);
        return 1;
    }

    pushCylinder(L, parent);
    return 1;
}

int LuaScriptInterface::luaItemGetTopParent(lua_State* L)
{
    // item:getTopParent()
    Item* item = getUserdata<Item>(L, 1);
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    Cylinder* topParent = item->getTopParent();
    if (!topParent) {
        lua_pushnil(L);
        return 1;
    }

    pushCylinder(L, topParent);
    return 1;
}

int LuaScriptInterface::luaItemGetId(lua_State* L)
{
    // item:getId()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        lua_pushnumber(L, item->getID());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemClone(lua_State* L)
{
    // item:clone()
    Item* item = getUserdata<Item>(L, 1);
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    Item* clone = item->clone();
    if (!clone) {
        lua_pushnil(L);
        return 1;
    }

    getScriptEnv()->addTempItem(clone);
    clone->setParent(VirtualCylinder::virtualCylinder);

    pushUserdata<Item>(L, clone);
    setItemMetatable(L, -1, clone);
    return 1;
}

int LuaScriptInterface::luaItemSplit(lua_State* L)
{
    // item:split([count = 1])
    Item** itemPtr = getRawUserdata<Item>(L, 1);
    if (!itemPtr) {
        lua_pushnil(L);
        return 1;
    }

    Item* item = *itemPtr;
    if (!item || !item->isStackable()) {
        lua_pushnil(L);
        return 1;
    }

    uint16_t count = std::min<uint16_t>(getNumber<uint16_t>(L, 2, 1), item->getItemCount());
    uint16_t diff = item->getItemCount() - count;

    Item* splitItem = item->clone();
    if (!splitItem) {
        lua_pushnil(L);
        return 1;
    }

    // clone() copies the full stack size, so the split stack must be given
    // its own count; otherwise the split would duplicate items
    splitItem->setItemCount(count);

    ScriptEnvironment* env = getScriptEnv();
    uint32_t uid = env->addThing(item);

    Item* newItem = g_game.transformItem(item, item->getID(), diff);
    if (item->isRemoved()) {
        env->removeItemByUID(uid);
    }

    if (newItem && newItem != item) {
        env->insertItem(uid, newItem);
    }

    *itemPtr = newItem;

    splitItem->setParent(VirtualCylinder::virtualCylinder);
    env->addTempItem(splitItem);

    pushUserdata<Item>(L, splitItem);
    setItemMetatable(L, -1, splitItem);
    return 1;
}
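// Both clone() and split() leave the new item parented to
// VirtualCylinder::virtualCylinder and registered as a temporary item; the
// script must still place it somewhere real (e.g. via item:moveTo()), since
// temporary items left on the virtual cylinder are discarded when the
// script environment is reset.
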
int LuaScriptInterface::luaItemRemove(lua_State* L)
{
    // item:remove([count = -1])
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        int32_t count = getNumber<int32_t>(L, 2, -1);
        pushBoolean(L, g_game.internalRemoveItem(item, count) == RETURNVALUE_NOERROR);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetUniqueId(lua_State* L)
{
    // item:getUniqueId()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        uint32_t uniqueId = item->getUniqueId();
        if (uniqueId == 0) {
            uniqueId = getScriptEnv()->addThing(item);
        }
        lua_pushnumber(L, uniqueId);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetActionId(lua_State* L)
{
    // item:getActionId()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        lua_pushnumber(L, item->getActionId());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemSetActionId(lua_State* L)
{
    // item:setActionId(actionId)
    uint16_t actionId = getNumber<uint16_t>(L, 2);
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        item->setActionId(actionId);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetCount(lua_State* L)
{
    // item:getCount()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        lua_pushnumber(L, item->getItemCount());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetCharges(lua_State* L)
{
    // item:getCharges()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        lua_pushnumber(L, item->getCharges());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetFluidType(lua_State* L)
{
    // item:getFluidType()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        lua_pushnumber(L, item->getFluidType());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetWeight(lua_State* L)
{
    // item:getWeight()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        lua_pushnumber(L, item->getWeight());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetSubType(lua_State* L)
{
    // item:getSubType()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        lua_pushnumber(L, item->getSubType());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetName(lua_State* L)
{
    // item:getName()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        pushString(L, item->getName());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetPluralName(lua_State* L)
{
    // item:getPluralName()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        pushString(L, item->getPluralName());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetArticle(lua_State* L)
{
    // item:getArticle()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        pushString(L, item->getArticle());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetPosition(lua_State* L)
{
    // item:getPosition()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        pushPosition(L, item->getPosition());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetTile(lua_State* L)
{
    // item:getTile()
    Item* item = getUserdata<Item>(L, 1);
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    Tile* tile = item->getTile();
    if (tile) {
        pushUserdata<Tile>(L, tile);
        setMetatable(L, -1, "Tile");
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemHasAttribute(lua_State* L)
{
    // item:hasAttribute(key)
    Item* item = getUserdata<Item>(L, 1);
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    itemAttrTypes attribute;
    if (isNumber(L, 2)) {
        attribute = getNumber<itemAttrTypes>(L, 2);
    } else if (isString(L, 2)) {
        attribute = stringToItemAttribute(getString(L, 2));
    } else {
        attribute = ITEM_ATTRIBUTE_NONE;
    }

    pushBoolean(L, item->hasAttribute(attribute));
    return 1;
}

int LuaScriptInterface::luaItemGetAttribute(lua_State* L)
{
    // item:getAttribute(key)
    Item* item = getUserdata<Item>(L, 1);
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    itemAttrTypes attribute;
    if (isNumber(L, 2)) {
        attribute = getNumber<itemAttrTypes>(L, 2);
    } else if (isString(L, 2)) {
        attribute = stringToItemAttribute(getString(L, 2));
    } else {
        attribute = ITEM_ATTRIBUTE_NONE;
    }

    if (ItemAttributes::isIntAttrType(attribute)) {
        lua_pushnumber(L, item->getIntAttr(attribute));
    } else if (ItemAttributes::isStrAttrType(attribute)) {
        pushString(L, item->getStrAttr(attribute));
    } else {
        lua_pushnil(L);
    }
    return 1;
}
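// Attribute keys may be given as itemAttrTypes numbers or as strings
// resolved by stringToItemAttribute(); unknown keys fall back to
// ITEM_ATTRIBUTE_NONE, for which the accessors push nil. Illustrative only
// (the key/value pair is an example):
//   item:setAttribute("description", "a strange aura surrounds it")
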
int LuaScriptInterface::luaItemSetAttribute(lua_State* L)
{
    // item:setAttribute(key, value)
    Item* item = getUserdata<Item>(L, 1);
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    itemAttrTypes attribute;
    if (isNumber(L, 2)) {
        attribute = getNumber<itemAttrTypes>(L, 2);
    } else if (isString(L, 2)) {
        attribute = stringToItemAttribute(getString(L, 2));
    } else {
        attribute = ITEM_ATTRIBUTE_NONE;
    }

    if (ItemAttributes::isIntAttrType(attribute)) {
        if (attribute == ITEM_ATTRIBUTE_UNIQUEID) {
            reportErrorFunc("Attempt to set protected key \"uid\"");
            pushBoolean(L, false);
            return 1;
        }

        item->setIntAttr(attribute, getNumber<int32_t>(L, 3));
        pushBoolean(L, true);
    } else if (ItemAttributes::isStrAttrType(attribute)) {
        item->setStrAttr(attribute, getString(L, 3));
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemRemoveAttribute(lua_State* L)
{
    // item:removeAttribute(key)
    Item* item = getUserdata<Item>(L, 1);
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    itemAttrTypes attribute;
    if (isNumber(L, 2)) {
        attribute = getNumber<itemAttrTypes>(L, 2);
    } else if (isString(L, 2)) {
        attribute = stringToItemAttribute(getString(L, 2));
    } else {
        attribute = ITEM_ATTRIBUTE_NONE;
    }

    bool ret = attribute != ITEM_ATTRIBUTE_UNIQUEID;
    if (ret) {
        item->removeAttribute(attribute);
    } else {
        reportErrorFunc("Attempt to erase protected key \"uid\"");
    }
    pushBoolean(L, ret);
    return 1;
}

int LuaScriptInterface::luaItemMoveTo(lua_State* L)
{
    // item:moveTo(position or cylinder)
    Item** itemPtr = getRawUserdata<Item>(L, 1);
    if (!itemPtr) {
        lua_pushnil(L);
        return 1;
    }

    Item* item = *itemPtr;
    if (!item || item->isRemoved()) {
        lua_pushnil(L);
        return 1;
    }

    Cylinder* toCylinder;
    if (isUserdata(L, 2)) {
        const LuaDataType type = getUserdataType(L, 2);
        switch (type) {
            case LuaData_Container:
                toCylinder = getUserdata<Container>(L, 2);
                break;
            case LuaData_Player:
                toCylinder = getUserdata<Player>(L, 2);
                break;
            case LuaData_Tile:
                toCylinder = getUserdata<Tile>(L, 2);
                break;
            default:
                toCylinder = nullptr;
                break;
        }
    } else {
        toCylinder = g_game.map.getTile(getPosition(L, 2));
    }

    if (!toCylinder) {
        lua_pushnil(L);
        return 1;
    }

    if (item->getParent() == toCylinder) {
        pushBoolean(L, true);
        return 1;
    }

    if (item->getParent() == VirtualCylinder::virtualCylinder) {
        pushBoolean(L, g_game.internalAddItem(toCylinder, item) == RETURNVALUE_NOERROR);
    } else {
        Item* moveItem = nullptr;
        ReturnValue ret = g_game.internalMoveItem(item->getParent(), toCylinder, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, FLAG_NOLIMIT | FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE | FLAG_IGNORENOTMOVEABLE);
        if (moveItem) {
            *itemPtr = moveItem;
        }
        pushBoolean(L, ret == RETURNVALUE_NOERROR);
    }
    return 1;
}

int LuaScriptInterface::luaItemTransform(lua_State* L)
{
    // item:transform(itemId[, count/subType = -1])
    Item** itemPtr = getRawUserdata<Item>(L, 1);
    if (!itemPtr) {
        lua_pushnil(L);
        return 1;
    }

    Item*& item = *itemPtr;
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    uint16_t itemId;
    if (isNumber(L, 2)) {
        itemId = getNumber<uint16_t>(L, 2);
    } else {
        itemId = Item::items.getItemIdByName(getString(L, 2));
        if (itemId == 0) {
            lua_pushnil(L);
            return 1;
        }
    }

    int32_t subType = getNumber<int32_t>(L, 3, -1);
    if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
        pushBoolean(L, true);
        return 1;
    }

    const ItemType& it = Item::items[itemId];
    if (it.stackable) {
        subType = std::min<int32_t>(subType, 100);
    }

    ScriptEnvironment* env = getScriptEnv();
    uint32_t uid = env->addThing(item);

    Item* newItem = g_game.transformItem(item, itemId, subType);
    if (item->isRemoved()) {
        env->removeItemByUID(uid);
    }

    if (newItem && newItem != item) {
        env->insertItem(uid, newItem);
    }

    item = newItem;
    pushBoolean(L, true);
    return 1;
}
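// moveTo(), split() and transform() operate on the raw userdata pointer so
// they can retarget the Lua object whenever g_game hands back a different
// Item* (e.g. when stackables merge); the script keeps using the same
// variable afterwards without noticing the swap.
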
int LuaScriptInterface::luaItemDecay(lua_State* L)
{
    // item:decay()
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        g_game.startDecay(item);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemGetDescription(lua_State* L)
{
    // item:getDescription(distance)
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        int32_t distance = getNumber<int32_t>(L, 2);
        pushString(L, item->getDescription(distance));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaItemHasProperty(lua_State* L)
{
    // item:hasProperty(property)
    Item* item = getUserdata<Item>(L, 1);
    if (item) {
        ITEMPROPERTY property = getNumber<ITEMPROPERTY>(L, 2);
        pushBoolean(L, item->hasProperty(property));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

// Container
int LuaScriptInterface::luaContainerCreate(lua_State* L)
{
    // Container(uid)
    uint32_t id = getNumber<uint32_t>(L, 2);

    Container* container = getScriptEnv()->getContainerByUID(id);
    if (container) {
        pushUserdata(L, container);
        setMetatable(L, -1, "Container");
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaContainerGetSize(lua_State* L)
{
    // container:getSize()
    Container* container = getUserdata<Container>(L, 1);
    if (container) {
        lua_pushnumber(L, container->size());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaContainerGetCapacity(lua_State* L)
{
    // container:getCapacity()
    Container* container = getUserdata<Container>(L, 1);
    if (container) {
        lua_pushnumber(L, container->capacity());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaContainerGetEmptySlots(lua_State* L)
{
    // container:getEmptySlots([recursive = false])
    Container* container = getUserdata<Container>(L, 1);
    if (!container) {
        lua_pushnil(L);
        return 1;
    }

    uint32_t slots = container->capacity() - container->size();
    bool recursive = getBoolean(L, 2, false);
    if (recursive) {
        for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
            if (Container* tmpContainer = (*it)->getContainer()) {
                slots += tmpContainer->capacity() - tmpContainer->size();
            }
        }
    }
    lua_pushnumber(L, slots);
    return 1;
}

int LuaScriptInterface::luaContainerGetItemHoldingCount(lua_State* L)
{
    // container:getItemHoldingCount()
    Container* container = getUserdata<Container>(L, 1);
    if (container) {
        lua_pushnumber(L, container->getItemHoldingCount());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaContainerGetItem(lua_State* L)
{
    // container:getItem(index)
    Container* container = getUserdata<Container>(L, 1);
    if (!container) {
        lua_pushnil(L);
        return 1;
    }

    uint32_t index = getNumber<uint32_t>(L, 2);
    Item* item = container->getItemByIndex(index);
    if (item) {
        pushUserdata<Item>(L, item);
        setItemMetatable(L, -1, item);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaContainerHasItem(lua_State* L)
{
    // container:hasItem(item)
    Item* item = getUserdata<Item>(L, 2);
    Container* container = getUserdata<Container>(L, 1);
    if (container) {
        pushBoolean(L, container->isHoldingItem(item));
    } else {
        lua_pushnil(L);
    }
    return 1;
}
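// The two insertion paths below differ in ownership: addItem() creates the
// item itself and deletes it again if internalAddItem() fails, while
// addItemEx() takes an existing parentless item and only unregisters it
// from the temporary-item list on success.
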
int LuaScriptInterface::luaContainerAddItem(lua_State* L)
{
    // container:addItem(itemId[, count/subType = 1[, index = INDEX_WHEREEVER[, flags = 0]]])
    Container* container = getUserdata<Container>(L, 1);
    if (!container) {
        lua_pushnil(L);
        return 1;
    }

    uint16_t itemId;
    if (isNumber(L, 2)) {
        itemId = getNumber<uint16_t>(L, 2);
    } else {
        itemId = Item::items.getItemIdByName(getString(L, 2));
        if (itemId == 0) {
            lua_pushnil(L);
            return 1;
        }
    }

    uint32_t subType = getNumber<uint32_t>(L, 3, 1);

    Item* item = Item::CreateItem(itemId, std::min<uint32_t>(subType, 100));
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    int32_t index = getNumber<int32_t>(L, 4, INDEX_WHEREEVER);
    uint32_t flags = getNumber<uint32_t>(L, 5, 0);

    ReturnValue ret = g_game.internalAddItem(container, item, index, flags);
    if (ret == RETURNVALUE_NOERROR) {
        pushUserdata<Item>(L, item);
        setItemMetatable(L, -1, item);
    } else {
        delete item;
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaContainerAddItemEx(lua_State* L)
{
    // container:addItemEx(item[, index = INDEX_WHEREEVER[, flags = 0]])
    Item* item = getUserdata<Item>(L, 2);
    if (!item) {
        lua_pushnil(L);
        return 1;
    }

    Container* container = getUserdata<Container>(L, 1);
    if (!container) {
        lua_pushnil(L);
        return 1;
    }

    if (item->getParent() != VirtualCylinder::virtualCylinder) {
        reportErrorFunc("Item already has a parent");
        lua_pushnil(L);
        return 1;
    }

    int32_t index = getNumber<int32_t>(L, 3, INDEX_WHEREEVER);
    uint32_t flags = getNumber<uint32_t>(L, 4, 0);
    ReturnValue ret = g_game.internalAddItem(container, item, index, flags);
    if (ret == RETURNVALUE_NOERROR) {
        ScriptEnvironment::removeTempItem(item);
    }
    lua_pushnumber(L, ret);
    return 1;
}

int LuaScriptInterface::luaContainerGetItemCountById(lua_State* L)
{
    // container:getItemCountById(itemId[, subType = -1])
    Container* container = getUserdata<Container>(L, 1);
    if (!container) {
        lua_pushnil(L);
        return 1;
    }

    uint16_t itemId;
    if (isNumber(L, 2)) {
        itemId = getNumber<uint16_t>(L, 2);
    } else {
        itemId = Item::items.getItemIdByName(getString(L, 2));
        if (itemId == 0) {
            lua_pushnil(L);
            return 1;
        }
    }

    int32_t subType = getNumber<int32_t>(L, 3, -1);
    lua_pushnumber(L, container->getItemTypeCount(itemId, subType));
    return 1;
}

// Teleport
int LuaScriptInterface::luaTeleportCreate(lua_State* L)
{
    // Teleport(uid)
    uint32_t id = getNumber<uint32_t>(L, 2);

    Item* item = getScriptEnv()->getItemByUID(id);
    if (item && item->getTeleport()) {
        pushUserdata(L, item);
        setMetatable(L, -1, "Teleport");
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTeleportGetDestination(lua_State* L)
{
    // teleport:getDestination()
    Teleport* teleport = getUserdata<Teleport>(L, 1);
    if (teleport) {
        pushPosition(L, teleport->getDestPos());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaTeleportSetDestination(lua_State* L)
{
    // teleport:setDestination(position)
    Teleport* teleport = getUserdata<Teleport>(L, 1);
    if (teleport) {
        teleport->setDestPos(getPosition(L, 2));
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}
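// Illustrative teleport usage (coordinates are made up):
//   local teleport = Teleport(item:getUniqueId())
//   if teleport then
//       teleport:setDestination(Position(95, 117, 7))
//   end
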
// Creature
int LuaScriptInterface::luaCreatureCreate(lua_State* L)
{
    // Creature(id or name or userdata)
    Creature* creature;
    if (isNumber(L, 2)) {
        creature = g_game.getCreatureByID(getNumber<uint32_t>(L, 2));
    } else if (isString(L, 2)) {
        creature = g_game.getCreatureByName(getString(L, 2));
    } else if (isUserdata(L, 2)) {
        LuaDataType type = getUserdataType(L, 2);
        if (type != LuaData_Player && type != LuaData_Monster && type != LuaData_Npc) {
            lua_pushnil(L);
            return 1;
        }
        creature = getUserdata<Creature>(L, 2);
    } else {
        creature = nullptr;
    }

    if (creature) {
        pushUserdata<Creature>(L, creature);
        setCreatureMetatable(L, -1, creature);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetEvents(lua_State* L)
{
    // creature:getEvents(type)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    CreatureEventType_t eventType = getNumber<CreatureEventType_t>(L, 2);
    const auto& eventList = creature->getCreatureEvents(eventType);
    lua_createtable(L, eventList.size(), 0);

    int index = 0;
    for (CreatureEvent* event : eventList) {
        pushString(L, event->getName());
        lua_rawseti(L, -2, ++index);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureRegisterEvent(lua_State* L)
{
    // creature:registerEvent(name)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (creature) {
        const std::string& name = getString(L, 2);
        pushBoolean(L, creature->registerCreatureEvent(name));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureUnregisterEvent(lua_State* L)
{
    // creature:unregisterEvent(name)
    const std::string& name = getString(L, 2);
    Creature* creature = getUserdata<Creature>(L, 1);
    if (creature) {
        pushBoolean(L, creature->unregisterCreatureEvent(name));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureIsRemoved(lua_State* L)
{
    // creature:isRemoved()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        pushBoolean(L, creature->isRemoved());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureIsCreature(lua_State* L)
{
    // creature:isCreature()
    pushBoolean(L, getUserdata<const Creature>(L, 1) != nullptr);
    return 1;
}

int LuaScriptInterface::luaCreatureIsInGhostMode(lua_State* L)
{
    // creature:isInGhostMode()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        pushBoolean(L, creature->isInGhostMode());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureIsHealthHidden(lua_State* L)
{
    // creature:isHealthHidden()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        pushBoolean(L, creature->isHealthHidden());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureCanSee(lua_State* L)
{
    // creature:canSee(position)
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        const Position& position = getPosition(L, 2);
        pushBoolean(L, creature->canSee(position));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureCanSeeCreature(lua_State* L)
{
    // creature:canSeeCreature(creature)
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        const Creature* otherCreature = getCreature(L, 2);
        pushBoolean(L, creature->canSeeCreature(otherCreature));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetParent(lua_State* L)
{
    // creature:getParent()
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    Cylinder* parent = creature->getParent();
    if (!parent) {
        lua_pushnil(L);
        return 1;
    }

    pushCylinder(L, parent);
    return 1;
}

int LuaScriptInterface::luaCreatureGetId(lua_State* L)
{
    // creature:getId()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        lua_pushnumber(L, creature->getID());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetName(lua_State* L)
{
    // creature:getName()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        pushString(L, creature->getName());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetTarget(lua_State* L)
{
    // creature:getTarget()
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    Creature* target = creature->getAttackedCreature();
    if (target) {
        pushUserdata<Creature>(L, target);
        setCreatureMetatable(L, -1, target);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureSetTarget(lua_State* L)
{
    // creature:setTarget(target)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (creature) {
        Creature* target = getCreature(L, 2);
        pushBoolean(L, creature->setAttackedCreature(target));
    } else {
        lua_pushnil(L);
    }
    return 1;
}
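// setTarget() forwards to Creature::setAttackedCreature(), which may refuse
// (returning false) when the target cannot be attacked; passing nil clears
// the current target.
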
int LuaScriptInterface::luaCreatureGetFollowCreature(lua_State* L)
{
    // creature:getFollowCreature()
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    Creature* followCreature = creature->getFollowCreature();
    if (followCreature) {
        pushUserdata<Creature>(L, followCreature);
        setCreatureMetatable(L, -1, followCreature);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureSetFollowCreature(lua_State* L)
{
    // creature:setFollowCreature(followedCreature)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (creature) {
        Creature* followCreature = getCreature(L, 2);
        pushBoolean(L, creature->setFollowCreature(followCreature));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetMaster(lua_State* L)
{
    // creature:getMaster()
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    Creature* master = creature->getMaster();
    if (!master) {
        lua_pushnil(L);
        return 1;
    }

    pushUserdata<Creature>(L, master);
    setCreatureMetatable(L, -1, master);
    return 1;
}

int LuaScriptInterface::luaCreatureSetMaster(lua_State* L)
{
    // creature:setMaster(master)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    Creature* master = getCreature(L, 2);
    if (master) {
        pushBoolean(L, creature->convinceCreature(master));
    } else {
        master = creature->getMaster();
        if (master) {
            master->removeSummon(creature);
            creature->incrementReferenceCounter();
            creature->setDropLoot(true);
        }
        pushBoolean(L, true);
    }
    g_game.updateCreatureType(creature);
    return 1;
}
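// setMaster(nil) detaches a summon: the old master drops it from his summon
// list while incrementReferenceCounter() keeps the creature alive for the
// rest of the script; setMaster(creature) instead delegates to
// convinceCreature(). Either way the creature type is refreshed afterwards.
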
int LuaScriptInterface::luaCreatureGetLight(lua_State* L)
{
    // creature:getLight()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    LightInfo light;
    creature->getCreatureLight(light);
    lua_pushnumber(L, light.level);
    lua_pushnumber(L, light.color);
    return 2;
}

int LuaScriptInterface::luaCreatureSetLight(lua_State* L)
{
    // creature:setLight(color, level)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    LightInfo light;
    light.color = getNumber<uint8_t>(L, 2);
    light.level = getNumber<uint8_t>(L, 3);
    creature->setCreatureLight(light);
    g_game.changeLight(creature);
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaCreatureGetSpeed(lua_State* L)
{
    // creature:getSpeed()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        lua_pushnumber(L, creature->getSpeed());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetBaseSpeed(lua_State* L)
{
    // creature:getBaseSpeed()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        lua_pushnumber(L, creature->getBaseSpeed());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureChangeSpeed(lua_State* L)
{
    // creature:changeSpeed(delta)
    Creature* creature = getCreature(L, 1);
    if (!creature) {
        reportErrorFunc(getErrorDesc(LUA_ERROR_CREATURE_NOT_FOUND));
        pushBoolean(L, false);
        return 1;
    }

    int32_t delta = getNumber<int32_t>(L, 2);
    g_game.changeSpeed(creature, delta);
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaCreatureSetDropLoot(lua_State* L)
{
    // creature:setDropLoot(doDrop)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (creature) {
        creature->setDropLoot(getBoolean(L, 2));
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetPosition(lua_State* L)
{
    // creature:getPosition()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        pushPosition(L, creature->getPosition());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetTile(lua_State* L)
{
    // creature:getTile()
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    Tile* tile = creature->getTile();
    if (tile) {
        pushUserdata<Tile>(L, tile);
        setMetatable(L, -1, "Tile");
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetDirection(lua_State* L)
{
    // creature:getDirection()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        lua_pushnumber(L, creature->getDirection());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureSetDirection(lua_State* L)
{
    // creature:setDirection(direction)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (creature) {
        pushBoolean(L, g_game.internalCreatureTurn(creature, getNumber<Direction>(L, 2)));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetHealth(lua_State* L)
{
    // creature:getHealth()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        lua_pushnumber(L, creature->getHealth());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureAddHealth(lua_State* L)
{
    // creature:addHealth(healthChange)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    CombatDamage damage;
    damage.primary.value = getNumber<int32_t>(L, 2);
    if (damage.primary.value >= 0) {
        damage.primary.type = COMBAT_HEALING;
    } else {
        damage.primary.type = COMBAT_UNDEFINEDDAMAGE;
    }
    pushBoolean(L, g_game.combatChangeHealth(nullptr, creature, damage));
    return 1;
}

int LuaScriptInterface::luaCreatureGetMaxHealth(lua_State* L)
{
    // creature:getMaxHealth()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        lua_pushnumber(L, creature->getMaxHealth());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureSetMaxHealth(lua_State* L)
{
    // creature:setMaxHealth(maxHealth)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    creature->healthMax = getNumber<uint32_t>(L, 2);
    creature->health = std::min<int32_t>(creature->health, creature->healthMax);
    g_game.addCreatureHealth(creature);

    Player* player = creature->getPlayer();
    if (player) {
        player->sendStats();
    }
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaCreatureSetHiddenHealth(lua_State* L)
{
    // creature:setHiddenHealth(hide)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (creature) {
        creature->setHiddenHealth(getBoolean(L, 2));
        g_game.addCreatureHealth(creature);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetMana(lua_State* L)
{
    // creature:getMana()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        lua_pushnumber(L, creature->getMana());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureAddMana(lua_State* L)
{
    // creature:addMana(manaChange[, animationOnLoss = false])
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    int32_t manaChange = getNumber<int32_t>(L, 2);
    bool animationOnLoss = getBoolean(L, 3, false);
    if (!animationOnLoss && manaChange < 0) {
        creature->changeMana(manaChange);
    } else {
        g_game.combatChangeMana(nullptr, creature, manaChange, ORIGIN_NONE);
    }
    pushBoolean(L, true);
    return 1;
}
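// Sign conventions: addHealth() heals for positive values and deals
// untyped damage for negative ones; addMana() bypasses combatChangeMana()
// (and with it the loss animation) only for negative changes when
// animationOnLoss is left false.
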
int LuaScriptInterface::luaCreatureGetMaxMana(lua_State* L)
{
    // creature:getMaxMana()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        lua_pushnumber(L, creature->getMaxMana());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetSkull(lua_State* L)
{
    // creature:getSkull()
    Creature* creature = getUserdata<Creature>(L, 1);
    if (creature) {
        lua_pushnumber(L, creature->getSkull());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureSetSkull(lua_State* L)
{
    // creature:setSkull(skull)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (creature) {
        creature->setSkull(getNumber<Skulls_t>(L, 2));
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetOutfit(lua_State* L)
{
    // creature:getOutfit()
    const Creature* creature = getUserdata<const Creature>(L, 1);
    if (creature) {
        pushOutfit(L, creature->getCurrentOutfit());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureSetOutfit(lua_State* L)
{
    // creature:setOutfit(outfit)
    Creature* creature = getUserdata<Creature>(L, 1);
    if (creature) {
        creature->defaultOutfit = getOutfit(L, 2);
        g_game.internalCreatureChangeOutfit(creature, creature->defaultOutfit);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetCondition(lua_State* L)
{
    // creature:getCondition(conditionType[, conditionId = CONDITIONID_COMBAT[, subId = 0]])
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
    ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
    uint32_t subId = getNumber<uint32_t>(L, 4, 0);

    Condition* condition = creature->getCondition(conditionType, conditionId, subId);
    if (condition) {
        pushUserdata<Condition>(L, condition);
        setWeakMetatable(L, -1, "Condition");
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureAddCondition(lua_State* L)
{
    // creature:addCondition(condition[, force = false])
    Creature* creature = getUserdata<Creature>(L, 1);
    Condition* condition = getUserdata<Condition>(L, 2);
    if (creature && condition) {
        bool force = getBoolean(L, 3, false);
        pushBoolean(L, creature->addCondition(condition->clone(), force));
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureRemoveCondition(lua_State* L)
{
    // creature:removeCondition(conditionType[, conditionId = CONDITIONID_COMBAT[, subId = 0[, force = false]]])
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2);
    ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT);
    uint32_t subId = getNumber<uint32_t>(L, 4, 0);
    Condition* condition = creature->getCondition(conditionType, conditionId, subId);
    if (condition) {
        bool force = getBoolean(L, 5, false);
        creature->removeCondition(condition, force);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureRemove(lua_State* L)
{
    // creature:remove()
    Creature** creaturePtr = getRawUserdata<Creature>(L, 1);
    if (!creaturePtr) {
        lua_pushnil(L);
        return 1;
    }

    Creature* creature = *creaturePtr;
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    Player* player = creature->getPlayer();
    if (player) {
        player->kickPlayer(true);
    } else {
        g_game.removeCreature(creature);
    }

    *creaturePtr = nullptr;
    pushBoolean(L, true);
    return 1;
}
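// Note that addCondition() attaches a clone, so the same Condition userdata
// can be added to several creatures, and remove() kicks players (clean save
// and logout) instead of despawning them like monsters or npcs.
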
int LuaScriptInterface::luaCreatureTeleportTo(lua_State* L)
{
    // creature:teleportTo(position[, pushMovement = false])
    bool pushMovement = getBoolean(L, 3, false);

    const Position& position = getPosition(L, 2);
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    const Position oldPosition = creature->getPosition();
    if (g_game.internalTeleport(creature, position, pushMovement) != RETURNVALUE_NOERROR) {
        pushBoolean(L, false);
        return 1;
    }

    if (!pushMovement) {
        if (oldPosition.x == position.x) {
            if (oldPosition.y < position.y) {
                g_game.internalCreatureTurn(creature, DIRECTION_SOUTH);
            } else {
                g_game.internalCreatureTurn(creature, DIRECTION_NORTH);
            }
        } else if (oldPosition.x > position.x) {
            g_game.internalCreatureTurn(creature, DIRECTION_WEST);
        } else if (oldPosition.x < position.x) {
            g_game.internalCreatureTurn(creature, DIRECTION_EAST);
        }
    }
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaCreatureSay(lua_State* L)
{
    // creature:say(text, type[, ghost = false[, target = nullptr[, position]]])
    int parameters = lua_gettop(L);

    Position position;
    if (parameters >= 6) {
        position = getPosition(L, 6);
        if (!position.x || !position.y) {
            reportErrorFunc("Invalid position specified.");
            pushBoolean(L, false);
            return 1;
        }
    }

    Creature* target = nullptr;
    if (parameters >= 5) {
        target = getCreature(L, 5);
    }

    bool ghost = getBoolean(L, 4, false);

    SpeakClasses type = getNumber<SpeakClasses>(L, 3);
    const std::string& text = getString(L, 2);
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    SpectatorHashSet spectators;
    if (target) {
        spectators.insert(target);
    }

    if (position.x != 0) {
        pushBoolean(L, g_game.internalCreatureSay(creature, type, text, ghost, &spectators, &position));
    } else {
        pushBoolean(L, g_game.internalCreatureSay(creature, type, text, ghost, &spectators));
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetDamageMap(lua_State* L)
{
    // creature:getDamageMap()
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    lua_createtable(L, creature->damageMap.size(), 0);
    for (const auto& damageEntry : creature->damageMap) {
        lua_createtable(L, 0, 2);
        setField(L, "total", damageEntry.second.total);
        setField(L, "ticks", damageEntry.second.ticks);
        lua_rawseti(L, -2, damageEntry.first);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetSummons(lua_State* L)
{
    // creature:getSummons()
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    lua_createtable(L, creature->getSummonCount(), 0);

    int index = 0;
    for (Creature* summon : creature->getSummons()) {
        pushUserdata<Creature>(L, summon);
        setCreatureMetatable(L, -1, summon);
        lua_rawseti(L, -2, ++index);
    }
    return 1;
}

int LuaScriptInterface::luaCreatureGetDescription(lua_State* L)
{
    // creature:getDescription(distance)
    int32_t distance = getNumber<int32_t>(L, 2);
    Creature* creature = getUserdata<Creature>(L, 1);
    if (creature) {
        pushString(L, creature->getDescription(distance));
    } else {
        lua_pushnil(L);
    }
    return 1;
}
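// say() sketch (illustrative only; the talk-type constants stand in for any
// valid SpeakClasses value):
//   creature:say("Hello!", TALKTYPE_SAY)
//   creature:say("psst", TALKTYPE_WHISPER, false, someTarget)
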
int LuaScriptInterface::luaCreatureGetPathTo(lua_State* L)
{
    // creature:getPathTo(pos[, minTargetDist = 0[, maxTargetDist = 1[, fullPathSearch = true[, clearSight = true[, maxSearchDist = 0]]]]])
    Creature* creature = getUserdata<Creature>(L, 1);
    if (!creature) {
        lua_pushnil(L);
        return 1;
    }

    const Position& position = getPosition(L, 2);

    FindPathParams fpp;
    fpp.minTargetDist = getNumber<int32_t>(L, 3, 0);
    fpp.maxTargetDist = getNumber<int32_t>(L, 4, 1);
    fpp.fullPathSearch = getBoolean(L, 5, fpp.fullPathSearch);
    fpp.clearSight = getBoolean(L, 6, fpp.clearSight);
    fpp.maxSearchDist = getNumber<int32_t>(L, 7, fpp.maxSearchDist);

    std::forward_list<Direction> dirList;
    if (creature->getPathTo(position, dirList, fpp)) {
        lua_newtable(L);

        int index = 0;
        for (Direction dir : dirList) {
            lua_pushnumber(L, dir);
            lua_rawseti(L, -2, ++index);
        }
    } else {
        pushBoolean(L, false);
    }
    return 1;
}

// Player
int LuaScriptInterface::luaPlayerCreate(lua_State* L)
{
    // Player(id or name or userdata)
    Player* player;
    if (isNumber(L, 2)) {
        player = g_game.getPlayerByID(getNumber<uint32_t>(L, 2));
    } else if (isString(L, 2)) {
        ReturnValue ret = g_game.getPlayerByNameWildcard(getString(L, 2), player);
        if (ret != RETURNVALUE_NOERROR) {
            lua_pushnil(L);
            lua_pushnumber(L, ret);
            return 2;
        }
    } else if (isUserdata(L, 2)) {
        if (getUserdataType(L, 2) != LuaData_Player) {
            lua_pushnil(L);
            return 1;
        }
        player = getUserdata<Player>(L, 2);
    } else {
        player = nullptr;
    }

    if (player) {
        pushUserdata<Player>(L, player);
        setMetatable(L, -1, "Player");
    } else {
        lua_pushnil(L);
    }
    return 1;
}
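// Player("name") resolves through getPlayerByNameWildcard(), so a trailing
// "~" is treated as a prefix search; on failure the constructor returns nil
// plus the ReturnValue error code as a second result.
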
LuaScriptInterface::luaPlayerGetDepotChest(lua_State* L) { // player:getDepotChest(depotId[, autoCreate = false]) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } uint32_t depotId = getNumber<uint32_t>(L, 2); bool autoCreate = getBoolean(L, 3, false); DepotChest* depotChest = player->getDepotChest(depotId, autoCreate); if (depotChest) { pushUserdata<Item>(L, depotChest); setItemMetatable(L, -1, depotChest); } else { pushBoolean(L, false); } return 1; } int LuaScriptInterface::luaPlayerGetInbox(lua_State* L) { // player:getInbox() Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } Inbox* inbox = player->getInbox(); if (inbox) { pushUserdata<Item>(L, inbox); setItemMetatable(L, -1, inbox); } else { pushBoolean(L, false); } return 1; } int LuaScriptInterface::luaPlayerGetSkullTime(lua_State* L) { // player:getSkullTime() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->getSkullTicks()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerSetSkullTime(lua_State* L) { // player:setSkullTime(skullTime) Player* player = getUserdata<Player>(L, 1); if (player) { player->setSkullTicks(getNumber<int64_t>(L, 2)); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetDeathPenalty(lua_State* L) { // player:getDeathPenalty() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, static_cast<uint32_t>(player->getLostPercent() * 100)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetExperience(lua_State* L) { // player:getExperience() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->getExperience()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerAddExperience(lua_State* L) { // player:addExperience(experience[, sendText = false]) Player* player = getUserdata<Player>(L, 1); if (player) { int64_t experience = getNumber<int64_t>(L, 2); bool sendText = getBoolean(L, 3, false); player->addExperience(nullptr, experience, sendText); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerRemoveExperience(lua_State* L) { // player:removeExperience(experience[, sendText = false]) Player* player = getUserdata<Player>(L, 1); if (player) { int64_t experience = getNumber<int64_t>(L, 2); bool sendText = getBoolean(L, 3, false); player->removeExperience(experience, sendText); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetLevel(lua_State* L) { // player:getLevel() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->getLevel()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetMagicLevel(lua_State* L) { // player:getMagicLevel() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->getMagicLevel()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetBaseMagicLevel(lua_State* L) { // player:getBaseMagicLevel() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->getBaseMagicLevel()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerSetMaxMana(lua_State* L) { // player:setMaxMana(maxMana) Player* player = getPlayer(L, 1); if (player) { player->manaMax = getNumber<int32_t>(L, 2); player->mana = std::min<int32_t>(player->mana, player->manaMax); player->sendStats(); pushBoolean(L, true); } else { 
lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetManaSpent(lua_State* L) { // player:getManaSpent() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->getSpentMana()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerAddManaSpent(lua_State* L) { // player:addManaSpent(amount) Player* player = getUserdata<Player>(L, 1); if (player) { player->addManaSpent(getNumber<uint64_t>(L, 2)); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetBaseMaxHealth(lua_State* L) { // player:getBaseMaxHealth() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->healthMax); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetBaseMaxMana(lua_State* L) { // player:getBaseMaxMana() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->manaMax); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetSkillLevel(lua_State* L) { // player:getSkillLevel(skillType) skills_t skillType = getNumber<skills_t>(L, 2); Player* player = getUserdata<Player>(L, 1); if (player && skillType <= SKILL_LAST) { lua_pushnumber(L, player->skills[skillType].level); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetEffectiveSkillLevel(lua_State* L) { // player:getEffectiveSkillLevel(skillType) skills_t skillType = getNumber<skills_t>(L, 2); Player* player = getUserdata<Player>(L, 1); if (player && skillType <= SKILL_LAST) { lua_pushnumber(L, player->getSkillLevel(skillType)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetSkillPercent(lua_State* L) { // player:getSkillPercent(skillType) skills_t skillType = getNumber<skills_t>(L, 2); Player* player = getUserdata<Player>(L, 1); if (player && skillType <= SKILL_LAST) { lua_pushnumber(L, player->skills[skillType].percent); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetSkillTries(lua_State* L) { // player:getSkillTries(skillType) skills_t skillType = getNumber<skills_t>(L, 2); Player* player = getUserdata<Player>(L, 1); if (player && skillType <= SKILL_LAST) { lua_pushnumber(L, player->skills[skillType].tries); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerAddSkillTries(lua_State* L) { // player:addSkillTries(skillType, tries) Player* player = getUserdata<Player>(L, 1); if (player) { skills_t skillType = getNumber<skills_t>(L, 2); uint64_t tries = getNumber<uint64_t>(L, 3); player->addSkillAdvance(skillType, tries); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerAddOfflineTrainingTime(lua_State* L) { // player:addOfflineTrainingTime(time) Player* player = getUserdata<Player>(L, 1); if (player) { int32_t time = getNumber<int32_t>(L, 2); player->addOfflineTrainingTime(time); player->sendStats(); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetOfflineTrainingTime(lua_State* L) { // player:getOfflineTrainingTime() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->getOfflineTrainingTime()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerRemoveOfflineTrainingTime(lua_State* L) { // player:removeOfflineTrainingTime(time) Player* player = getUserdata<Player>(L, 1); if (player) { int32_t time = getNumber<int32_t>(L, 2); player->removeOfflineTrainingTime(time); player->sendStats(); pushBoolean(L, true); } else { 
lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerAddOfflineTrainingTries(lua_State* L) { // player:addOfflineTrainingTries(skillType, tries) Player* player = getUserdata<Player>(L, 1); if (player) { skills_t skillType = getNumber<skills_t>(L, 2); uint64_t tries = getNumber<uint64_t>(L, 3); pushBoolean(L, player->addOfflineTrainingTries(skillType, tries)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetOfflineTrainingSkill(lua_State* L) { // player:getOfflineTrainingSkill() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->getOfflineTrainingSkill()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerSetOfflineTrainingSkill(lua_State* L) { // player:setOfflineTrainingSkill(skillId) Player* player = getUserdata<Player>(L, 1); if (player) { uint32_t skillId = getNumber<uint32_t>(L, 2); player->setOfflineTrainingSkill(skillId); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetItemCount(lua_State* L) { // player:getItemCount(itemId[, subType = -1]) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } uint16_t itemId; if (isNumber(L, 2)) { itemId = getNumber<uint16_t>(L, 2); } else { itemId = Item::items.getItemIdByName(getString(L, 2)); if (itemId == 0) { lua_pushnil(L); return 1; } } int32_t subType = getNumber<int32_t>(L, 3, -1); lua_pushnumber(L, player->getItemTypeCount(itemId, subType)); return 1; } int LuaScriptInterface::luaPlayerGetItemById(lua_State* L) { // player:getItemById(itemId, deepSearch[, subType = -1]) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } uint16_t itemId; if (isNumber(L, 2)) { itemId = getNumber<uint16_t>(L, 2); } else { itemId = Item::items.getItemIdByName(getString(L, 2)); if (itemId == 0) { lua_pushnil(L); return 1; } } bool deepSearch = getBoolean(L, 3); int32_t subType = getNumber<int32_t>(L, 4, -1); Item* item = g_game.findItemOfType(player, itemId, deepSearch, subType); if (item) { pushUserdata<Item>(L, item); setItemMetatable(L, -1, item); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetVocation(lua_State* L) { // player:getVocation() Player* player = getUserdata<Player>(L, 1); if (player) { pushUserdata<Vocation>(L, player->getVocation()); setMetatable(L, -1, "Vocation"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerSetVocation(lua_State* L) { // player:setVocation(id or name or userdata) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } Vocation* vocation; if (isNumber(L, 2)) { vocation = g_vocations.getVocation(getNumber<uint16_t>(L, 2)); } else if (isString(L, 2)) { vocation = g_vocations.getVocation(g_vocations.getVocationId(getString(L, 2))); } else if (isUserdata(L, 2)) { vocation = getUserdata<Vocation>(L, 2); } else { vocation = nullptr; } if (!vocation) { pushBoolean(L, false); return 1; } player->setVocation(vocation->getId()); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaPlayerGetSex(lua_State* L) { // player:getSex() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->getSex()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerSetSex(lua_State* L) { // player:setSex(newSex) Player* player = getUserdata<Player>(L, 1); if (player) { PlayerSex_t newSex = getNumber<PlayerSex_t>(L, 2); player->setSex(newSex); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int 
LuaScriptInterface::luaPlayerGetTown(lua_State* L)
{
    // player:getTown()
    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        pushUserdata<Town>(L, player->getTown());
        setMetatable(L, -1, "Town");
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerSetTown(lua_State* L)
{
    // player:setTown(town)
    Town* town = getUserdata<Town>(L, 2);
    if (!town) {
        pushBoolean(L, false);
        return 1;
    }

    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        player->setTown(town);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerGetGuild(lua_State* L)
{
    // player:getGuild()
    Player* player = getUserdata<Player>(L, 1);
    if (!player) {
        lua_pushnil(L);
        return 1;
    }

    Guild* guild = player->getGuild();
    if (!guild) {
        lua_pushnil(L);
        return 1;
    }

    pushUserdata<Guild>(L, guild);
    setMetatable(L, -1, "Guild");
    return 1;
}

int LuaScriptInterface::luaPlayerSetGuild(lua_State* L)
{
    // player:setGuild(guild)
    Player* player = getUserdata<Player>(L, 1);
    if (!player) {
        lua_pushnil(L);
        return 1;
    }

    player->setGuild(getUserdata<Guild>(L, 2));
    pushBoolean(L, true);
    return 1;
}

int LuaScriptInterface::luaPlayerGetGuildLevel(lua_State* L)
{
    // player:getGuildLevel()
    Player* player = getUserdata<Player>(L, 1);
    if (player && player->getGuild()) {
        lua_pushnumber(L, player->getGuildRank()->level);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerSetGuildLevel(lua_State* L)
{
    // player:setGuildLevel(level)
    uint8_t level = getNumber<uint8_t>(L, 2);
    Player* player = getUserdata<Player>(L, 1);
    if (!player || !player->getGuild()) {
        lua_pushnil(L);
        return 1;
    }

    const GuildRank* rank = player->getGuild()->getRankByLevel(level);
    if (!rank) {
        pushBoolean(L, false);
    } else {
        player->setGuildRank(rank);
        pushBoolean(L, true);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerGetGuildNick(lua_State* L)
{
    // player:getGuildNick()
    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        pushString(L, player->getGuildNick());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerSetGuildNick(lua_State* L)
{
    // player:setGuildNick(nick)
    const std::string& nick = getString(L, 2);
    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        player->setGuildNick(nick);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerGetGroup(lua_State* L)
{
    // player:getGroup()
    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        pushUserdata<Group>(L, player->getGroup());
        setMetatable(L, -1, "Group");
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerSetGroup(lua_State* L)
{
    // player:setGroup(group)
    Group* group = getUserdata<Group>(L, 2);
    if (!group) {
        pushBoolean(L, false);
        return 1;
    }

    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        player->setGroup(group);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerGetStamina(lua_State* L)
{
    // player:getStamina()
    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        lua_pushnumber(L, player->getStaminaMinutes());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerSetStamina(lua_State* L)
{
    // player:setStamina(stamina)
    uint16_t stamina = getNumber<uint16_t>(L, 2);
    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        // stamina is stored in minutes and capped at 2520 (42 hours)
        player->staminaMinutes = std::min<uint16_t>(2520, stamina);
        player->sendStats();
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}
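/*
 * Usage sketch (Lua side) for the stamina bindings above — a hedged example,
 * not engine code; the player name is a placeholder. Note that setStamina
 * clamps the value to 2520 minutes, matching the implementation above.
 *
 *   local player = Player("Bob")                  -- assumed player name
 *   if player then
 *       player:setStamina(3000)                   -- stored as 2520 after clamping
 *       print(player:getStamina())                --> 2520
 *   end
 */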
int LuaScriptInterface::luaPlayerGetSoul(lua_State* L)
{
    // player:getSoul()
    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        lua_pushnumber(L, player->getSoul());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerAddSoul(lua_State* L)
{
    // player:addSoul(soulChange)
    int32_t soulChange = getNumber<int32_t>(L, 2);
    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        player->changeSoul(soulChange);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerGetMaxSoul(lua_State* L)
{
    // player:getMaxSoul()
    Player* player = getUserdata<Player>(L, 1);
    if (player && player->vocation) {
        lua_pushnumber(L, player->vocation->getSoulMax());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerGetBankBalance(lua_State* L)
{
    // player:getBankBalance()
    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        lua_pushnumber(L, player->getBankBalance());
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerSetBankBalance(lua_State* L)
{
    // player:setBankBalance(bankBalance)
    Player* player = getUserdata<Player>(L, 1);
    if (player) {
        player->setBankBalance(getNumber<uint64_t>(L, 2));
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerGetStorageValue(lua_State* L)
{
    // player:getStorageValue(key)
    Player* player = getUserdata<Player>(L, 1);
    if (!player) {
        lua_pushnil(L);
        return 1;
    }

    uint32_t key = getNumber<uint32_t>(L, 2);
    int32_t value;
    if (player->getStorageValue(key, value)) {
        lua_pushnumber(L, value);
    } else {
        lua_pushnumber(L, -1);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerSetStorageValue(lua_State* L)
{
    // player:setStorageValue(key, value)
    int32_t value = getNumber<int32_t>(L, 3);
    uint32_t key = getNumber<uint32_t>(L, 2);
    Player* player = getUserdata<Player>(L, 1);
    if (IS_IN_KEYRANGE(key, RESERVED_RANGE)) {
        std::ostringstream ss;
        ss << "Accessing reserved range: " << key;
        reportErrorFunc(ss.str());
        pushBoolean(L, false);
        return 1;
    }

    if (player) {
        player->addStorageValue(key, value);
        pushBoolean(L, true);
    } else {
        lua_pushnil(L);
    }
    return 1;
}

int LuaScriptInterface::luaPlayerAddItem(lua_State* L)
{
    // player:addItem(itemId[, count = 1[, canDropOnMap = true[, subType = 1[, slot = CONST_SLOT_WHEREEVER]]]])
    Player* player = getUserdata<Player>(L, 1);
    if (!player) {
        pushBoolean(L, false);
        return 1;
    }

    uint16_t itemId;
    if (isNumber(L, 2)) {
        itemId = getNumber<uint16_t>(L, 2);
    } else {
        itemId = Item::items.getItemIdByName(getString(L, 2));
        if (itemId == 0) {
            lua_pushnil(L);
            return 1;
        }
    }

    int32_t count = getNumber<int32_t>(L, 3, 1);
    int32_t subType = getNumber<int32_t>(L, 5, 1);

    const ItemType& it = Item::items[itemId];

    int32_t itemCount = 1;
    int parameters = lua_gettop(L);
    if (parameters >= 4) {
        itemCount = std::max<int32_t>(1, count);
    } else if (it.hasSubType()) {
        if (it.stackable) {
            itemCount = std::ceil(count / 100.f);
        }
        subType = count;
    } else {
        itemCount = std::max<int32_t>(1, count);
    }

    bool hasTable = itemCount > 1;
    if (hasTable) {
        lua_newtable(L);
    } else if (itemCount == 0) {
        lua_pushnil(L);
        return 1;
    }

    bool canDropOnMap = getBoolean(L, 4, true);
    slots_t slot = getNumber<slots_t>(L, 6, CONST_SLOT_WHEREEVER);
    for (int32_t i = 1; i <= itemCount; ++i) {
        int32_t stackCount = subType;
        if (it.stackable) {
            stackCount = std::min<int32_t>(stackCount, 100);
            subType -= stackCount;
        }

        Item* item = Item::CreateItem(itemId, stackCount);
        if (!item) {
            if (!hasTable) {
                lua_pushnil(L);
            }
            return 1;
        }

        ReturnValue ret = g_game.internalPlayerAddItem(player, item, canDropOnMap, slot);
        if (ret != RETURNVALUE_NOERROR) {
            delete item;
            if (!hasTable) {
                lua_pushnil(L);
            }
            return 1;
        }

        if (hasTable) {
lua_pushnumber(L, i); pushUserdata<Item>(L, item); setItemMetatable(L, -1, item); lua_settable(L, -3); } else { pushUserdata<Item>(L, item); setItemMetatable(L, -1, item); } } return 1; } int LuaScriptInterface::luaPlayerAddItemEx(lua_State* L) { // player:addItemEx(item[, canDropOnMap = false[, index = INDEX_WHEREEVER[, flags = 0]]]) // player:addItemEx(item[, canDropOnMap = true[, slot = CONST_SLOT_WHEREEVER]]) Item* item = getUserdata<Item>(L, 2); if (!item) { reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND)); pushBoolean(L, false); return 1; } Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } if (item->getParent() != VirtualCylinder::virtualCylinder) { reportErrorFunc("Item already has a parent"); pushBoolean(L, false); return 1; } bool canDropOnMap = getBoolean(L, 3, false); ReturnValue returnValue; if (canDropOnMap) { slots_t slot = getNumber<slots_t>(L, 4, CONST_SLOT_WHEREEVER); returnValue = g_game.internalPlayerAddItem(player, item, true, slot); } else { int32_t index = getNumber<int32_t>(L, 4, INDEX_WHEREEVER); uint32_t flags = getNumber<uint32_t>(L, 5, 0); returnValue = g_game.internalAddItem(player, item, index, flags); } if (returnValue == RETURNVALUE_NOERROR) { ScriptEnvironment::removeTempItem(item); } lua_pushnumber(L, returnValue); return 1; } int LuaScriptInterface::luaPlayerRemoveItem(lua_State* L) { // player:removeItem(itemId, count[, subType = -1[, ignoreEquipped = false]]) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } uint16_t itemId; if (isNumber(L, 2)) { itemId = getNumber<uint16_t>(L, 2); } else { itemId = Item::items.getItemIdByName(getString(L, 2)); if (itemId == 0) { lua_pushnil(L); return 1; } } uint32_t count = getNumber<uint32_t>(L, 3); int32_t subType = getNumber<int32_t>(L, 4, -1); bool ignoreEquipped = getBoolean(L, 5, false); pushBoolean(L, player->removeItemOfType(itemId, count, subType, ignoreEquipped)); return 1; } int LuaScriptInterface::luaPlayerGetMoney(lua_State* L) { // player:getMoney() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->getMoney()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerAddMoney(lua_State* L) { // player:addMoney(money) uint64_t money = getNumber<uint64_t>(L, 2); Player* player = getUserdata<Player>(L, 1); if (player) { g_game.addMoney(player, money); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerRemoveMoney(lua_State* L) { // player:removeMoney(money) Player* player = getUserdata<Player>(L, 1); if (player) { uint64_t money = getNumber<uint64_t>(L, 2); pushBoolean(L, g_game.removeMoney(player, money)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerShowTextDialog(lua_State* L) { // player:showTextDialog(itemId[, text[, canWrite[, length]]]) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } int32_t length = getNumber<int32_t>(L, 5, -1); bool canWrite = getBoolean(L, 4, false); std::string text; int parameters = lua_gettop(L); if (parameters >= 3) { text = getString(L, 3); } uint16_t itemId; if (isNumber(L, 2)) { itemId = getNumber<uint16_t>(L, 2); } else { itemId = Item::items.getItemIdByName(getString(L, 2)); if (itemId == 0) { lua_pushnil(L); return 1; } } Item* item = Item::CreateItem(itemId); if (!item) { reportErrorFunc(getErrorDesc(LUA_ERROR_ITEM_NOT_FOUND)); pushBoolean(L, false); return 1; } if (length < 0) { length = Item::items[item->getID()].maxTextLen; } if 
(!text.empty()) { item->setText(text); length = std::max<int32_t>(text.size(), length); } item->setParent(player); player->setWriteItem(item, length); player->sendTextWindow(item, length, canWrite); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaPlayerSendTextMessage(lua_State* L) { // player:sendTextMessage(type, text[, position, primaryValue = 0, primaryColor = TEXTCOLOR_NONE[, secondaryValue = 0, secondaryColor = TEXTCOLOR_NONE]]) int parameters = lua_gettop(L); TextMessage message(getNumber<MessageClasses>(L, 2), getString(L, 3)); if (parameters >= 6) { message.position = getPosition(L, 4); message.primary.value = getNumber<int32_t>(L, 5); message.primary.color = getNumber<TextColor_t>(L, 6); } if (parameters >= 8) { message.secondary.value = getNumber<int32_t>(L, 7); message.secondary.color = getNumber<TextColor_t>(L, 8); } Player* player = getUserdata<Player>(L, 1); if (player) { player->sendTextMessage(message); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerSendChannelMessage(lua_State* L) { // player:sendChannelMessage(author, text, type, channelId) uint16_t channelId = getNumber<uint16_t>(L, 5); SpeakClasses type = getNumber<SpeakClasses>(L, 4); const std::string& text = getString(L, 3); const std::string& author = getString(L, 2); Player* player = getUserdata<Player>(L, 1); if (player) { player->sendChannelMessage(author, text, type, channelId); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerSendPrivateMessage(lua_State* L) { // player:sendPrivateMessage(speaker, text[, type]) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } const Player* speaker = getUserdata<const Player>(L, 2); const std::string& text = getString(L, 3); SpeakClasses type = getNumber<SpeakClasses>(L, 4, TALKTYPE_PRIVATE_FROM); player->sendPrivateMessage(speaker, type, text); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaPlayerChannelSay(lua_State* L) { // player:channelSay(speaker, type, text, channelId) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } Creature* speaker = getCreature(L, 2); SpeakClasses type = getNumber<SpeakClasses>(L, 3); const std::string& text = getString(L, 4); uint16_t channelId = getNumber<uint16_t>(L, 5); player->sendToChannel(speaker, type, text, channelId); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaPlayerOpenChannel(lua_State* L) { // player:openChannel(channelId) uint16_t channelId = getNumber<uint16_t>(L, 2); Player* player = getUserdata<Player>(L, 1); if (player) { g_game.playerOpenChannel(player->getID(), channelId); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetSlotItem(lua_State* L) { // player:getSlotItem(slot) const Player* player = getUserdata<const Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } uint32_t slot = getNumber<uint32_t>(L, 2); Thing* thing = player->getThing(slot); if (!thing) { lua_pushnil(L); return 1; } Item* item = thing->getItem(); if (item) { pushUserdata<Item>(L, item); setItemMetatable(L, -1, item); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetParty(lua_State* L) { // player:getParty() const Player* player = getUserdata<const Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } Party* party = player->getParty(); if (party) { pushUserdata<Party>(L, party); setMetatable(L, -1, "Party"); } else { lua_pushnil(L); } return 1; } int 
LuaScriptInterface::luaPlayerAddOutfit(lua_State* L) { // player:addOutfit(lookType) Player* player = getUserdata<Player>(L, 1); if (player) { player->addOutfit(getNumber<uint16_t>(L, 2), 0); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerAddOutfitAddon(lua_State* L) { // player:addOutfitAddon(lookType, addon) Player* player = getUserdata<Player>(L, 1); if (player) { uint16_t lookType = getNumber<uint16_t>(L, 2); uint8_t addon = getNumber<uint8_t>(L, 3); player->addOutfit(lookType, addon); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerRemoveOutfit(lua_State* L) { // player:removeOutfit(lookType) Player* player = getUserdata<Player>(L, 1); if (player) { uint16_t lookType = getNumber<uint16_t>(L, 2); pushBoolean(L, player->removeOutfit(lookType)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerRemoveOutfitAddon(lua_State* L) { // player:removeOutfitAddon(lookType, addon) Player* player = getUserdata<Player>(L, 1); if (player) { uint16_t lookType = getNumber<uint16_t>(L, 2); uint8_t addon = getNumber<uint8_t>(L, 3); pushBoolean(L, player->removeOutfitAddon(lookType, addon)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerHasOutfit(lua_State* L) { // player:hasOutfit(lookType[, addon = 0]) Player* player = getUserdata<Player>(L, 1); if (player) { uint16_t lookType = getNumber<uint16_t>(L, 2); uint8_t addon = getNumber<uint8_t>(L, 3, 0); pushBoolean(L, player->canWear(lookType, addon)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerSendOutfitWindow(lua_State* L) { // player:sendOutfitWindow() Player* player = getUserdata<Player>(L, 1); if (player) { player->sendOutfitWindow(); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerAddMount(lua_State* L) { // player:addMount(mountId) Player* player = getUserdata<Player>(L, 1); if (player) { uint8_t mountId = getNumber<uint8_t>(L, 2); pushBoolean(L, player->tameMount(mountId)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerRemoveMount(lua_State* L) { // player:removeMount(mountId) Player* player = getUserdata<Player>(L, 1); if (player) { uint8_t mountId = getNumber<uint8_t>(L, 2); pushBoolean(L, player->untameMount(mountId)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerHasMount(lua_State* L) { // player:hasMount(mountId) const Player* player = getUserdata<const Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } uint8_t mountId = getNumber<uint8_t>(L, 2); Mount* mount = g_game.mounts.getMountByID(mountId); if (mount) { pushBoolean(L, player->hasMount(mount)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetPremiumDays(lua_State* L) { // player:getPremiumDays() Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->premiumDays); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerAddPremiumDays(lua_State* L) { // player:addPremiumDays(days) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } if (player->premiumDays != std::numeric_limits<uint16_t>::max()) { uint16_t days = getNumber<uint16_t>(L, 2); int32_t addDays = std::min<int32_t>(0xFFFE - player->premiumDays, days); if (addDays > 0) { player->setPremiumDays(player->premiumDays + addDays); IOLoginData::addPremiumDays(player->getAccount(), addDays); } } pushBoolean(L, true); return 1; } int 
LuaScriptInterface::luaPlayerRemovePremiumDays(lua_State* L) { // player:removePremiumDays(days) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } if (player->premiumDays != std::numeric_limits<uint16_t>::max()) { uint16_t days = getNumber<uint16_t>(L, 2); int32_t removeDays = std::min<int32_t>(player->premiumDays, days); if (removeDays > 0) { player->setPremiumDays(player->premiumDays - removeDays); IOLoginData::removePremiumDays(player->getAccount(), removeDays); } } pushBoolean(L, true); return 1; } int LuaScriptInterface::luaPlayerHasBlessing(lua_State* L) { // player:hasBlessing(blessing) uint8_t blessing = getNumber<uint8_t>(L, 2) - 1; Player* player = getUserdata<Player>(L, 1); if (player) { pushBoolean(L, player->hasBlessing(blessing)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerAddBlessing(lua_State* L) { // player:addBlessing(blessing) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } uint8_t blessing = getNumber<uint8_t>(L, 2) - 1; if (player->hasBlessing(blessing)) { pushBoolean(L, false); return 1; } player->addBlessing(1 << blessing); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaPlayerRemoveBlessing(lua_State* L) { // player:removeBlessing(blessing) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } uint8_t blessing = getNumber<uint8_t>(L, 2) - 1; if (!player->hasBlessing(blessing)) { pushBoolean(L, false); return 1; } player->removeBlessing(1 << blessing); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaPlayerCanLearnSpell(lua_State* L) { // player:canLearnSpell(spellName) const Player* player = getUserdata<const Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } const std::string& spellName = getString(L, 2); InstantSpell* spell = g_spells->getInstantSpellByName(spellName); if (!spell) { reportErrorFunc("Spell \"" + spellName + "\" not found"); pushBoolean(L, false); return 1; } if (player->hasFlag(PlayerFlag_IgnoreSpellCheck)) { pushBoolean(L, true); return 1; } const auto& vocMap = spell->getVocMap(); if (vocMap.count(player->getVocationId()) == 0) { pushBoolean(L, false); } else if (player->getLevel() < spell->getLevel()) { pushBoolean(L, false); } else if (player->getMagicLevel() < spell->getMagicLevel()) { pushBoolean(L, false); } else { pushBoolean(L, true); } return 1; } int LuaScriptInterface::luaPlayerLearnSpell(lua_State* L) { // player:learnSpell(spellName) Player* player = getUserdata<Player>(L, 1); if (player) { const std::string& spellName = getString(L, 2); player->learnInstantSpell(spellName); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerForgetSpell(lua_State* L) { // player:forgetSpell(spellName) Player* player = getUserdata<Player>(L, 1); if (player) { const std::string& spellName = getString(L, 2); player->forgetInstantSpell(spellName); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerHasLearnedSpell(lua_State* L) { // player:hasLearnedSpell(spellName) Player* player = getUserdata<Player>(L, 1); if (player) { const std::string& spellName = getString(L, 2); pushBoolean(L, player->hasLearnedInstantSpell(spellName)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerSendTutorial(lua_State* L) { // player:sendTutorial(tutorialId) Player* player = getUserdata<Player>(L, 1); if (player) { uint8_t tutorialId = getNumber<uint8_t>(L, 2); player->sendTutorial(tutorialId); 
pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerAddMapMark(lua_State* L) { // player:addMapMark(position, type, description) Player* player = getUserdata<Player>(L, 1); if (player) { const Position& position = getPosition(L, 2); uint8_t type = getNumber<uint8_t>(L, 3); const std::string& description = getString(L, 4); player->sendAddMarker(position, type, description); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerSave(lua_State* L) { // player:save() Player* player = getUserdata<Player>(L, 1); if (player) { player->loginPosition = player->getPosition(); pushBoolean(L, IOLoginData::savePlayer(player)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerPopupFYI(lua_State* L) { // player:popupFYI(message) Player* player = getUserdata<Player>(L, 1); if (player) { const std::string& message = getString(L, 2); player->sendFYIBox(message); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerIsPzLocked(lua_State* L) { // player:isPzLocked() Player* player = getUserdata<Player>(L, 1); if (player) { pushBoolean(L, player->isPzLocked()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetClient(lua_State* L) { // player:getClient() Player* player = getUserdata<Player>(L, 1); if (player) { lua_createtable(L, 0, 2); setField(L, "version", player->getProtocolVersion()); setField(L, "os", player->getOperatingSystem()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetHouse(lua_State* L) { // player:getHouse() Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } House* house = g_game.map.houses.getHouseByPlayerId(player->getGUID()); if (house) { pushUserdata<House>(L, house); setMetatable(L, -1, "House"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerSetGhostMode(lua_State* L) { // player:setGhostMode(enabled) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } bool enabled = getBoolean(L, 2); if (player->isInGhostMode() == enabled) { pushBoolean(L, true); return 1; } player->switchGhostMode(); Tile* tile = player->getTile(); const Position& position = player->getPosition(); SpectatorHashSet spectators; g_game.map.getSpectators(spectators, position, true, true); for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); if (tmpPlayer != player && !tmpPlayer->isAccessPlayer()) { if (enabled) { tmpPlayer->sendRemoveTileThing(position, tile->getStackposOfCreature(tmpPlayer, player)); } else { tmpPlayer->sendCreatureAppear(player, position, true); } } else { tmpPlayer->sendCreatureChangeVisible(player, !enabled); } } if (player->isInGhostMode()) { for (const auto& it : g_game.getPlayers()) { if (!it.second->isAccessPlayer()) { it.second->notifyStatusChange(player, VIPSTATUS_OFFLINE); } } IOLoginData::updateOnlineStatus(player->getGUID(), false); } else { for (const auto& it : g_game.getPlayers()) { if (!it.second->isAccessPlayer()) { it.second->notifyStatusChange(player, VIPSTATUS_ONLINE); } } IOLoginData::updateOnlineStatus(player->getGUID(), true); } pushBoolean(L, true); return 1; } int LuaScriptInterface::luaPlayerGetContainerId(lua_State* L) { // player:getContainerId(container) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } Container* container = getUserdata<Container>(L, 2); if (container) { lua_pushnumber(L, 
player->getContainerID(container)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetContainerById(lua_State* L) { // player:getContainerById(id) Player* player = getUserdata<Player>(L, 1); if (!player) { lua_pushnil(L); return 1; } Container* container = player->getContainerByID(getNumber<uint8_t>(L, 2)); if (container) { pushUserdata<Container>(L, container); setMetatable(L, -1, "Container"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPlayerGetContainerIndex(lua_State* L) { // player:getContainerIndex(id) Player* player = getUserdata<Player>(L, 1); if (player) { lua_pushnumber(L, player->getContainerIndex(getNumber<uint8_t>(L, 2))); } else { lua_pushnil(L); } return 1; } // Monster int LuaScriptInterface::luaMonsterCreate(lua_State* L) { // Monster(id or userdata) Monster* monster; if (isNumber(L, 2)) { monster = g_game.getMonsterByID(getNumber<uint32_t>(L, 2)); } else if (isUserdata(L, 2)) { if (getUserdataType(L, 2) != LuaData_Monster) { lua_pushnil(L); return 1; } monster = getUserdata<Monster>(L, 2); } else { monster = nullptr; } if (monster) { pushUserdata<Monster>(L, monster); setMetatable(L, -1, "Monster"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterIsMonster(lua_State* L) { // monster:isMonster() pushBoolean(L, getUserdata<const Monster>(L, 1) != nullptr); return 1; } int LuaScriptInterface::luaMonsterGetType(lua_State* L) { // monster:getType() const Monster* monster = getUserdata<const Monster>(L, 1); if (monster) { pushUserdata<MonsterType>(L, monster->mType); setMetatable(L, -1, "MonsterType"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterGetSpawnPosition(lua_State* L) { // monster:getSpawnPosition() const Monster* monster = getUserdata<const Monster>(L, 1); if (monster) { pushPosition(L, monster->getMasterPos()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterIsInSpawnRange(lua_State* L) { // monster:isInSpawnRange([position]) Monster* monster = getUserdata<Monster>(L, 1); if (monster) { pushBoolean(L, monster->isInSpawnRange(lua_gettop(L) >= 2 ? 
getPosition(L, 2) : monster->getPosition())); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterIsIdle(lua_State* L) { // monster:isIdle() Monster* monster = getUserdata<Monster>(L, 1); if (monster) { pushBoolean(L, monster->getIdleStatus()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterSetIdle(lua_State* L) { // monster:setIdle(idle) Monster* monster = getUserdata<Monster>(L, 1); if (!monster) { lua_pushnil(L); return 1; } monster->setIdle(getBoolean(L, 2)); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaMonsterIsTarget(lua_State* L) { // monster:isTarget(creature) Monster* monster = getUserdata<Monster>(L, 1); if (monster) { const Creature* creature = getCreature(L, 2); pushBoolean(L, monster->isTarget(creature)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterIsOpponent(lua_State* L) { // monster:isOpponent(creature) Monster* monster = getUserdata<Monster>(L, 1); if (monster) { const Creature* creature = getCreature(L, 2); pushBoolean(L, monster->isOpponent(creature)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterIsFriend(lua_State* L) { // monster:isFriend(creature) Monster* monster = getUserdata<Monster>(L, 1); if (monster) { const Creature* creature = getCreature(L, 2); pushBoolean(L, monster->isFriend(creature)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterAddFriend(lua_State* L) { // monster:addFriend(creature) Monster* monster = getUserdata<Monster>(L, 1); if (monster) { Creature* creature = getCreature(L, 2); monster->addFriend(creature); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterRemoveFriend(lua_State* L) { // monster:removeFriend(creature) Monster* monster = getUserdata<Monster>(L, 1); if (monster) { Creature* creature = getCreature(L, 2); monster->removeFriend(creature); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterGetFriendList(lua_State* L) { // monster:getFriendList() Monster* monster = getUserdata<Monster>(L, 1); if (!monster) { lua_pushnil(L); return 1; } const auto& friendList = monster->getFriendList(); lua_createtable(L, friendList.size(), 0); int index = 0; for (Creature* creature : friendList) { pushUserdata<Creature>(L, creature); setCreatureMetatable(L, -1, creature); lua_rawseti(L, -2, ++index); } return 1; } int LuaScriptInterface::luaMonsterGetFriendCount(lua_State* L) { // monster:getFriendCount() Monster* monster = getUserdata<Monster>(L, 1); if (monster) { lua_pushnumber(L, monster->getFriendList().size()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterAddTarget(lua_State* L) { // monster:addTarget(creature[, pushFront = false]) Monster* monster = getUserdata<Monster>(L, 1); if (!monster) { lua_pushnil(L); return 1; } Creature* creature = getCreature(L, 2); bool pushFront = getBoolean(L, 3, false); monster->addTarget(creature, pushFront); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaMonsterRemoveTarget(lua_State* L) { // monster:removeTarget(creature) Monster* monster = getUserdata<Monster>(L, 1); if (!monster) { lua_pushnil(L); return 1; } monster->removeTarget(getCreature(L, 2)); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaMonsterGetTargetList(lua_State* L) { // monster:getTargetList() Monster* monster = getUserdata<Monster>(L, 1); if (!monster) { lua_pushnil(L); return 1; } const auto& targetList = monster->getTargetList(); lua_createtable(L, 
targetList.size(), 0); int index = 0; for (Creature* creature : targetList) { pushUserdata<Creature>(L, creature); setCreatureMetatable(L, -1, creature); lua_rawseti(L, -2, ++index); } return 1; } int LuaScriptInterface::luaMonsterGetTargetCount(lua_State* L) { // monster:getTargetCount() Monster* monster = getUserdata<Monster>(L, 1); if (monster) { lua_pushnumber(L, monster->getTargetList().size()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterSelectTarget(lua_State* L) { // monster:selectTarget(creature) Monster* monster = getUserdata<Monster>(L, 1); if (monster) { Creature* creature = getCreature(L, 2); pushBoolean(L, monster->selectTarget(creature)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterSearchTarget(lua_State* L) { // monster:searchTarget([searchType = TARGETSEARCH_DEFAULT]) Monster* monster = getUserdata<Monster>(L, 1); if (monster) { TargetSearchType_t searchType = getNumber<TargetSearchType_t>(L, 2, TARGETSEARCH_DEFAULT); pushBoolean(L, monster->searchTarget(searchType)); } else { lua_pushnil(L); } return 1; } // Npc int LuaScriptInterface::luaNpcCreate(lua_State* L) { // Npc([id or name or userdata]) Npc* npc; if (lua_gettop(L) >= 2) { if (isNumber(L, 2)) { npc = g_game.getNpcByID(getNumber<uint32_t>(L, 2)); } else if (isString(L, 2)) { npc = g_game.getNpcByName(getString(L, 2)); } else if (isUserdata(L, 2)) { if (getUserdataType(L, 2) != LuaData_Npc) { lua_pushnil(L); return 1; } npc = getUserdata<Npc>(L, 2); } else { npc = nullptr; } } else { npc = getScriptEnv()->getNpc(); } if (npc) { pushUserdata<Npc>(L, npc); setMetatable(L, -1, "Npc"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaNpcIsNpc(lua_State* L) { // npc:isNpc() pushBoolean(L, getUserdata<const Npc>(L, 1) != nullptr); return 1; } int LuaScriptInterface::luaNpcSetMasterPos(lua_State* L) { // npc:setMasterPos(pos[, radius]) Npc* npc = getUserdata<Npc>(L, 1); if (!npc) { lua_pushnil(L); return 1; } const Position& pos = getPosition(L, 2); int32_t radius = getNumber<int32_t>(L, 3, 1); npc->setMasterPos(pos, radius); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaNpcGetSpeechBubble(lua_State* L) { // npc:getSpeechBubble() Npc* npc = getUserdata<Npc>(L, 1); if (npc) { lua_pushnumber(L, npc->getSpeechBubble()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaNpcSetSpeechBubble(lua_State* L) { // npc:setSpeechBubble(speechBubble) Npc* npc = getUserdata<Npc>(L, 1); if (npc) { npc->setSpeechBubble(getNumber<uint8_t>(L, 2)); } return 0; } // Guild int LuaScriptInterface::luaGuildCreate(lua_State* L) { // Guild(id) uint32_t id = getNumber<uint32_t>(L, 2); Guild* guild = g_game.getGuild(id); if (guild) { pushUserdata<Guild>(L, guild); setMetatable(L, -1, "Guild"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGuildGetId(lua_State* L) { // guild:getId() Guild* guild = getUserdata<Guild>(L, 1); if (guild) { lua_pushnumber(L, guild->getId()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGuildGetName(lua_State* L) { // guild:getName() Guild* guild = getUserdata<Guild>(L, 1); if (guild) { pushString(L, guild->getName()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGuildGetMembersOnline(lua_State* L) { // guild:getMembersOnline() const Guild* guild = getUserdata<const Guild>(L, 1); if (!guild) { lua_pushnil(L); return 1; } const auto& members = guild->getMembersOnline(); lua_createtable(L, members.size(), 0); int index = 0; for (Player* player : members) { 
pushUserdata<Player>(L, player); setMetatable(L, -1, "Player"); lua_rawseti(L, -2, ++index); } return 1; } int LuaScriptInterface::luaGuildAddRank(lua_State* L) { // guild:addRank(id, name, level) Guild* guild = getUserdata<Guild>(L, 1); if (guild) { uint32_t id = getNumber<uint32_t>(L, 2); const std::string& name = getString(L, 3); uint8_t level = getNumber<uint8_t>(L, 4); guild->addRank(id, name, level); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGuildGetRankById(lua_State* L) { // guild:getRankById(id) Guild* guild = getUserdata<Guild>(L, 1); if (!guild) { lua_pushnil(L); return 1; } uint32_t id = getNumber<uint32_t>(L, 2); GuildRank* rank = guild->getRankById(id); if (rank) { lua_createtable(L, 0, 3); setField(L, "id", rank->id); setField(L, "name", rank->name); setField(L, "level", rank->level); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGuildGetRankByLevel(lua_State* L) { // guild:getRankByLevel(level) const Guild* guild = getUserdata<const Guild>(L, 1); if (!guild) { lua_pushnil(L); return 1; } uint8_t level = getNumber<uint8_t>(L, 2); const GuildRank* rank = guild->getRankByLevel(level); if (rank) { lua_createtable(L, 0, 3); setField(L, "id", rank->id); setField(L, "name", rank->name); setField(L, "level", rank->level); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGuildGetMotd(lua_State* L) { // guild:getMotd() Guild* guild = getUserdata<Guild>(L, 1); if (guild) { pushString(L, guild->getMotd()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGuildSetMotd(lua_State* L) { // guild:setMotd(motd) const std::string& motd = getString(L, 2); Guild* guild = getUserdata<Guild>(L, 1); if (guild) { guild->setMotd(motd); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } // Group int LuaScriptInterface::luaGroupCreate(lua_State* L) { // Group(id) uint32_t id = getNumber<uint32_t>(L, 2); Group* group = g_game.groups.getGroup(id); if (group) { pushUserdata<Group>(L, group); setMetatable(L, -1, "Group"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGroupGetId(lua_State* L) { // group:getId() Group* group = getUserdata<Group>(L, 1); if (group) { lua_pushnumber(L, group->id); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGroupGetName(lua_State* L) { // group:getName() Group* group = getUserdata<Group>(L, 1); if (group) { pushString(L, group->name); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGroupGetFlags(lua_State* L) { // group:getFlags() Group* group = getUserdata<Group>(L, 1); if (group) { lua_pushnumber(L, group->flags); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGroupGetAccess(lua_State* L) { // group:getAccess() Group* group = getUserdata<Group>(L, 1); if (group) { pushBoolean(L, group->access); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGroupGetMaxDepotItems(lua_State* L) { // group:getMaxDepotItems() Group* group = getUserdata<Group>(L, 1); if (group) { lua_pushnumber(L, group->maxDepotItems); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaGroupGetMaxVipEntries(lua_State* L) { // group:getMaxVipEntries() Group* group = getUserdata<Group>(L, 1); if (group) { lua_pushnumber(L, group->maxVipEntries); } else { lua_pushnil(L); } return 1; } // Vocation int LuaScriptInterface::luaVocationCreate(lua_State* L) { // Vocation(id or name) uint32_t id; if (isNumber(L, 2)) { id = getNumber<uint32_t>(L, 2); } else { id = g_vocations.getVocationId(getString(L, 
2)); } Vocation* vocation = g_vocations.getVocation(id); if (vocation) { pushUserdata<Vocation>(L, vocation); setMetatable(L, -1, "Vocation"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetId(lua_State* L) { // vocation:getId() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getId()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetClientId(lua_State* L) { // vocation:getClientId() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getClientId()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetName(lua_State* L) { // vocation:getName() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { pushString(L, vocation->getVocName()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetDescription(lua_State* L) { // vocation:getDescription() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { pushString(L, vocation->getVocDescription()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetRequiredSkillTries(lua_State* L) { // vocation:getRequiredSkillTries(skillType, skillLevel) Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { skills_t skillType = getNumber<skills_t>(L, 2); uint16_t skillLevel = getNumber<uint16_t>(L, 3); lua_pushnumber(L, vocation->getReqSkillTries(skillType, skillLevel)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetRequiredManaSpent(lua_State* L) { // vocation:getRequiredManaSpent(magicLevel) Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { uint32_t magicLevel = getNumber<uint32_t>(L, 2); lua_pushnumber(L, vocation->getReqMana(magicLevel)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetCapacityGain(lua_State* L) { // vocation:getCapacityGain() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getCapGain()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetHealthGain(lua_State* L) { // vocation:getHealthGain() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getHPGain()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetHealthGainTicks(lua_State* L) { // vocation:getHealthGainTicks() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getHealthGainTicks()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetHealthGainAmount(lua_State* L) { // vocation:getHealthGainAmount() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getHealthGainAmount()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetManaGain(lua_State* L) { // vocation:getManaGain() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getManaGain()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetManaGainTicks(lua_State* L) { // vocation:getManaGainTicks() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getManaGainTicks()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetManaGainAmount(lua_State* L) { // vocation:getManaGainAmount() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getManaGainAmount()); } 
else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetMaxSoul(lua_State* L) { // vocation:getMaxSoul() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getSoulMax()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetSoulGainTicks(lua_State* L) { // vocation:getSoulGainTicks() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getSoulGainTicks()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetAttackSpeed(lua_State* L) { // vocation:getAttackSpeed() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getAttackSpeed()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetBaseSpeed(lua_State* L) { // vocation:getBaseSpeed() Vocation* vocation = getUserdata<Vocation>(L, 1); if (vocation) { lua_pushnumber(L, vocation->getBaseSpeed()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetDemotion(lua_State* L) { // vocation:getDemotion() Vocation* vocation = getUserdata<Vocation>(L, 1); if (!vocation) { lua_pushnil(L); return 1; } uint16_t fromId = vocation->getFromVocation(); if (fromId == VOCATION_NONE) { lua_pushnil(L); return 1; } Vocation* demotedVocation = g_vocations.getVocation(fromId); if (demotedVocation && demotedVocation != vocation) { pushUserdata<Vocation>(L, demotedVocation); setMetatable(L, -1, "Vocation"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaVocationGetPromotion(lua_State* L) { // vocation:getPromotion() Vocation* vocation = getUserdata<Vocation>(L, 1); if (!vocation) { lua_pushnil(L); return 1; } uint16_t promotedId = g_vocations.getPromotedVocation(vocation->getId()); if (promotedId == VOCATION_NONE) { lua_pushnil(L); return 1; } Vocation* promotedVocation = g_vocations.getVocation(promotedId); if (promotedVocation && promotedVocation != vocation) { pushUserdata<Vocation>(L, promotedVocation); setMetatable(L, -1, "Vocation"); } else { lua_pushnil(L); } return 1; } // Town int LuaScriptInterface::luaTownCreate(lua_State* L) { // Town(id or name) Town* town; if (isNumber(L, 2)) { town = g_game.map.towns.getTown(getNumber<uint32_t>(L, 2)); } else if (isString(L, 2)) { town = g_game.map.towns.getTown(getString(L, 2)); } else { town = nullptr; } if (town) { pushUserdata<Town>(L, town); setMetatable(L, -1, "Town"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaTownGetId(lua_State* L) { // town:getId() Town* town = getUserdata<Town>(L, 1); if (town) { lua_pushnumber(L, town->getID()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaTownGetName(lua_State* L) { // town:getName() Town* town = getUserdata<Town>(L, 1); if (town) { pushString(L, town->getName()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaTownGetTemplePosition(lua_State* L) { // town:getTemplePosition() Town* town = getUserdata<Town>(L, 1); if (town) { pushPosition(L, town->getTemplePosition()); } else { lua_pushnil(L); } return 1; } // House int LuaScriptInterface::luaHouseCreate(lua_State* L) { // House(id) House* house = g_game.map.houses.getHouse(getNumber<uint32_t>(L, 2)); if (house) { pushUserdata<House>(L, house); setMetatable(L, -1, "House"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaHouseGetId(lua_State* L) { // house:getId() House* house = getUserdata<House>(L, 1); if (house) { lua_pushnumber(L, house->getId()); } else { lua_pushnil(L); } return 
1; } int LuaScriptInterface::luaHouseGetName(lua_State* L) { // house:getName() House* house = getUserdata<House>(L, 1); if (house) { pushString(L, house->getName()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaHouseGetTown(lua_State* L) { // house:getTown() House* house = getUserdata<House>(L, 1); if (!house) { lua_pushnil(L); return 1; } Town* town = g_game.map.towns.getTown(house->getTownId()); if (town) { pushUserdata<Town>(L, town); setMetatable(L, -1, "Town"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaHouseGetExitPosition(lua_State* L) { // house:getExitPosition() House* house = getUserdata<House>(L, 1); if (house) { pushPosition(L, house->getEntryPosition()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaHouseGetRent(lua_State* L) { // house:getRent() House* house = getUserdata<House>(L, 1); if (house) { lua_pushnumber(L, house->getRent()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaHouseGetOwnerGuid(lua_State* L) { // house:getOwnerGuid() House* house = getUserdata<House>(L, 1); if (house) { lua_pushnumber(L, house->getOwner()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaHouseSetOwnerGuid(lua_State* L) { // house:setOwnerGuid(guid[, updateDatabase = true]) House* house = getUserdata<House>(L, 1); if (house) { uint32_t guid = getNumber<uint32_t>(L, 2); bool updateDatabase = getBoolean(L, 3, true); house->setOwner(guid, updateDatabase); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaHouseGetBeds(lua_State* L) { // house:getBeds() House* house = getUserdata<House>(L, 1); if (!house) { lua_pushnil(L); return 1; } const auto& beds = house->getBeds(); lua_createtable(L, beds.size(), 0); int index = 0; for (BedItem* bedItem : beds) { pushUserdata<Item>(L, bedItem); setItemMetatable(L, -1, bedItem); lua_rawseti(L, -2, ++index); } return 1; } int LuaScriptInterface::luaHouseGetBedCount(lua_State* L) { // house:getBedCount() House* house = getUserdata<House>(L, 1); if (house) { lua_pushnumber(L, house->getBedCount()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaHouseGetDoors(lua_State* L) { // house:getDoors() House* house = getUserdata<House>(L, 1); if (!house) { lua_pushnil(L); return 1; } const auto& doors = house->getDoors(); lua_createtable(L, doors.size(), 0); int index = 0; for (Door* door : doors) { pushUserdata<Item>(L, door); setItemMetatable(L, -1, door); lua_rawseti(L, -2, ++index); } return 1; } int LuaScriptInterface::luaHouseGetDoorCount(lua_State* L) { // house:getDoorCount() House* house = getUserdata<House>(L, 1); if (house) { lua_pushnumber(L, house->getDoors().size()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaHouseGetTiles(lua_State* L) { // house:getTiles() House* house = getUserdata<House>(L, 1); if (!house) { lua_pushnil(L); return 1; } const auto& tiles = house->getTiles(); lua_createtable(L, tiles.size(), 0); int index = 0; for (Tile* tile : tiles) { pushUserdata<Tile>(L, tile); setMetatable(L, -1, "Tile"); lua_rawseti(L, -2, ++index); } return 1; } int LuaScriptInterface::luaHouseGetTileCount(lua_State* L) { // house:getTileCount() House* house = getUserdata<House>(L, 1); if (house) { lua_pushnumber(L, house->getTiles().size()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaHouseGetAccessList(lua_State* L) { // house:getAccessList(listId) House* house = getUserdata<House>(L, 1); if (!house) { lua_pushnil(L); return 1; } std::string list; uint32_t 
listId = getNumber<uint32_t>(L, 2); if (house->getAccessList(listId, list)) { pushString(L, list); } else { pushBoolean(L, false); } return 1; } int LuaScriptInterface::luaHouseSetAccessList(lua_State* L) { // house:setAccessList(listId, list) House* house = getUserdata<House>(L, 1); if (!house) { lua_pushnil(L); return 1; } uint32_t listId = getNumber<uint32_t>(L, 2); const std::string& list = getString(L, 3); house->setAccessList(listId, list); pushBoolean(L, true); return 1; } // ItemType int LuaScriptInterface::luaItemTypeCreate(lua_State* L) { // ItemType(id or name) uint32_t id; if (isNumber(L, 2)) { id = getNumber<uint32_t>(L, 2); } else { id = Item::items.getItemIdByName(getString(L, 2)); } const ItemType& itemType = Item::items[id]; pushUserdata<const ItemType>(L, &itemType); setMetatable(L, -1, "ItemType"); return 1; } int LuaScriptInterface::luaItemTypeIsCorpse(lua_State* L) { // itemType:isCorpse() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushBoolean(L, itemType->corpseType != RACE_NONE); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeIsDoor(lua_State* L) { // itemType:isDoor() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushBoolean(L, itemType->isDoor()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeIsContainer(lua_State* L) { // itemType:isContainer() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushBoolean(L, itemType->isContainer()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeIsFluidContainer(lua_State* L) { // itemType:isFluidContainer() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushBoolean(L, itemType->isFluidContainer()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeIsMovable(lua_State* L) { // itemType:isMovable() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushBoolean(L, itemType->moveable); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeIsRune(lua_State* L) { // itemType:isRune() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushBoolean(L, itemType->isRune()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeIsStackable(lua_State* L) { // itemType:isStackable() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushBoolean(L, itemType->stackable); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeIsReadable(lua_State* L) { // itemType:isReadable() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushBoolean(L, itemType->canReadText); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeIsWritable(lua_State* L) { // itemType:isWritable() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushBoolean(L, itemType->canWriteText); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetType(lua_State* L) { // itemType:getType() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->type); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetId(lua_State* L) { // itemType:getId() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->id); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetClientId(lua_State* L) 
{ // itemType:getClientId() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->clientId); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetName(lua_State* L) { // itemType:getName() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushString(L, itemType->name); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetPluralName(lua_State* L) { // itemType:getPluralName() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushString(L, itemType->getPluralName()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetArticle(lua_State* L) { // itemType:getArticle() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushString(L, itemType->article); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetDescription(lua_State* L) { // itemType:getDescription() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushString(L, itemType->description); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetSlotPosition(lua_State *L) { // itemType:getSlotPosition() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->slotPosition); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetCharges(lua_State* L) { // itemType:getCharges() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->charges); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetFluidSource(lua_State* L) { // itemType:getFluidSource() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->fluidSource); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetCapacity(lua_State* L) { // itemType:getCapacity() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->maxItems); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetWeight(lua_State* L) { // itemType:getWeight([count = 1]) uint16_t count = getNumber<uint16_t>(L, 2, 1); const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (!itemType) { lua_pushnil(L); return 1; } uint64_t weight = static_cast<uint64_t>(itemType->weight) * std::max<int32_t>(1, count); lua_pushnumber(L, weight); return 1; } int LuaScriptInterface::luaItemTypeGetHitChance(lua_State* L) { // itemType:getHitChance() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->hitChance); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetShootRange(lua_State* L) { // itemType:getShootRange() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->shootRange); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetAttack(lua_State* L) { // itemType:getAttack() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->attack); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetDefense(lua_State* L) { // itemType:getDefense() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->defense); } else { lua_pushnil(L); } return 1; } int 
LuaScriptInterface::luaItemTypeGetExtraDefense(lua_State* L) { // itemType:getExtraDefense() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->extraDefense); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetArmor(lua_State* L) { // itemType:getArmor() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->armor); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetWeaponType(lua_State* L) { // itemType:getWeaponType() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->weaponType); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetElementType(lua_State* L) { // itemType:getElementType() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (!itemType) { lua_pushnil(L); return 1; } auto& abilities = itemType->abilities; if (abilities) { lua_pushnumber(L, abilities->elementType); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetElementDamage(lua_State* L) { // itemType:getElementDamage() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (!itemType) { lua_pushnil(L); return 1; } auto& abilities = itemType->abilities; if (abilities) { lua_pushnumber(L, abilities->elementDamage); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetTransformEquipId(lua_State* L) { // itemType:getTransformEquipId() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->transformEquipTo); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetTransformDeEquipId(lua_State* L) { // itemType:getTransformDeEquipId() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->transformDeEquipTo); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetDestroyId(lua_State* L) { // itemType:getDestroyId() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->destroyTo); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetDecayId(lua_State* L) { // itemType:getDecayId() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->decayTo); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeGetRequiredLevel(lua_State* L) { // itemType:getRequiredLevel() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { lua_pushnumber(L, itemType->minReqLevel); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaItemTypeHasSubType(lua_State* L) { // itemType:hasSubType() const ItemType* itemType = getUserdata<const ItemType>(L, 1); if (itemType) { pushBoolean(L, itemType->hasSubType()); } else { lua_pushnil(L); } return 1; } // Combat int LuaScriptInterface::luaCombatCreate(lua_State* L) { // Combat() pushUserdata<Combat>(L, g_luaEnvironment.createCombatObject(getScriptEnv()->getScriptInterface())); setMetatable(L, -1, "Combat"); return 1; } int LuaScriptInterface::luaCombatSetParameter(lua_State* L) { // combat:setParameter(key, value) Combat* combat = getUserdata<Combat>(L, 1); if (!combat) { lua_pushnil(L); return 1; } CombatParam_t key = getNumber<CombatParam_t>(L, 2); uint32_t value; if (isBoolean(L, 3)) { value = getBoolean(L, 3) ? 
1 : 0; } else { value = getNumber<uint32_t>(L, 3); } combat->setParam(key, value); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaCombatSetFormula(lua_State* L) { // combat:setFormula(type, mina, minb, maxa, maxb) Combat* combat = getUserdata<Combat>(L, 1); if (!combat) { lua_pushnil(L); return 1; } formulaType_t type = getNumber<formulaType_t>(L, 2); double mina = getNumber<double>(L, 3); double minb = getNumber<double>(L, 4); double maxa = getNumber<double>(L, 5); double maxb = getNumber<double>(L, 6); combat->setPlayerCombatValues(type, mina, minb, maxa, maxb); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaCombatSetArea(lua_State* L) { // combat:setArea(area) if (getScriptEnv()->getScriptId() != EVENT_ID_LOADING) { reportErrorFunc("This function can only be used while loading the script."); lua_pushnil(L); return 1; } const AreaCombat* area = g_luaEnvironment.getAreaObject(getNumber<uint32_t>(L, 2)); if (!area) { reportErrorFunc(getErrorDesc(LUA_ERROR_AREA_NOT_FOUND)); lua_pushnil(L); return 1; } Combat* combat = getUserdata<Combat>(L, 1); if (combat) { combat->setArea(new AreaCombat(*area)); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaCombatSetCondition(lua_State* L) { // combat:setCondition(condition) Condition* condition = getUserdata<Condition>(L, 2); Combat* combat = getUserdata<Combat>(L, 1); if (combat && condition) { combat->setCondition(condition->clone()); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaCombatSetCallback(lua_State* L) { // combat:setCallback(key, function) Combat* combat = getUserdata<Combat>(L, 1); if (!combat) { lua_pushnil(L); return 1; } CallBackParam_t key = getNumber<CallBackParam_t>(L, 2); if (!combat->setCallback(key)) { lua_pushnil(L); return 1; } CallBack* callback = combat->getCallback(key); if (!callback) { lua_pushnil(L); return 1; } const std::string& function = getString(L, 3); pushBoolean(L, callback->loadCallBack(getScriptEnv()->getScriptInterface(), function)); return 1; } int LuaScriptInterface::luaCombatSetOrigin(lua_State* L) { // combat:setOrigin(origin) Combat* combat = getUserdata<Combat>(L, 1); if (combat) { combat->setOrigin(getNumber<CombatOrigin>(L, 2)); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaCombatExecute(lua_State* L) { // combat:execute(creature, variant) Combat* combat = getUserdata<Combat>(L, 1); if (!combat) { pushBoolean(L, false); return 1; } Creature* creature = getCreature(L, 2); const LuaVariant& variant = getVariant(L, 3); switch (variant.type) { case VARIANT_NUMBER: { Creature* target = g_game.getCreatureByID(variant.number); if (!target) { pushBoolean(L, false); return 1; } if (combat->hasArea()) { combat->doCombat(creature, target->getPosition()); } else { combat->doCombat(creature, target); } break; } case VARIANT_POSITION: { combat->doCombat(creature, variant.pos); break; } case VARIANT_TARGETPOSITION: { if (combat->hasArea()) { combat->doCombat(creature, variant.pos); } else { combat->postCombatEffects(creature, variant.pos); g_game.addMagicEffect(variant.pos, CONST_ME_POFF); } break; } case VARIANT_STRING: { Player* target = g_game.getPlayerByName(variant.text); if (!target) { pushBoolean(L, false); return 1; } combat->doCombat(creature, target); break; } case VARIANT_NONE: { reportErrorFunc(getErrorDesc(LUA_ERROR_VARIANT_NOT_FOUND)); pushBoolean(L, false); return 1; } default: { break; } } pushBoolean(L, true); return 1; } // Condition int 
LuaScriptInterface::luaConditionCreate(lua_State* L) { // Condition(conditionType[, conditionId = CONDITIONID_COMBAT]) ConditionType_t conditionType = getNumber<ConditionType_t>(L, 2); ConditionId_t conditionId = getNumber<ConditionId_t>(L, 3, CONDITIONID_COMBAT); Condition* condition = Condition::createCondition(conditionId, conditionType, 0, 0); if (condition) { pushUserdata<Condition>(L, condition); setMetatable(L, -1, "Condition"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaConditionDelete(lua_State* L) { // condition:delete() Condition** conditionPtr = getRawUserdata<Condition>(L, 1); if (conditionPtr && *conditionPtr) { delete *conditionPtr; *conditionPtr = nullptr; } return 0; } int LuaScriptInterface::luaConditionGetId(lua_State* L) { // condition:getId() Condition* condition = getUserdata<Condition>(L, 1); if (condition) { lua_pushnumber(L, condition->getId()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaConditionGetSubId(lua_State* L) { // condition:getSubId() Condition* condition = getUserdata<Condition>(L, 1); if (condition) { lua_pushnumber(L, condition->getSubId()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaConditionGetType(lua_State* L) { // condition:getType() Condition* condition = getUserdata<Condition>(L, 1); if (condition) { lua_pushnumber(L, condition->getType()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaConditionGetIcons(lua_State* L) { // condition:getIcons() Condition* condition = getUserdata<Condition>(L, 1); if (condition) { lua_pushnumber(L, condition->getIcons()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaConditionGetEndTime(lua_State* L) { // condition:getEndTime() Condition* condition = getUserdata<Condition>(L, 1); if (condition) { lua_pushnumber(L, condition->getEndTime()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaConditionClone(lua_State* L) { // condition:clone() Condition* condition = getUserdata<Condition>(L, 1); if (condition) { pushUserdata<Condition>(L, condition->clone()); setMetatable(L, -1, "Condition"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaConditionGetTicks(lua_State* L) { // condition:getTicks() Condition* condition = getUserdata<Condition>(L, 1); if (condition) { lua_pushnumber(L, condition->getTicks()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaConditionSetTicks(lua_State* L) { // condition:setTicks(ticks) int32_t ticks = getNumber<int32_t>(L, 2); Condition* condition = getUserdata<Condition>(L, 1); if (condition) { condition->setTicks(ticks); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaConditionSetParameter(lua_State* L) { // condition:setParameter(key, value) Condition* condition = getUserdata<Condition>(L, 1); if (!condition) { lua_pushnil(L); return 1; } ConditionParam_t key = getNumber<ConditionParam_t>(L, 2); int32_t value; if (isBoolean(L, 3)) { value = getBoolean(L, 3) ? 
1 : 0; } else { value = getNumber<int32_t>(L, 3); } condition->setParam(key, value); pushBoolean(L, true); return 1; } int LuaScriptInterface::luaConditionSetFormula(lua_State* L) { // condition:setFormula(mina, minb, maxa, maxb) double maxb = getNumber<double>(L, 5); double maxa = getNumber<double>(L, 4); double minb = getNumber<double>(L, 3); double mina = getNumber<double>(L, 2); ConditionSpeed* condition = dynamic_cast<ConditionSpeed*>(getUserdata<Condition>(L, 1)); if (condition) { condition->setFormulaVars(mina, minb, maxa, maxb); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaConditionSetOutfit(lua_State* L) { // condition:setOutfit(outfit) // condition:setOutfit(lookTypeEx, lookType, lookHead, lookBody, lookLegs, lookFeet[, lookAddons[, lookMount]]) Outfit_t outfit; if (isTable(L, 2)) { outfit = getOutfit(L, 2); } else { outfit.lookMount = getNumber<uint16_t>(L, 9, outfit.lookMount); outfit.lookAddons = getNumber<uint8_t>(L, 8, outfit.lookAddons); outfit.lookFeet = getNumber<uint8_t>(L, 7); outfit.lookLegs = getNumber<uint8_t>(L, 6); outfit.lookBody = getNumber<uint8_t>(L, 5); outfit.lookHead = getNumber<uint8_t>(L, 4); outfit.lookType = getNumber<uint16_t>(L, 3); outfit.lookTypeEx = getNumber<uint16_t>(L, 2); } ConditionOutfit* condition = dynamic_cast<ConditionOutfit*>(getUserdata<Condition>(L, 1)); if (condition) { condition->setOutfit(outfit); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaConditionAddDamage(lua_State* L) { // condition:addDamage(rounds, time, value) int32_t value = getNumber<int32_t>(L, 4); int32_t time = getNumber<int32_t>(L, 3); int32_t rounds = getNumber<int32_t>(L, 2); ConditionDamage* condition = dynamic_cast<ConditionDamage*>(getUserdata<Condition>(L, 1)); if (condition) { pushBoolean(L, condition->addDamage(rounds, time, value)); } else { lua_pushnil(L); } return 1; } // MonsterType int LuaScriptInterface::luaMonsterTypeCreate(lua_State* L) { // MonsterType(name) MonsterType* monsterType = g_monsters.getMonsterType(getString(L, 2)); if (monsterType) { pushUserdata<MonsterType>(L, monsterType); setMetatable(L, -1, "MonsterType"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeIsAttackable(lua_State* L) { // monsterType:isAttackable() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushBoolean(L, monsterType->info.isAttackable); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeIsConvinceable(lua_State* L) { // monsterType:isConvinceable() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushBoolean(L, monsterType->info.isConvinceable); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeIsSummonable(lua_State* L) { // monsterType:isSummonable() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushBoolean(L, monsterType->info.isSummonable); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeIsIllusionable(lua_State* L) { // monsterType:isIllusionable() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushBoolean(L, monsterType->info.isIllusionable); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeIsHostile(lua_State* L) { // monsterType:isHostile() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushBoolean(L, monsterType->info.isHostile); } else { lua_pushnil(L); } return 1; } int 
LuaScriptInterface::luaMonsterTypeIsPushable(lua_State* L) { // monsterType:isPushable() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushBoolean(L, monsterType->info.pushable); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeIsHealthShown(lua_State* L) { // monsterType:isHealthShown() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushBoolean(L, !monsterType->info.hiddenHealth); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeCanPushItems(lua_State* L) { // monsterType:canPushItems() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushBoolean(L, monsterType->info.canPushItems); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeCanPushCreatures(lua_State* L) { // monsterType:canPushCreatures() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushBoolean(L, monsterType->info.canPushCreatures); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetName(lua_State* L) { // monsterType:getName() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushString(L, monsterType->name); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetNameDescription(lua_State* L) { // monsterType:getNameDescription() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushString(L, monsterType->nameDescription); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetHealth(lua_State* L) { // monsterType:getHealth() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.health); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetMaxHealth(lua_State* L) { // monsterType:getMaxHealth() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.healthMax); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetRunHealth(lua_State* L) { // monsterType:getRunHealth() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.runAwayHealth); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetExperience(lua_State* L) { // monsterType:getExperience() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.experience); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetCombatImmunities(lua_State* L) { // monsterType:getCombatImmunities() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.damageImmunities); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetConditionImmunities(lua_State* L) { // monsterType:getConditionImmunities() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.conditionImmunities); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetAttackList(lua_State* L) { // monsterType:getAttackList() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (!monsterType) { lua_pushnil(L); return 1; } lua_createtable(L, monsterType->info.attackSpells.size(), 0); int index = 0; for (const auto& spellBlock : monsterType->info.attackSpells) { lua_createtable(L, 0, 8); 
setField(L, "chance", spellBlock.chance); setField(L, "isCombatSpell", spellBlock.combatSpell ? 1 : 0); setField(L, "isMelee", spellBlock.isMelee ? 1 : 0); setField(L, "minCombatValue", spellBlock.minCombatValue); setField(L, "maxCombatValue", spellBlock.maxCombatValue); setField(L, "range", spellBlock.range); setField(L, "speed", spellBlock.speed); pushUserdata<CombatSpell>(L, static_cast<CombatSpell*>(spellBlock.spell)); lua_setfield(L, -2, "spell"); lua_rawseti(L, -2, ++index); } return 1; } int LuaScriptInterface::luaMonsterTypeGetDefenseList(lua_State* L) { // monsterType:getDefenseList() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (!monsterType) { lua_pushnil(L); return 1; } lua_createtable(L, monsterType->info.defenseSpells.size(), 0); int index = 0; for (const auto& spellBlock : monsterType->info.defenseSpells) { lua_createtable(L, 0, 8); setField(L, "chance", spellBlock.chance); setField(L, "isCombatSpell", spellBlock.combatSpell ? 1 : 0); setField(L, "isMelee", spellBlock.isMelee ? 1 : 0); setField(L, "minCombatValue", spellBlock.minCombatValue); setField(L, "maxCombatValue", spellBlock.maxCombatValue); setField(L, "range", spellBlock.range); setField(L, "speed", spellBlock.speed); pushUserdata<CombatSpell>(L, static_cast<CombatSpell*>(spellBlock.spell)); lua_setfield(L, -2, "spell"); lua_rawseti(L, -2, ++index); } return 1; } int LuaScriptInterface::luaMonsterTypeGetElementList(lua_State* L) { // monsterType:getElementList() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (!monsterType) { lua_pushnil(L); return 1; } lua_createtable(L, monsterType->info.elementMap.size(), 0); for (const auto& elementEntry : monsterType->info.elementMap) { lua_pushnumber(L, elementEntry.second); lua_rawseti(L, -2, elementEntry.first); } return 1; } int LuaScriptInterface::luaMonsterTypeGetVoices(lua_State* L) { // monsterType:getVoices() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (!monsterType) { lua_pushnil(L); return 1; } int index = 0; lua_createtable(L, monsterType->info.voiceVector.size(), 0); for (const auto& voiceBlock : monsterType->info.voiceVector) { lua_createtable(L, 0, 2); setField(L, "text", voiceBlock.text); setField(L, "yellText", voiceBlock.yellText); lua_rawseti(L, -2, ++index); } return 1; } int LuaScriptInterface::luaMonsterTypeGetLoot(lua_State* L) { // monsterType:getLoot() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (!monsterType) { lua_pushnil(L); return 1; } static const std::function<void(const std::vector<LootBlock>&)> parseLoot = [&](const std::vector<LootBlock>& lootList) { lua_createtable(L, lootList.size(), 0); int index = 0; for (const auto& lootBlock : lootList) { lua_createtable(L, 0, 7); setField(L, "itemId", lootBlock.id); setField(L, "chance", lootBlock.chance); setField(L, "subType", lootBlock.subType); setField(L, "maxCount", lootBlock.countmax); setField(L, "actionId", lootBlock.actionId); setField(L, "text", lootBlock.text); parseLoot(lootBlock.childLoot); lua_setfield(L, -2, "childLoot"); lua_rawseti(L, -2, ++index); } }; parseLoot(monsterType->info.lootItems); return 1; } int LuaScriptInterface::luaMonsterTypeGetCreatureEvents(lua_State* L) { // monsterType:getCreatureEvents() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (!monsterType) { lua_pushnil(L); return 1; } int index = 0; lua_createtable(L, monsterType->info.scripts.size(), 0); for (const std::string& creatureEvent : monsterType->info.scripts) { pushString(L, creatureEvent); lua_rawseti(L, -2, ++index); } return 
1; } int LuaScriptInterface::luaMonsterTypeGetSummonList(lua_State* L) { // monsterType:getSummonList() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (!monsterType) { lua_pushnil(L); return 1; } int index = 0; lua_createtable(L, monsterType->info.summons.size(), 0); for (const auto& summonBlock : monsterType->info.summons) { lua_createtable(L, 0, 3); setField(L, "name", summonBlock.name); setField(L, "speed", summonBlock.speed); setField(L, "chance", summonBlock.chance); lua_rawseti(L, -2, ++index); } return 1; } int LuaScriptInterface::luaMonsterTypeGetMaxSummons(lua_State* L) { // monsterType:getMaxSummons() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.maxSummons); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetArmor(lua_State* L) { // monsterType:getArmor() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.armor); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetDefense(lua_State* L) { // monsterType:getDefense() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.defense); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetOutfit(lua_State* L) { // monsterType:getOutfit() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { pushOutfit(L, monsterType->info.outfit); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetRace(lua_State* L) { // monsterType:getRace() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.race); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetCorpseId(lua_State* L) { // monsterType:getCorpseId() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.lookcorpse); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetManaCost(lua_State* L) { // monsterType:getManaCost() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.manaCost); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetBaseSpeed(lua_State* L) { // monsterType:getBaseSpeed() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.baseSpeed); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetLight(lua_State* L) { // monsterType:getLight() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (!monsterType) { lua_pushnil(L); return 1; } lua_pushnumber(L, monsterType->info.light.level); lua_pushnumber(L, monsterType->info.light.color); return 2; } int LuaScriptInterface::luaMonsterTypeGetStaticAttackChance(lua_State* L) { // monsterType:getStaticAttackChance() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.staticAttackChance); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetTargetDistance(lua_State* L) { // monsterType:getTargetDistance() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.targetDistance); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetYellChance(lua_State* L) { // 
monsterType:getYellChance() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.yellChance); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetYellSpeedTicks(lua_State* L) { // monsterType:getYellSpeedTicks() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.yellSpeedTicks); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetChangeTargetChance(lua_State* L) { // monsterType:getChangeTargetChance() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.changeTargetChance); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaMonsterTypeGetChangeTargetSpeed(lua_State* L) { // monsterType:getChangeTargetSpeed() MonsterType* monsterType = getUserdata<MonsterType>(L, 1); if (monsterType) { lua_pushnumber(L, monsterType->info.changeTargetSpeed); } else { lua_pushnil(L); } return 1; } // Party int LuaScriptInterface::luaPartyDisband(lua_State* L) { // party:disband() Party** partyPtr = getRawUserdata<Party>(L, 1); if (partyPtr && *partyPtr) { Party*& party = *partyPtr; party->disband(); party = nullptr; pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartyGetLeader(lua_State* L) { // party:getLeader() Party* party = getUserdata<Party>(L, 1); if (!party) { lua_pushnil(L); return 1; } Player* leader = party->getLeader(); if (leader) { pushUserdata<Player>(L, leader); setMetatable(L, -1, "Player"); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartySetLeader(lua_State* L) { // party:setLeader(player) Player* player = getPlayer(L, 2); Party* party = getUserdata<Party>(L, 1); if (party && player) { pushBoolean(L, party->passPartyLeadership(player)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartyGetMembers(lua_State* L) { // party:getMembers() Party* party = getUserdata<Party>(L, 1); if (!party) { lua_pushnil(L); return 1; } int index = 0; lua_createtable(L, party->getMemberCount(), 0); for (Player* player : party->getMembers()) { pushUserdata<Player>(L, player); setMetatable(L, -1, "Player"); lua_rawseti(L, -2, ++index); } return 1; } int LuaScriptInterface::luaPartyGetMemberCount(lua_State* L) { // party:getMemberCount() Party* party = getUserdata<Party>(L, 1); if (party) { lua_pushnumber(L, party->getMemberCount()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartyGetInvitees(lua_State* L) { // party:getInvitees() Party* party = getUserdata<Party>(L, 1); if (party) { lua_createtable(L, party->getInvitationCount(), 0); int index = 0; for (Player* player : party->getInvitees()) { pushUserdata<Player>(L, player); setMetatable(L, -1, "Player"); lua_rawseti(L, -2, ++index); } } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartyGetInviteeCount(lua_State* L) { // party:getInviteeCount() Party* party = getUserdata<Party>(L, 1); if (party) { lua_pushnumber(L, party->getInvitationCount()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartyAddInvite(lua_State* L) { // party:addInvite(player) Player* player = getPlayer(L, 2); Party* party = getUserdata<Party>(L, 1); if (party && player) { pushBoolean(L, party->invitePlayer(*player)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartyRemoveInvite(lua_State* L) { // party:removeInvite(player) Player* player = getPlayer(L, 2); Party* party = 
getUserdata<Party>(L, 1); if (party && player) { pushBoolean(L, party->removeInvite(*player)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartyAddMember(lua_State* L) { // party:addMember(player) Player* player = getPlayer(L, 2); Party* party = getUserdata<Party>(L, 1); if (party && player) { pushBoolean(L, party->joinParty(*player)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartyRemoveMember(lua_State* L) { // party:removeMember(player) Player* player = getPlayer(L, 2); Party* party = getUserdata<Party>(L, 1); if (party && player) { pushBoolean(L, party->leaveParty(player)); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartyIsSharedExperienceActive(lua_State* L) { // party:isSharedExperienceActive() Party* party = getUserdata<Party>(L, 1); if (party) { pushBoolean(L, party->isSharedExperienceActive()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartyIsSharedExperienceEnabled(lua_State* L) { // party:isSharedExperienceEnabled() Party* party = getUserdata<Party>(L, 1); if (party) { pushBoolean(L, party->isSharedExperienceEnabled()); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartyShareExperience(lua_State* L) { // party:shareExperience(experience) uint64_t experience = getNumber<uint64_t>(L, 2); Party* party = getUserdata<Party>(L, 1); if (party) { party->shareExperience(experience); pushBoolean(L, true); } else { lua_pushnil(L); } return 1; } int LuaScriptInterface::luaPartySetSharedExperience(lua_State* L) { // party:setSharedExperience(active) bool active = getBoolean(L, 2); Party* party = getUserdata<Party>(L, 1); if (party) { pushBoolean(L, party->setSharedExperience(party->getLeader(), active)); } else { lua_pushnil(L); } return 1; } // LuaEnvironment::LuaEnvironment() : LuaScriptInterface("Main Interface") {} LuaEnvironment::~LuaEnvironment() { delete testInterface; closeState(); } bool LuaEnvironment::initState() { luaState = luaL_newstate(); if (!luaState) { return false; } luaL_openlibs(luaState); registerFunctions(); runningEventId = EVENT_ID_USER; return true; } bool LuaEnvironment::reInitState() { // TODO: get children, reload children closeState(); return initState(); } bool LuaEnvironment::closeState() { if (!luaState) { return false; } for (const auto& combatEntry : combatIdMap) { clearCombatObjects(combatEntry.first); } for (const auto& areaEntry : areaIdMap) { clearAreaObjects(areaEntry.first); } for (auto& timerEntry : timerEvents) { LuaTimerEventDesc timerEventDesc = std::move(timerEntry.second); for (int32_t parameter : timerEventDesc.parameters) { luaL_unref(luaState, LUA_REGISTRYINDEX, parameter); } luaL_unref(luaState, LUA_REGISTRYINDEX, timerEventDesc.function); } combatIdMap.clear(); areaIdMap.clear(); timerEvents.clear(); cacheFiles.clear(); lua_close(luaState); luaState = nullptr; return true; } LuaScriptInterface* LuaEnvironment::getTestInterface() { if (!testInterface) { testInterface = new LuaScriptInterface("Test Interface"); testInterface->initState(); } return testInterface; } Combat* LuaEnvironment::getCombatObject(uint32_t id) const { auto it = combatMap.find(id); if (it == combatMap.end()) { return nullptr; } return it->second; } Combat* LuaEnvironment::createCombatObject(LuaScriptInterface* interface) { Combat* combat = new Combat; combatMap[++lastCombatId] = combat; combatIdMap[interface].push_back(lastCombatId); return combat; } void LuaEnvironment::clearCombatObjects(LuaScriptInterface* interface) { auto it = combatIdMap.find(interface); 
if (it == combatIdMap.end()) { return; } for (uint32_t id : it->second) { auto itt = combatMap.find(id); if (itt != combatMap.end()) { delete itt->second; combatMap.erase(itt); } } it->second.clear(); } AreaCombat* LuaEnvironment::getAreaObject(uint32_t id) const { auto it = areaMap.find(id); if (it == areaMap.end()) { return nullptr; } return it->second; } uint32_t LuaEnvironment::createAreaObject(LuaScriptInterface* interface) { areaMap[++lastAreaId] = new AreaCombat; areaIdMap[interface].push_back(lastAreaId); return lastAreaId; } void LuaEnvironment::clearAreaObjects(LuaScriptInterface* interface) { auto it = areaIdMap.find(interface); if (it == areaIdMap.end()) { return; } for (uint32_t id : it->second) { auto itt = areaMap.find(id); if (itt != areaMap.end()) { delete itt->second; areaMap.erase(itt); } } it->second.clear(); } void LuaEnvironment::executeTimerEvent(uint32_t eventIndex) { auto it = timerEvents.find(eventIndex); if (it == timerEvents.end()) { return; } LuaTimerEventDesc timerEventDesc = std::move(it->second); timerEvents.erase(it); //push function lua_rawgeti(luaState, LUA_REGISTRYINDEX, timerEventDesc.function); //push parameters for (auto parameter : boost::adaptors::reverse(timerEventDesc.parameters)) { lua_rawgeti(luaState, LUA_REGISTRYINDEX, parameter); } //call the function if (reserveScriptEnv()) { ScriptEnvironment* env = getScriptEnv(); env->setTimerEvent(); env->setScriptId(timerEventDesc.scriptId, this); callFunction(timerEventDesc.parameters.size()); } else { std::cout << "[Error - LuaScriptInterface::executeTimerEvent] Call stack overflow" << std::endl; } //free resources luaL_unref(luaState, LUA_REGISTRYINDEX, timerEventDesc.function); for (auto parameter : timerEventDesc.parameters) { luaL_unref(luaState, LUA_REGISTRYINDEX, parameter); } }
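The executeTimerEvent implementation above leans on the Lua registry-reference pattern: the callback and each argument are kept alive with luaL_ref, replayed with lua_rawgeti, and released with luaL_unref. Below is a minimal standalone sketch of that pattern using only the stock Lua C API; the StoredCall type and helper names are hypothetical, not part of the forgottenserver codebase.

#include <lua.hpp>
#include <vector>

struct StoredCall {
    int function;                 // registry reference to the Lua function
    std::vector<int> parameters;  // registry references to its arguments
};

// Expects the stack to hold [function, arg1, ..., argN]; pops everything
// into registry slots so the call can outlive the current C function.
StoredCall storeCall(lua_State* L, int nparams)
{
    StoredCall call;
    call.parameters.resize(nparams);
    for (int i = nparams - 1; i >= 0; --i) {
        call.parameters[i] = luaL_ref(L, LUA_REGISTRYINDEX); // pops the top value
    }
    call.function = luaL_ref(L, LUA_REGISTRYINDEX);
    return call;
}

// Replays the stored call, then frees every registry slot, mirroring the
// unref bookkeeping done at the end of executeTimerEvent.
void runStoredCall(lua_State* L, const StoredCall& call)
{
    lua_rawgeti(L, LUA_REGISTRYINDEX, call.function);
    for (int ref : call.parameters) {
        lua_rawgeti(L, LUA_REGISTRYINDEX, ref);
    }
    lua_pcall(L, static_cast<int>(call.parameters.size()), 0, 0); // errors ignored in this sketch
    luaL_unref(L, LUA_REGISTRYINDEX, call.function);
    for (int ref : call.parameters) {
        luaL_unref(L, LUA_REGISTRYINDEX, ref);
    }
}

Holding registry references rather than raw stack indices is what lets the scheduled call survive past the C function that created it.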
1
13,278
That's a huge load of new dependencies. I think it's better to create a reload function on ScriptManager and control it with an enum instead.
otland-forgottenserver
cpp
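The review comment above suggests funneling reloads through a single ScriptManager entry point keyed by an enum, instead of growing one binding per subsystem. A minimal sketch of that shape, with hypothetical names (ReloadType, reloadActions, and so on) that are not taken from the project's actual API:

enum class ReloadType {
    Actions,
    Monsters,
    Spells,
    All,
};

class ScriptManager {
public:
    // One dispatch point: callers pass an enum value instead of the
    // interface gaining a dedicated reload function per subsystem.
    bool reload(ReloadType type)
    {
        switch (type) {
            case ReloadType::Actions:  return reloadActions();
            case ReloadType::Monsters: return reloadMonsters();
            case ReloadType::Spells:   return reloadSpells();
            case ReloadType::All:
                return reloadActions() && reloadMonsters() && reloadSpells();
        }
        return false;
    }

private:
    // Stubs standing in for the real per-subsystem reload logic.
    bool reloadActions()  { return true; }
    bool reloadMonsters() { return true; }
    bool reloadSpells()   { return true; }
};

Extending the set of reloadable subsystems then means adding an enumerator and one private helper, rather than another public binding.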
@@ -42,6 +42,7 @@ func NewJob() *cobra.Command {
 		NewUpgradeJivaVolumeJob(),
 		NewUpgradeCStorSPCJob(),
 		NewUpgradeCStorVolumeJob(),
+		NewUpgradeTaskJob(),
 	)
 
 	cmd.PersistentFlags().StringVarP(&options.fromVersion,
1
/*
Copyright 2019 The OpenEBS Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package executor

import (
	"flag"
	//"fmt"
	//"os"
	"strings"

	//"github.com/golang/glog"
	"github.com/spf13/cobra"
	//errors "github.com/openebs/maya/pkg/errors/v1alpha1"
)

// NewJob will setup a new upgrade job
func NewJob() *cobra.Command {
	// Create a new command.
	cmd := &cobra.Command{
		Use:   "upgrade",
		Short: "OpenEBS Upgrade Utility",
		Long: `A utility to upgrade OpenEBS Storage Pools and Volumes,
run as a Kubernetes Job`,
		PersistentPreRun: PreRun,
	}

	cmd.AddCommand(
		NewUpgradeJivaVolumeJob(),
		NewUpgradeCStorSPCJob(),
		NewUpgradeCStorVolumeJob(),
	)

	cmd.PersistentFlags().StringVarP(&options.fromVersion,
		"from-version", "",
		options.fromVersion,
		"current version of the resource.")

	cmd.PersistentFlags().StringVarP(&options.toVersion,
		"to-version", "",
		options.toVersion,
		"new version to which resource should be upgraded.")

	cmd.PersistentFlags().StringVarP(&options.openebsNamespace,
		"openebs-namespace", "",
		options.openebsNamespace,
		"namespace where openebs components are installed.")

	cmd.PersistentFlags().StringVarP(&options.imageURLPrefix,
		"to-version-image-prefix", "",
		options.imageURLPrefix,
		"[optional] custom image prefix.")

	cmd.PersistentFlags().StringVarP(&options.toVersionImageTag,
		"to-version-image-tag", "",
		options.toVersionImageTag,
		"[optional] custom image tag. If not specified, to-version will be used")

	cmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)

	// Hack: Without the following line, the logs will be prefixed with Error
	_ = flag.CommandLine.Parse([]string{})

	return cmd
}

// PreRun will check for environment variables to be read and initialized.
func PreRun(cmd *cobra.Command, args []string) {
	namespace := getOpenEBSNamespace()
	if len(strings.TrimSpace(namespace)) != 0 {
		options.openebsNamespace = namespace
	}
}
1
17,226
`NewUpgradeTaskJob` -> `NewUpgradeResource`
openebs-maya
go
@@ -0,0 +1,16 @@
+#include "system.h"
+
+#include <glibtop.h>
+#include <glibtop/mem.h>
+
+#include <glib.h>
+#include <unistd.h>
+
+int luaA_sys_get_meminfo(lua_State *L)
+{
+    glibtop_mem mem;
+    glibtop_init();
+    glibtop_get_mem(&mem);
+    lua_pushinteger(L, mem.cached);
+    return 1;
+}
1
1
8,824
Is that the memory usage of the system or the process / awesome?
awesomeWM-awesome
c
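On the reviewer's question: glibtop_get_mem fills system-wide counters, so the mem.cached value pushed in the patch is the kernel's page cache for the whole machine, not memory used by the awesome process itself. Per-process figures come from glibtop_get_proc_mem instead. A small comparison sketch, assuming libgtop is installed and its headers are on the include path:

#include <glibtop.h>
#include <glibtop/mem.h>
#include <glibtop/procmem.h>

#include <unistd.h>
#include <cstdio>

int main()
{
    glibtop_init();

    // System-wide memory counters (what the patch reads).
    glibtop_mem mem;
    glibtop_get_mem(&mem);
    std::printf("system page cache: %llu bytes\n",
                (unsigned long long)mem.cached);

    // Counters for this process only.
    glibtop_proc_mem procMem;
    glibtop_get_proc_mem(&procMem, getpid());
    std::printf("process resident set: %llu bytes\n",
                (unsigned long long)procMem.resident);

    return 0;
}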
@@ -209,8 +209,8 @@ class Realm {
     /**
      * Add a listener `callback` for the specified event `name`.
      * @param {string} name - The name of event that should cause the callback to be called.
-     * _Currently, only the "change" event supported_.
-     * @param {callback(Realm, string)} callback - Function to be called when the event occurs.
+     * _Currently, only the "change" or "schema" events supported_.
+     * @param {callback(Realm, string)|callback(Realm, Schema)} callback - Function to be called when a change event occurs.
      * Each callback will only be called once per event, regardless of the number of times
      * it was added.
      * @throws {Error} If an invalid event `name` is supplied, or if `callback` is not a function.
1
//////////////////////////////////////////////////////////////////////////// // // Copyright 2016 Realm Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////// /** * A Realm instance represents a Realm database. * * ```js * const Realm = require('realm'); * ``` * */ class Realm { /** * Indicates if this Realm contains any objects. * @type {boolean} * @readonly * @since 1.10.0 */ get empty() {} /** * The path to the file where this Realm is stored. * @type {string} * @readonly * @since 0.12.0 */ get path() {} /** * Indicates if this Realm was opened as read-only. * @type {boolean} * @readonly * @since 0.12.0 */ get readOnly() {} /** * A normalized representation of the schema provided in the * {@link Realm~Configuration Configuration} when this Realm was constructed. * @type {Realm~ObjectSchema[]} * @readonly * @since 0.12.0 */ get schema() {} /** * The current schema version of this Realm. * @type {number} * @readonly * @since 0.12.0 */ get schemaVersion() {} /** * Indicates if this Realm is in a write transaction. * @type {boolean} * @readonly * @since 1.10.3 */ get isInTransaction() {} /** * Indicates if this Realm has been closed. * @type {boolean} * @readonly * @since 2.1.0 */ get isClosed() {} /** * Gets the sync session if this is a synced Realm * @type {Session} */ get syncSession() {} /** * Create a new `Realm` instance using the provided `config`. If a Realm does not yet exist * at `config.path` (or {@link Realm.defaultPath} if not provided), then this constructor * will create it with the provided `config.schema` (which is _required_ in this case). * Otherwise, the instance will access the existing Realm from the file at that path. * In this case, `config.schema` is _optional_ or not have changed, unless * `config.schemaVersion` is incremented, in which case the Realm will be automatically * migrated to use the new schema. * @param {Realm~Configuration} [config] - **Required** when first creating the Realm. * @throws {Error} If anything in the provided `config` is invalid. * @throws {IncompatibleSyncedRealmError} when an incompatible synced Realm is opened */ constructor(config) {} /** * Open a Realm asynchronously with a promise. If the Realm is synced, it will be fully * synchronized before it is available. * @param {Realm~Configuration} config - if no config is defined, it will open the default realm * @returns {ProgressPromise} - a promise that will be resolved with the Realm instance when it's available. */ static open(config) {} /** * Open a Realm asynchronously with a callback. If the Realm is synced, it will be fully * synchronized before it is available. * @param {Realm~Configuration} config * @param {callback(error, realm)} - will be called when the Realm is ready. 
* @param {callback(transferred, transferable)} [progressCallback] - an optional callback for download progress notifications * @throws {Error} If anything in the provided `config` is invalid * @throws {IncompatibleSyncedRealmError} when an incompatible synced Realm is opened */ static openAsync(config, callback, progressCallback) {} /** * Return a configuration for a default synced Realm. The server URL for the user will be used as base for * the URL for the synced Realm. If no user is supplied, the current user will be used. * @param {Realm.Sync.User} - an optional sync user * @throws {Error} if zero or multiple users are logged in * @returns {Realm~Configuration} - a configuration matching a default synced Realm. * @since 2.3.0 */ static automaticSyncConfiguration(user) {} /** * Closes this Realm so it may be re-opened with a newer schema version. * All objects and collections from this Realm are no longer valid after calling this method. */ close() {} /** * Returns the granted privilges. * * This combines all privileges granted on the Realm/Class/Object by all Roles which * the current User is a member of into the final privileges which will * be enforced by the server. * * The privilege calculation is done locally using cached data, and inherently may * be stale. It is possible that this method may indicate that an operation is * permitted but the server will still reject it if permission is revoked before * the changes have been integrated on the server. * * Non-synchronized Realms always have permission to perform all operations. * * @param {(Realm~ObjectType|Realm.Object)} arg - the object type or the object to compute priviliges from * @returns {Object} as the computed priviliges as properties * @since 2.3.0 * @see {Realm.Permissions} for details of priviliges and roles. */ privileges(arg) {} /** * Create a new Realm object of the given type and with the specified properties. * @param {Realm~ObjectType} type - The type of Realm object to create. * @param {Object} properties - Property values for all required properties without a * default value. * @param {boolean} [update=false] - Signals that an existing object with matching primary key * should be updated. Only the primary key property and properties which should be updated * need to be specified. All missing property values will remain unchanged. * @returns {Realm.Object} */ create(type, properties, update) {} /** * Deletes the provided Realm object, or each one inside the provided collection. * @param {Realm.Object|Realm.Object[]|Realm.List|Realm.Results} object */ delete(object) {} /** * Deletes a Realm model, including all of its objects. * @param {string} name - the model name */ deleteModel(name) {} /** * **WARNING:** This will delete **all** objects in the Realm! */ deleteAll() {} /** * Returns all objects of the given `type` in the Realm. * @param {Realm~ObjectType} type - The type of Realm objects to retrieve. * @throws {Error} If type passed into this method is invalid. * @returns {Realm.Results} that will live-update as objects are created and destroyed. */ objects(type) {} /** * Searches for a Realm object by its primary key. * @param {Realm~ObjectType} type - The type of Realm object to search for. * @param {number|string} key - The primary key value of the object to search for. * @throws {Error} If type passed into this method is invalid or if the object type did * not have a `primaryKey` specified in its {@link Realm~ObjectSchema ObjectSchema}. * @returns {Realm.Object|undefined} if no object is found. 
* @since 0.14.0 */ objectForPrimaryKey(type, key) {} /** * Add a listener `callback` for the specified event `name`. * @param {string} name - The name of event that should cause the callback to be called. * _Currently, only the "change" event supported_. * @param {callback(Realm, string)} callback - Function to be called when the event occurs. * Each callback will only be called once per event, regardless of the number of times * it was added. * @throws {Error} If an invalid event `name` is supplied, or if `callback` is not a function. */ addListener(name, callback) {} /** * Remove the listener `callback` for the specfied event `name`. * @param {string} name - The event name. * _Currently, only the "change" event supported_. * @param {callback(Realm, string)} callback - Function that was previously added as a * listener for this event through the {@link Realm#addListener addListener} method. * @throws {Error} If an invalid event `name` is supplied, or if `callback` is not a function. */ removeListener(name, callback) {} /** * Remove all event listeners (restricted to the event `name`, if provided). * @param {string} [name] - The name of the event whose listeners should be removed. * _Currently, only the "change" event supported_. * @throws {Error} When invalid event `name` is supplied */ removeAllListeners(name) {} /** * Synchronously call the provided `callback` inside a write transaction. * @param {function()} callback */ write(callback) {} /** * Initiate a write transaction. * @throws {Error} When already in write transaction */ beginTransaction() {} /** * Commit a write transaction. */ commitTransaction() {} /** * Cancel a write transaction. */ cancelTransaction() {} /** * Replaces all string columns in this Realm with a string enumeration column and compacts the * database file. * * Cannot be called from a write transaction. * * Compaction will not occur if other `Realm` instances exist. * * While compaction is in progress, attempts by other threads or processes to open the database will * wait. * * Be warned that resource requirements for compaction is proportional to the amount of live data in * the database. Compaction works by writing the database contents to a temporary database file and * then replacing the database with the temporary one. * @returns {true} if compaction succeeds. */ compact() {} /** * Writes a compacted copy of the Realm to the given path. * * The destination file cannot already exist. * * Note that if this method is called from within a write transaction, the current data is written, * not the data from the point when the previous write transaction was committed. * @param {string} path path to save the Realm to * @param {ArrayBuffer|ArrayBufferView} [encryptionKey] - Optional 64-byte encryption key to encrypt the new file with. */ writeCopyTo(path, encryptionKey) {} /** * Get the current schema version of the Realm at the given path. * @param {string} path - The path to the file where the * Realm database is stored. * @param {ArrayBuffer|ArrayBufferView} [encryptionKey] - Required only when * accessing encrypted Realms. * @throws {Error} When passing an invalid or non-matching encryption key. * @returns {number} version of the schema, or `-1` if no Realm exists at `path`. */ static schemaVersion(path, encryptionKey) {} /** * Delete the Realm file for the given configuration. * @param {Realm~Configuration} config * @throws {Error} If anything in the provided `config` is invalid. 
*/ static deleteFile(config) {} } /** * This describes the different options used to create a {@link Realm} instance. * @typedef Realm~Configuration * @type {Object} * @property {ArrayBuffer|ArrayBufferView} [encryptionKey] - The 512-bit (64-byte) encryption * key used to encrypt and decrypt all data in the Realm. * @property {callback(Realm, Realm)} [migration] - The function to run if a migration is needed. * This function should provide all the logic for converting data models from previous schemas * to the new schema. * This function takes two arguments: * - `oldRealm` - The Realm before migration is performed. * - `newRealm` - The Realm that uses the latest `schema`, which should be modified as necessary. * @property {boolean} [deleteRealmIfMigrationNeeded=false] - Specifies if this Realm should be deleted * if a migration is needed. * @property {callback(number, number)} [shouldCompactOnLaunch] - The function called when opening * a Realm for the first time during the life of a process to determine if it should be compacted * before being returned to the user. The function takes two arguments: * - `totalSize` - The total file size (data + free space) * - `unusedSize` - The total bytes used by data in the file. * It returns `true` to indicate that an attempt to compact the file should be made. The compaction * will be skipped if another process is accessing it. * @property {string} [path={@link Realm.defaultPath}] - The path to the file where the * Realm database should be stored. * @property {boolean} [inMemory=false] - Specifies if this Realm should be opened in-memory. This * still requires a path (can be the default path) to identify the Realm so other processes can * open the same Realm. The file will also be used as swap space if the Realm becomes bigger than * what fits in memory, but it is not persistent and will be removed when the last instance * is closed. * @property {boolean} [readOnly=false] - Specifies if this Realm should be opened as read-only. * @property {boolean} [disableFormatUpgrade=false] - Specifies if this Realm's file format should * be automatically upgraded if it was created with an older version of the Realm library. * If set to `true` and a file format upgrade is required, an error will be thrown instead. * @property {Array<Realm~ObjectClass|Realm~ObjectSchema>} [schema] - Specifies all the * object types in this Realm. **Required** when first creating a Realm at this `path`. * If omitted, the schema will be read from the existing Realm file. * @property {number} [schemaVersion] - **Required** (and must be incremented) after * changing the `schema`. * @property {Realm.Sync~SyncConfiguration} [sync] - Sync configuration parameters. */ /** * Realm objects will inherit methods, getters, and setters from the `prototype` of this * constructor. It is **highly recommended** that this constructor inherit from * {@link Realm.Object}. * @typedef Realm~ObjectClass * @type {Class} * @property {Realm~ObjectSchema} schema - Static property specifying object schema information. */ /** * @typedef Realm~ObjectSchema * @type {Object} * @property {string} name - Represents the object type. * @property {string} [primaryKey] - The name of a `"string"` or `"int"` property * that must be unique across all objects of this type within the same Realm. * @property {Object<string, (Realm~PropertyType|Realm~ObjectSchemaProperty)>} properties - * An object where the keys are property names and the values represent the property type. 
* * @example * let MyClassSchema = { * name: 'MyClass', * primaryKey: 'pk', * properties: { * pk: 'int', * optionalFloatValue: 'float?', // or {type: 'float', optional: true} * listOfStrings: 'string[]', * listOfOptionalDates: 'date?[]', * indexedInt: {type: 'int', indexed: true}, * * linkToObject: 'MyClass', * listOfObjects: 'MyClass[]', // or {type: 'list', objectType: 'MyClass'} * objectsLinkingToThisObject: {type: 'linkingObjects', objectType: 'MyClass', property: 'linkToObject'} * } * }; */ /** * @typedef Realm~ObjectSchemaProperty * @type {Object} * @property {Realm~PropertyType} type - The type of this property. * @property {Realm~PropertyType} [objectType] - **Required** when `type` is `"list"` or `"linkingObjects"`, * and must match the type of an object in the same schema, or, for `"list"` * only, any other type which may be stored as a Realm property. * @property {string} [property] - **Required** when `type` is `"linkingObjects"`, and must match * the name of a property on the type specified in `objectType` that links to the type this property belongs to. * @property {any} [default] - The default value for this property on creation when not * otherwise specified. * @property {boolean} [optional] - Signals if this property may be assigned `null` or `undefined`. * For `"list"` properties of non-object types, this instead signals whether the values inside the list may be assigned `null` or `undefined`. * This is not supported for `"list"` properties of object types and `"linkingObjects"` properties. * @property {boolean} [indexed] - Signals if this property should be indexed. Only supported for * `"string"`, `"int"`, and `"bool"` properties. */ /** * The type of an object may either be specified as a string equal to the `name` in a * {@link Realm~ObjectSchema ObjectSchema} definition, **or** a constructor that was specified * in the {@link Realm~Configuration configuration} `schema`. * @typedef Realm~ObjectType * @type {string|Realm~ObjectClass} */ /** * A property type may be specified as one of the standard builtin types, or as * an object type inside the same schema. * * When specifying property types in an {@linkplain Realm~ObjectSchema object schema}, you * may append `?` to any of the property types to indicate that it is optional * (i.e. it can be `null` in addition to the normal values) and `[]` to * indicate that it is instead a list of that type. For example, * `optionalIntList: 'int?[]'` would declare a property which is a list of * nullable integers. The property types reported by {@linkplain Realm.Collection * collections} and in a Realm's schema will never * use these forms. * * @typedef Realm~PropertyType * @type {("bool"|"int"|"float"|"double"|"string"|"date"|"data"|"list"|"linkingObjects"|"<ObjectType>")} * * @property {boolean} "bool" - Property value may either be `true` or `false`. * @property {number} "int" - Property may be assigned any number, but will be stored as a * round integer, meaning anything after the decimal will be truncated. * @property {number} "float" - Property may be assigned any number, but will be stored as a * `float`, which may result in a loss of precision. * @property {number} "double" - Property may be assigned any number, and will have no loss * of precision. * @property {string} "string" - Property value may be any arbitrary string. * @property {Date} "date" - Property may be assigned any `Date` instance. * @property {ArrayBuffer} "data" - Property may either be assigned an `ArrayBuffer` * or `ArrayBufferView` (e.g.
`DataView`, `Int8Array`, `Float32Array`, etc.) instance, * but will always be returned as an `ArrayBuffer`. * @property {Realm.List} "list" - Property may be assigned any ordered collection * (e.g. `Array`, {@link Realm.List}, {@link Realm.Results}) of objects all matching the * `objectType` specified in the {@link Realm~ObjectSchemaProperty ObjectSchemaProperty}. * @property {Realm.Results} "linkingObjects" - Property is read-only and always returns a {@link Realm.Results} * of all the objects matching the `objectType` that are linking to the current object * through the `property` relationship specified in {@link Realm~ObjectSchemaProperty ObjectSchemaProperty}. * @property {Realm.Object} "<ObjectType>" - A string that matches the `name` of an object in the * same schema (see {@link Realm~ObjectSchema ObjectSchema}) – this property may be assigned * any object of this type from inside the same Realm, and will always be _optional_ * (meaning it may also be assigned `null` or `undefined`). */
1
17,033
"change" *and* "schema" events (A bunch of these; not marking them all).
realm-realm-js
js
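An illustration of the review comment above: both event names would need to appear in the listener documentation. Below is a minimal JavaScript sketch of subscribing to each, assuming the "schema" event the reviewer refers to exists alongside "change"; the Dog schema and the callback bodies are hypothetical.

const Realm = require('realm');

const realm = new Realm({ schema: [{ name: 'Dog', properties: { name: 'string' } }] });

// "change" fires after each write transaction commits.
realm.addListener('change', (sender, name) => {
  console.log(`event fired: ${name}`);
});

// "schema" is the second event the reviewer says the docs should mention;
// its name and callback shape are assumptions based on the review comment.
realm.addListener('schema', (sender, name, schema) => {
  console.log(`schema now has ${schema.length} object types`);
});

// Cleanup mirrors the removeListener/removeAllListeners methods documented above.
realm.removeAllListeners();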
@@ -73,4 +73,13 @@ public interface RewriteFiles extends SnapshotUpdate<RewriteFiles> { * @return this for method chaining */ RewriteFiles validateFromSnapshot(long snapshotId); + + /** + * Use the specified sequence number for the new manifest of the data files added in this update, + * instead of inheriting the sequence number of the snapshot that will be created. + * + * @param sequenceNumber a sequence number + * @return this for method chaining + */ + RewriteFiles overrideSequenceNumberForNewDataFiles(long sequenceNumber); }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.util.Set; import org.apache.iceberg.exceptions.ValidationException; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; /** * API for replacing files in a table. * <p> * This API accumulates file additions and deletions, produces a new {@link Snapshot} of the * changes, and commits that snapshot as the current. * <p> * When committing, these changes will be applied to the latest table snapshot. Commit conflicts * will be resolved by applying the changes to the new latest snapshot and reattempting the commit. * If any of the deleted files are no longer in the latest snapshot when reattempting, the commit * will throw a {@link ValidationException}. */ public interface RewriteFiles extends SnapshotUpdate<RewriteFiles> { /** * Add a rewrite that replaces one set of data files with another set that contains the same data. * * @param filesToDelete files that will be replaced (deleted), cannot be null or empty. * @param filesToAdd files that will be added, cannot be null or empty. * @return this for method chaining */ default RewriteFiles rewriteFiles(Set<DataFile> filesToDelete, Set<DataFile> filesToAdd) { return rewriteFiles( filesToDelete, ImmutableSet.of(), filesToAdd, ImmutableSet.of() ); } /** * Add a rewrite that replaces one set of files with another set that contains the same data. * * @param dataFilesToReplace data files that will be replaced (deleted). * @param deleteFilesToReplace delete files that will be replaced (deleted). * @param dataFilesToAdd data files that will be added. * @param deleteFilesToAdd delete files that will be added. * @return this for method chaining. */ RewriteFiles rewriteFiles(Set<DataFile> dataFilesToReplace, Set<DeleteFile> deleteFilesToReplace, Set<DataFile> dataFilesToAdd, Set<DeleteFile> deleteFilesToAdd); /** * Set the snapshot ID used in any reads for this operation. * <p> * Validations will check changes after this snapshot ID. If this is not called, all ancestor snapshots through the * table's initial snapshot are validated. * * @param snapshotId a snapshot ID * @return this for method chaining */ RewriteFiles validateFromSnapshot(long snapshotId); }
1
45,767
I think there is probably a shorter, more descriptive name for this. Something like `commitAtSequenceNumber`?
apache-iceberg
java
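To make the naming suggestion concrete, the added method could read as follows if the reviewer's `commitAtSequenceNumber` were adopted; this is a sketch of the proposal, not an API that exists in Iceberg.

public interface RewriteFiles extends SnapshotUpdate<RewriteFiles> {
  // ... existing rewriteFiles(...) and validateFromSnapshot(...) declarations ...

  /**
   * Use the given sequence number for the new manifest of the added data files,
   * instead of inheriting the sequence number of the snapshot this update creates.
   *
   * @param sequenceNumber a sequence number
   * @return this for method chaining
   */
  RewriteFiles commitAtSequenceNumber(long sequenceNumber);
}

A possible counterpoint: the operation only pins the sequence number of the new data-file manifest, so a name scoped to the manifest may read more accurately than one that sounds like it pins the whole commit.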
@@ -23,6 +23,7 @@ class Organization < ActiveRecord::Base scope :case_insensitive_url_name, ->(mixed_case) { where(['lower(url_name) = ?', mixed_case.downcase]) } validates :name, presence: true, length: 3..85, uniqueness: { case_sensitive: false } + validates :url_name, presence: true, length: 1..60, allow_nil: false, uniqueness: true, case_sensitive: false validates :description, length: 0..800, allow_nil: true validates :org_type, inclusion: { in: ORG_TYPES.values } before_validation :clean_strings_and_urls
1
class Organization < ActiveRecord::Base include OrganizationSearchables include Tsearch ORG_TYPES = { 'Commercial' => 1, 'Education' => 2, 'Government' => 3, 'Non-Profit' => 4 } fix_string_column_encodings! belongs_to :logo has_one :permission, as: :target has_many :projects, -> { where.not(deleted: true) } has_many :accounts, -> { where(Account.arel_table[:level].gteq(0)) } has_many :manages, -> { where(deleted_at: nil, deleted_by: nil) }, as: 'target' has_many :managers, through: :manages, source: :account scope :from_param, lambda { |param| active.where(Organization.arel_table[:url_name].eq(param).or(Organization.arel_table[:id].eq(param))) } scope :active, -> { where.not(deleted: true) } scope :managed_by, lambda { |account| joins(:manages).where.not(deleted: true, manages: { approved_by: nil }).where(manages: { account_id: account.id }) } scope :case_insensitive_url_name, ->(mixed_case) { where(['lower(url_name) = ?', mixed_case.downcase]) } validates :name, presence: true, length: 3..85, uniqueness: { case_sensitive: false } validates :description, length: 0..800, allow_nil: true validates :org_type, inclusion: { in: ORG_TYPES.values } before_validation :clean_strings_and_urls acts_as_editable editable_attributes: [:name, :url_name, :org_type, :logo_id, :description, :homepage_url], merge_within: 30.minutes acts_as_protected after_create :create_restricted_permission after_save :check_change_in_delete def to_param url_name || id.to_s end def active_managers Manage.organizations.for_target(self).active.to_a.map(&:account) end def allow_undo_to_nil?(key) ![:name, :org_type].include?(key) end def org_type_label ORG_TYPES.invert[org_type] || '' end def affiliated_committers_stats Organization::Affiliated.new(self).stats end def affiliated_committers(page, limit) Organization::Affiliated.new(self).committers(page, limit) end def affiliated_projects(page, limit) Organization::Affiliated.new(self).projects(page, limit) end def outside_committers_stats Organization::Outside.new(self).stats end def outside_committers(page, limit) Organization::Outside.new(self).committers(page, limit) end def outside_projects(page, limit) Organization::Outside.new(self).projects(page, limit) end def affiliators_count @affiliators_count ||= accounts.count(joins: [:person, :active_positions], select: 'DISTINCT(accounts.id)') end private def create_restricted_permission Permission.create(target: self, remainder: true) end def clean_strings_and_urls self.name = String.clean_string(name) self.description = String.clean_string(description) # TODO: fix these once we have links implemented # self.download_url = String.clean_url(download_url) end def project_claim_edits(undone) Edit.where(target_type: 'Project', key: 'organization_id', value: id.to_s, undone: undone).to_a end def check_change_in_delete return false unless changed.include?('deleted') project_claim_edits(!deleted?).each { |edit| edit.send(deleted? ? :undo! : :redo!, editor_account) } end end
1
7,658
The `url_name` should definitely be more than just 1 character long at the shortest. How about 3 or 4 characters?
blackducksoftware-ohloh-ui
rb
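Applying the comment, the validation in the patch might become the following Ruby sketch; the 3-character floor comes from the review suggestion, and `case_sensitive:` is moved inside the `uniqueness:` hash, since Rails expects it there rather than as a top-level validator option.

class Organization < ActiveRecord::Base
  # Sketch of the reviewer-adjusted rule: at least 3 characters, unique
  # regardless of case. presence: true already rejects nil, so the patch's
  # allow_nil: false option is redundant and dropped here.
  validates :url_name, presence: true, length: 3..60,
                       uniqueness: { case_sensitive: false }
end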
@@ -22,9 +22,6 @@ * This copyright notice MUST APPEAR in all copies of the script! ***************************************************************/ -use TYPO3\CMS\Core\Utility\GeneralUtility; -use TYPO3\CMS\Frontend\Controller\TypoScriptFrontendController; - /** * Page Indexer to index TYPO3 pages used by the Index Queue.
1
<?php /*************************************************************** * Copyright notice * * (c) 2009-2015 Ingo Renner <[email protected]> * All rights reserved * * This script is part of the TYPO3 project. The TYPO3 project is * free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The GNU General Public License can be found at * http://www.gnu.org/copyleft/gpl.html. * * This script is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This copyright notice MUST APPEAR in all copies of the script! ***************************************************************/ use TYPO3\CMS\Core\Utility\GeneralUtility; use TYPO3\CMS\Frontend\Controller\TypoScriptFrontendController; /** * Page Indexer to index TYPO3 pages used by the Index Queue. * * @author Ingo Renner <[email protected]> * @author Daniel Poetzinger <[email protected]> * @author Timo Schmidt <[email protected]> * @package TYPO3 * @subpackage solr */ class Tx_Solr_Typo3PageIndexer { /** * Solr server connection. * * @var Tx_Solr_SolrService */ protected $solrConnection = NULL; /** * Frontend page object (TSFE). * * @var TypoScriptFrontendController */ protected $page = NULL; /** * Content extractor to extract content from TYPO3 pages * * @var Tx_Solr_Typo3PageContentExtractor */ protected $contentExtractor = NULL; /** * URL to be indexed as the page's URL * * @var string */ protected $pageUrl = ''; /** * The page's access rootline * * @var Tx_Solr_Access_Rootline */ protected $pageAccessRootline = NULL; /** * ID of the current page's Solr document. * * @var string */ protected static $pageSolrDocumentId = ''; /** * The Solr document generated for the current page. * * @var Apache_Solr_Document */ protected static $pageSolrDocument = NULL; /** * Documents that have been sent to Solr * * @var array */ protected $documentsSentToSolr = array(); /** * Constructor for class Tx_Solr_Typo3PageIndexer * * @param TypoScriptFrontendController $page The page to index */ public function __construct(TypoScriptFrontendController $page) { $this->page = $page; $this->pageUrl = GeneralUtility::getIndpEnv('TYPO3_REQUEST_URL'); try { $this->initializeSolrConnection(); } catch (Exception $e) { $this->log($e->getMessage() . ' Error code: ' . $e->getCode(), 3); // TODO extract to a class "ExceptionLogger" if ($GLOBALS['TSFE']->tmpl->setup['plugin.']['tx_solr.']['logging.']['exceptions']) { GeneralUtility::devLog('Exception while trying to index a page', 'solr', 3, array( $e->__toString() )); } } $this->contentExtractor = GeneralUtility::makeInstance( 'Tx_Solr_Typo3PageContentExtractor', $this->page->content, $this->page->renderCharset ); $this->pageAccessRootline = GeneralUtility::makeInstance( 'Tx_Solr_Access_Rootline', '' ); } /** * Initializes the Solr server connection. * * @throws Exception when no Solr connection can be established.
*/ protected function initializeSolrConnection() { $solr = GeneralUtility::makeInstance('Tx_Solr_ConnectionManager')->getConnectionByPageId( $this->page->id, $this->page->sys_language_uid ); // do not continue if no server is available if (!$solr->ping()) { throw new Exception( 'No Solr instance available while trying to index a page.', 1234790825 ); } $this->solrConnection = $solr; } /** * Allows providing a Solr server connection other than the one * initialized by the constructor. * * @param Tx_Solr_SolrService $solrConnection Solr connection * @throws Exception if the Solr server cannot be reached */ public function setSolrConnection(Tx_Solr_SolrService $solrConnection) { if (!$solrConnection->ping()) { throw new Exception( 'Could not connect to Solr server.', 1323946472 ); } $this->solrConnection = $solrConnection; } /** * Indexes a page. * * @return boolean TRUE after successfully indexing the page, FALSE on error * @throws UnexpectedValueException if a page document post processor fails to implement interface Tx_Solr_PageDocumentPostProcessor */ public function indexPage() { $pageIndexed = FALSE; $documents = array(); // this will become useful as soon as we start indexing individual records instead of whole pages if (is_null($this->solrConnection)) { // intended early return as it doesn't make sense to continue // and waste processing time if the solr server isn't available // anyway // FIXME use an exception return $pageIndexed; } $pageDocument = $this->getPageDocument(); $pageDocument = $this->substitutePageDocument($pageDocument); if (is_array($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPagePostProcessPageDocument'])) { foreach ($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPagePostProcessPageDocument'] as $classReference) { $postProcessor = GeneralUtility::getUserObj($classReference); if ($postProcessor instanceof Tx_Solr_PageDocumentPostProcessor) { $postProcessor->postProcessPageDocument($pageDocument, $this->page); } else { throw new UnexpectedValueException( get_class($postProcessor) . ' must implement interface Tx_Solr_PageDocumentPostProcessor', 1397739154 ); } } } self::$pageSolrDocument = $pageDocument; $documents[] = $pageDocument; $documents = $this->getAdditionalDocuments($pageDocument, $documents); $this->processDocuments($documents); $pageIndexed = $this->addDocumentsToSolrIndex($documents); $this->documentsSentToSolr = $documents; return $pageIndexed; } /** * Builds the Solr document for the current page.
* * @return Apache_Solr_Document A document representing the page */ protected function getPageDocument() { $document = GeneralUtility::makeInstance('Apache_Solr_Document'); /* @var $document Apache_Solr_Document */ $site = Tx_Solr_Site::getSiteByPageId($this->page->id); $pageRecord = $this->page->page; self::$pageSolrDocumentId = $documentId = Tx_Solr_Util::getPageDocumentId( $this->page->id, $this->page->type, $this->page->sys_language_uid, $this->getDocumentIdGroups() ); $document->setField('id', $documentId); $document->setField('site', $site->getDomain()); $document->setField('siteHash', $site->getSiteHash()); $document->setField('appKey', 'EXT:solr'); $document->setField('type', 'pages'); // system fields $document->setField('uid', $this->page->id); $document->setField('pid', $pageRecord['pid']); $document->setField('typeNum', $this->page->type); $document->setField('created', $pageRecord['crdate']); $document->setField('changed', $pageRecord['tstamp']); $document->setField('rootline', $this->page->id); // access $document->setField('access', (string) $this->pageAccessRootline); if ($this->page->page['endtime']) { $document->setField('endtime', $pageRecord['endtime']); } // content $document->setField('title', $this->contentExtractor->getPageTitle()); $document->setField('subTitle', $pageRecord['subtitle']); $document->setField('navTitle', $pageRecord['nav_title']); $document->setField('author', $pageRecord['author']); $document->setField('description', $pageRecord['description']); $document->setField('abstract', $pageRecord['abstract']); $document->setField('content', $this->contentExtractor->getIndexableContent()); $document->setField('url', $this->pageUrl); // keywords, multi valued $keywords = array_unique(GeneralUtility::trimExplode( ',', $pageRecord['keywords'], TRUE )); foreach ($keywords as $keyword) { $document->addField('keywords', $keyword); } // content from several tags like headers, anchors, ... $tagContent = $this->contentExtractor->getTagContent(); foreach ($tagContent as $fieldName => $fieldValue) { $document->setField($fieldName, $fieldValue); } return $document; } /** * Adds the collected documents to the Solr index. * * @param array $documents An array of Apache_Solr_Document objects. * @return boolean TRUE if documents were added successfully, FALSE otherwise */ protected function addDocumentsToSolrIndex(array $documents) { $documentsAdded = FALSE; if (!count($documents)) { return $documentsAdded; } try { $this->log('Adding ' . count($documents) . ' documents.', 0, $documents); // chunk adds by 20 $documentChunks = array_chunk($documents, 20); foreach ($documentChunks as $documentChunk) { $response = $this->solrConnection->addDocuments($documentChunk); if ($response->getHttpStatus() != 200) { $transportException = new Apache_Solr_HttpTransportException($response); throw new RuntimeException('Solr Request failed.', 1331834983, $transportException); } } $documentsAdded = TRUE; } catch (Exception $e) { $this->log($e->getMessage() . ' Error code: ' . $e->getCode(), 2); if ($GLOBALS['TSFE']->tmpl->setup['plugin.']['tx_solr.']['logging.']['exceptions']) { GeneralUtility::devLog('Exception while adding documents', 'solr', 3, array( $e->__toString() )); } } return $documentsAdded; } /** * Allows third party extensions to replace or modify the page document * created by this indexer. * * @param Apache_Solr_Document $pageDocument The page document created by this indexer. 
* @return Apache_Solr_Document An Apache Solr document representing the currently indexed page */ protected function substitutePageDocument(Apache_Solr_Document $pageDocument) { if (is_array($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPageSubstitutePageDocument'])) { foreach ($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPageSubstitutePageDocument'] as $classReference) { $substituteIndexer = GeneralUtility::getUserObj($classReference); if ($substituteIndexer instanceof Tx_Solr_SubstitutePageIndexer) { $substituteDocument = $substituteIndexer->getPageDocument($pageDocument); if ($substituteDocument instanceof Apache_Solr_Document) { $pageDocument = $substituteDocument; } else { throw new UnexpectedValueException( 'The document returned by ' . get_class($substituteIndexer) . ' is not a valid Apache_Solr_Document document.', 1310490952 ); } } else { throw new UnexpectedValueException( get_class($substituteIndexer) . ' must implement interface Tx_Solr_SubstitutePageIndexer', 1310491001 ); } } } return $pageDocument; } /** * Allows third party extensions to provide additional documents which * should be indexed for the current page. * * @param Apache_Solr_Document $pageDocument The main document representing this page. * @param array $existingDocuments An array of documents already created for this page. * @return array An array of additional Apache_Solr_Document objects to index */ protected function getAdditionalDocuments(Apache_Solr_Document $pageDocument, array $existingDocuments) { $documents = $existingDocuments; if (is_array($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPageAddDocuments'])) { foreach ($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPageAddDocuments'] as $classReference) { $additionalIndexer = GeneralUtility::getUserObj($classReference); if ($additionalIndexer instanceof Tx_Solr_AdditionalPageIndexer) { $additionalDocuments = $additionalIndexer->getAdditionalPageDocuments($pageDocument, $documents); if (is_array($additionalDocuments)) { $documents = array_merge($documents, $additionalDocuments); } } else { throw new UnexpectedValueException( get_class($additionalIndexer) . ' must implement interface Tx_Solr_AdditionalPageIndexer', 1310491024 ); } } } return $documents; } /** * Sends the given documents to the field processing service which takes * care of manipulating fields as defined in the field's configuration. * * @param array $documents An array of documents to manipulate */ protected function processDocuments(array $documents) { if (is_array($GLOBALS['TSFE']->tmpl->setup['plugin.']['tx_solr.']['index.']['fieldProcessingInstructions.'])) { $service = GeneralUtility::makeInstance('Tx_Solr_FieldProcessor_Service'); $service->processDocuments( $documents, $GLOBALS['TSFE']->tmpl->setup['plugin.']['tx_solr.']['index.']['fieldProcessingInstructions.'] ); } } // Logging // TODO replace by a central logger /** * Logs messages to devlog and TS log (admin panel) * * @param string $message Message to set * @param integer $errorNum Error number * @param array $data Additional data to log * @return void */ protected function log($message, $errorNum = 0, array $data = array()) { if (is_object($GLOBALS['TT'])) { $GLOBALS['TT']->setTSlogMessage('tx_solr: ' . 
$message, $errorNum); } if ($GLOBALS['TSFE']->tmpl->setup['plugin.']['tx_solr.']['logging.']['indexing']) { $logData = array(); if (!empty($data)) { foreach ($data as $value) { $logData[] = (array) $value; } } GeneralUtility::devLog($message, 'solr', $errorNum, $logData); } } // Misc /** * Gets the current page's URL. * * @return string URL of the current page. */ public function getPageUrl() { return $this->pageUrl; } /** * Sets the URL to use for the page document. * * @param string $url The page's URL. */ public function setPageUrl($url) { $this->pageUrl = $url; } /** * Gets the page's access rootline. * * @return Tx_Solr_Access_Rootline The page's access rootline */ public function getPageAccessRootline() { return $this->pageAccessRootline; } /** * Sets the page's access rootline. * * @param Tx_Solr_Access_Rootline $accessRootline The page's access rootline */ public function setPageAccessRootline(Tx_Solr_Access_Rootline $accessRootline) { $this->pageAccessRootline = $accessRootline; } /** * Gets the current page's Solr document ID. * * @return string|NULL The page's Solr document ID or NULL in case no document was generated yet. */ public static function getPageSolrDocumentId() { return self::$pageSolrDocumentId; } /** * Gets the Solr document generated for the current page. * * @return Apache_Solr_Document|NULL The page's Solr document or NULL if it has not been generated yet. */ public static function getPageSolrDocument() { return self::$pageSolrDocument; } /** * Gets the documents that have been sent to Solr. * * @return array An array of Apache_Solr_Document objects */ public function getDocumentsSentToSolr() { return $this->documentsSentToSolr; } /** * Gets a comma separated list of frontend user groups to use for the * document ID. * * @return string A comma separated list of frontend user groups. */ protected function getDocumentIdGroups() { $groups = $this->pageAccessRootline->getGroups(); $groups = Tx_Solr_Access_Rootline::cleanGroupArray($groups); if (empty($groups)) { $groups[] = 0; } $groups = implode(',', $groups); return $groups; } }
1
5,512
Please revert this change
TYPO3-Solr-ext-solr
php
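The requested revert amounts to keeping the two imports the patch removed; the class body still resolves both names (for example `GeneralUtility::getIndpEnv()` in the constructor and the `TypoScriptFrontendController` type hint), so dropping the `use` statements would break the file. The reverted state is simply:

<?php
// Reverted state: both aliases stay in place at the top of the file.
use TYPO3\CMS\Core\Utility\GeneralUtility;
use TYPO3\CMS\Frontend\Controller\TypoScriptFrontendController;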
@@ -57,13 +57,10 @@ module OrgAdmin # preview a phase # GET /org_admin/phases/[:id]/preview def preview - phase = Phase.includes(:template).find(params[:id]) - authorize phase - render("/org_admin/phases/preview", - locals: { - template: phase.template, - phase: phase - }) + @phase = Phase.includes(:template).find(params[:id]) + authorize @phase + @template = @phase.template + @guidance_presenter = GuidancePresenter.new(Plan.new(template: @phase.template)) end # add a new phase to a passed template
1
# frozen_string_literal: true module OrgAdmin class PhasesController < ApplicationController include Versionable after_action :verify_authorized # GET /org_admin/templates/:template_id/phases/[:id] def show phase = Phase.includes(:template, :sections).order(:number).find(params[:id]) authorize phase if !phase.template.latest? # rubocop:disable Metrics/LineLength flash[:notice] = _("You are viewing a historical version of this template. You will not be able to make changes.") # rubocop:enable Metrics/LineLength end render("container", locals: { partial_path: "show", template: phase.template, phase: phase, prefix_section: phase.prefix_section, sections: phase.template_sections.order(:number), suffix_sections: phase.suffix_sections.order(:number), current_section: Section.find_by(id: params[:section], phase_id: phase.id) }) end # GET /org_admin/templates/:template_id/phases/[:id]/edit def edit phase = Phase.includes(:template).find(params[:id]) authorize phase # User cannot edit a phase if it's a customization, so redirect to show if phase.template.customization_of.present? || !phase.template.latest? redirect_to org_admin_template_phase_path( template_id: phase.template, id: phase.id, section: params[:section] ) else render("container", locals: { partial_path: "edit", template: phase.template, phase: phase, prefix_section: phase.prefix_section, sections: phase.sections.order(:number).select(:id, :title, :modifiable), suffix_sections: phase.suffix_sections.order(:number), current_section: Section.find_by(id: params[:section], phase_id: phase.id) }) end end # preview a phase # GET /org_admin/phases/[:id]/preview def preview phase = Phase.includes(:template).find(params[:id]) authorize phase render("/org_admin/phases/preview", locals: { template: phase.template, phase: phase }) end # add a new phase to a passed template # GET /org_admin/phases/new def new template = Template.includes(:phases).find(params[:template_id]) if template.latest? nbr = template.phases.maximum(:number) phase = Phase.new( template: template, modifiable: true, number: (nbr.present? ? nbr + 1 : 1) ) authorize phase local_referrer = if request.referrer.present? request.referrer else org_admin_templates_path end render("/org_admin/templates/container", locals: { partial_path: "new", template: template, phase: phase, referrer: local_referrer }) else render org_admin_templates_path, alert: _("You cannot add a phase to a historical version of a template.") end end # create a phase # POST /org_admin/phases def create phase = Phase.new(phase_params) phase.template = Template.find(params[:template_id]) authorize phase begin phase = get_new(phase) phase.modifiable = true if phase.save flash[:notice] = success_message(_("phase"), _("created")) else flash[:alert] = failed_create_error(phase, _("phase")) end rescue StandardError => e flash[:alert] = _("Unable to create a new version of this template.") end if flash[:alert].present?
redirect_to edit_org_admin_template_path(id: phase.template_id) else redirect_to edit_org_admin_template_phase_path(template_id: phase.template.id, id: phase.id) end end # update a phase of a template # PUT /org_admin/phases/[:id] def update phase = Phase.find(params[:id]) authorize phase begin phase = get_modifiable(phase) if phase.update(phase_params) flash[:notice] = success_message(_("phase"), _("updated")) else flash[:alert] = failed_update_error(phase, _("phase")) end rescue StandardError => e flash[:alert] = _("Unable to create a new version of this template.") end redirect_to edit_org_admin_template_phase_path(template_id: phase.template.id, id: phase.id) end def sort @phase = Phase.find(params[:id]) authorize @phase Section.update_numbers!(*params.fetch(:sort_order, []), parent: @phase) head :ok end # delete a phase # DELETE org_admin/phases/[:id] def destroy phase = Phase.includes(:template).find(params[:id]) authorize phase begin phase = get_modifiable(phase) template = phase.template if phase.destroy! flash[:notice] = success_message(_("phase"), _("deleted")) else flash[:alert] = failed_destroy_error(phase, _("phase")) end rescue StandardError => e flash[:alert] = _("Unable to create a new version of this template.") end if flash[:alert].present? redirect_to org_admin_template_phase_path(template.id, phase.id) else redirect_to edit_org_admin_template_path(template) end end private def phase_params params.require(:phase).permit(:title, :description, :number) end end end
1
18,027
Nice. I hadn't even heard of presenters in Rails before (learn something new every day) :)
DMPRoadmap-roadmap
rb
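For readers who, like the commenter, are new to presenters: a presenter is a plain Ruby object wrapping a model so that view-facing logic stays out of templates and controllers. A minimal sketch along the lines of the `GuidancePresenter` the patch instantiates; the `any_guidance?` method is hypothetical, not the real class's API.

class GuidancePresenter
  def initialize(plan)
    @plan = plan
  end

  # View helper kept out of the ERB template; illustrative only.
  def any_guidance?
    @plan.template.phases.any?
  end
end

# Usage mirroring the controller change in the patch above:
# @guidance_presenter = GuidancePresenter.new(Plan.new(template: @phase.template))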
@@ -22,10 +22,19 @@ class Pool */ protected $providers = array(); + /** + * @var array + */ protected $contexts = array(); + /** + * @var DownloadStrategyInterface[] + */ protected $downloadSecurities = array(); + /** + * @var string + */ protected $defaultContext; /**
1
<?php /* * This file is part of the Sonata project. * * (c) Thomas Rabaix <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Sonata\MediaBundle\Provider; use Sonata\CoreBundle\Validator\ErrorElement; use Sonata\MediaBundle\Model\MediaInterface; use Sonata\MediaBundle\Security\DownloadStrategyInterface; class Pool { /** * @var array */ protected $providers = array(); protected $contexts = array(); protected $downloadSecurities = array(); protected $defaultContext; /** * @param string $context */ public function __construct($context) { $this->defaultContext = $context; } /** * @throws \RuntimeException * * @param string $name * * @return \Sonata\MediaBundle\Provider\MediaProviderInterface */ public function getProvider($name) { if (!$name) { throw new \InvalidArgumentException('Provider name cannot be empty, did you forget to call setProviderName() in your Media object?'); } if (empty($this->providers)) { throw new \RuntimeException(sprintf('Unable to retrieve provider named "%s" since there are no providers configured yet.', $name)); } if (!isset($this->providers[$name])) { throw new \InvalidArgumentException(sprintf('Unable to retrieve the provider named "%s". Available providers are %s.', $name, '"'.implode('", "', $this->getProviderList()).'"')); } return $this->providers[$name]; } /** * @param string $name * @param MediaProviderInterface $instance */ public function addProvider($name, MediaProviderInterface $instance) { $this->providers[$name] = $instance; } /** * @param string $name * @param \Sonata\MediaBundle\Security\DownloadStrategyInterface $security */ public function addDownloadSecurity($name, DownloadStrategyInterface $security) { $this->downloadSecurities[$name] = $security; } /** * @param array $providers */ public function setProviders($providers) { $this->providers = $providers; } /** * @return \Sonata\MediaBundle\Provider\MediaProviderInterface[] */ public function getProviders() { return $this->providers; } /** * @param string $name * @param array $providers * @param array $formats * @param array $download */ public function addContext($name, array $providers = array(), array $formats = array(), array $download = array()) { if (!$this->hasContext($name)) { $this->contexts[$name] = array( 'providers' => array(), 'formats' => array(), 'download' => array(), ); } $this->contexts[$name]['providers'] = $providers; $this->contexts[$name]['formats'] = $formats; $this->contexts[$name]['download'] = $download; } /** * @param string $name * * @return bool */ public function hasContext($name) { return isset($this->contexts[$name]); } /** * @param string $name * * @return array|null */ public function getContext($name) { if (!$this->hasContext($name)) { return; } return $this->contexts[$name]; } /** * Returns the context list. 
* * @return array */ public function getContexts() { return $this->contexts; } /** * @param string $name * * @return array */ public function getProviderNamesByContext($name) { $context = $this->getContext($name); if (!$context) { return; } return $context['providers']; } /** * @param string $name * * @return array */ public function getFormatNamesByContext($name) { $context = $this->getContext($name); if (!$context) { return; } return $context['formats']; } /** * @param string $name * * @return array */ public function getProvidersByContext($name) { $providers = array(); if (!$this->hasContext($name)) { return $providers; } foreach ($this->getProviderNamesByContext($name) as $name) { $providers[] = $this->getProvider($name); } return $providers; } /** * @return array */ public function getProviderList() { $choices = array(); foreach (array_keys($this->providers) as $name) { $choices[$name] = $name; } return $choices; } /** * @param \Sonata\MediaBundle\Model\MediaInterface $media * * @return \Sonata\MediaBundle\Security\DownloadStrategyInterface * * @throws \RuntimeException */ public function getDownloadSecurity(MediaInterface $media) { $context = $this->getContext($media->getContext()); $id = $context['download']['strategy']; if (!isset($this->downloadSecurities[$id])) { throw new \RuntimeException('Unable to retrieve the download security : '.$id); } return $this->downloadSecurities[$id]; } /** * @param \Sonata\MediaBundle\Model\MediaInterface $media * * @return string */ public function getDownloadMode(MediaInterface $media) { $context = $this->getContext($media->getContext()); return $context['download']['mode']; } /** * @return string */ public function getDefaultContext() { return $this->defaultContext; } /** * @param \Sonata\CoreBundle\Validator\ErrorElement $errorElement * @param \Sonata\MediaBundle\Model\MediaInterface $media */ public function validate(ErrorElement $errorElement, MediaInterface $media) { if (!$media->getProviderName()) { return; } $provider = $this->getProvider($media->getProviderName()); $provider->validate($errorElement, $media); } }
1
7,139
`string[]` ? not sure, can you verify this @core23 ? Thank you
sonata-project-SonataMediaBundle
php
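On the reviewer's `string[]` question: a plain `@var array` says nothing about element types, while `Type[]` or the `array<key, value>` form (understood by tools like PHPStan and Psalm, an assumption about the project's tooling) does. Judging by how `addContext()` populates `$contexts` in the file above, each entry is itself an array of providers/formats/download settings, so `string[]` would be too narrow. A sketch of more precise annotations, not a verified change:

class Pool
{
    /**
     * @var array<string, array> context name => providers/formats/download settings
     */
    protected $contexts = array();

    /**
     * @var DownloadStrategyInterface[] keyed by the strategy name
     */
    protected $downloadSecurities = array();
}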
@@ -1224,12 +1224,14 @@ define(["playbackManager", "dom", "inputManager", "datetime", "itemHelper", "med return null; } + let playPauseClickTimeout; function onViewHideStopPlayback() { if (playbackManager.isPlayingVideo()) { require(['shell'], function (shell) { shell.disableFullscreen(); }); + clearTimeout(playPauseClickTimeout); var player = currentPlayer; view.removeEventListener("viewbeforehide", onViewHideStopPlayback); releaseCurrentPlayer();
1
define(["playbackManager", "dom", "inputManager", "datetime", "itemHelper", "mediaInfo", "focusManager", "imageLoader", "scrollHelper", "events", "connectionManager", "browser", "globalize", "apphost", "layoutManager", "userSettings", "keyboardnavigation", "scrollStyles", "emby-slider", "paper-icon-button-light", "css!assets/css/videoosd"], function (playbackManager, dom, inputManager, datetime, itemHelper, mediaInfo, focusManager, imageLoader, scrollHelper, events, connectionManager, browser, globalize, appHost, layoutManager, userSettings, keyboardnavigation) { "use strict"; function seriesImageUrl(item, options) { if ("Episode" !== item.Type) { return null; } options = options || {}; options.type = options.type || "Primary"; if ("Primary" === options.type && item.SeriesPrimaryImageTag) { options.tag = item.SeriesPrimaryImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options); } if ("Thumb" === options.type) { if (item.SeriesThumbImageTag) { options.tag = item.SeriesThumbImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options); } if (item.ParentThumbImageTag) { options.tag = item.ParentThumbImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.ParentThumbItemId, options); } } return null; } function imageUrl(item, options) { options = options || {}; options.type = options.type || "Primary"; if (item.ImageTags && item.ImageTags[options.type]) { options.tag = item.ImageTags[options.type]; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.PrimaryImageItemId || item.Id, options); } if ("Primary" === options.type && item.AlbumId && item.AlbumPrimaryImageTag) { options.tag = item.AlbumPrimaryImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.AlbumId, options); } return null; } function logoImageUrl(item, apiClient, options) { options = options || {}; options.type = "Logo"; if (item.ImageTags && item.ImageTags.Logo) { options.tag = item.ImageTags.Logo; return apiClient.getScaledImageUrl(item.Id, options); } if (item.ParentLogoImageTag) { options.tag = item.ParentLogoImageTag; return apiClient.getScaledImageUrl(item.ParentLogoItemId, options); } return null; } return function (view, params) { function onVerticalSwipe(e, elem, data) { var player = currentPlayer; if (player) { var deltaY = data.currentDeltaY; var windowSize = dom.getWindowSize(); if (supportsBrightnessChange && data.clientX < windowSize.innerWidth / 2) { return void doBrightnessTouch(deltaY, player, windowSize.innerHeight); } doVolumeTouch(deltaY, player, windowSize.innerHeight); } } function doBrightnessTouch(deltaY, player, viewHeight) { var delta = -deltaY / viewHeight * 100; var newValue = playbackManager.getBrightness(player) + delta; newValue = Math.min(newValue, 100); newValue = Math.max(newValue, 0); playbackManager.setBrightness(newValue, player); } function doVolumeTouch(deltaY, player, viewHeight) { var delta = -deltaY / viewHeight * 100; var newValue = playbackManager.getVolume(player) + delta; newValue = Math.min(newValue, 100); newValue = Math.max(newValue, 0); playbackManager.setVolume(newValue, player); } function onDoubleClick(e) { var clientX = e.clientX; if (null != clientX) { if (clientX < dom.getWindowSize().innerWidth / 2) { playbackManager.rewind(currentPlayer); } else { playbackManager.fastForward(currentPlayer); } e.preventDefault(); e.stopPropagation(); } } function getDisplayItem(item) { if ("TvChannel" === item.Type) 
{ var apiClient = connectionManager.getApiClient(item.ServerId); return apiClient.getItem(apiClient.getCurrentUserId(), item.Id).then(function (refreshedItem) { return { originalItem: refreshedItem, displayItem: refreshedItem.CurrentProgram }; }); } return Promise.resolve({ originalItem: item }); } function updateRecordingButton(item) { if (!item || "Program" !== item.Type) { if (recordingButtonManager) { recordingButtonManager.destroy(); recordingButtonManager = null; } return void view.querySelector(".btnRecord").classList.add("hide"); } connectionManager.getApiClient(item.ServerId).getCurrentUser().then(function (user) { if (user.Policy.EnableLiveTvManagement) { require(["recordingButton"], function (RecordingButton) { if (recordingButtonManager) { return void recordingButtonManager.refreshItem(item); } recordingButtonManager = new RecordingButton({ item: item, button: view.querySelector(".btnRecord") }); view.querySelector(".btnRecord").classList.remove("hide"); }); } }); } function updateDisplayItem(itemInfo) { var item = itemInfo.originalItem; currentItem = item; var displayItem = itemInfo.displayItem || item; updateRecordingButton(displayItem); setPoster(displayItem, item); var parentName = displayItem.SeriesName || displayItem.Album; if (displayItem.EpisodeTitle || displayItem.IsSeries) { parentName = displayItem.Name; } setTitle(displayItem, parentName); var titleElement; var osdTitle = view.querySelector(".osdTitle"); titleElement = osdTitle; var displayName = itemHelper.getDisplayName(displayItem, { includeParentInfo: "Program" !== displayItem.Type, includeIndexNumber: "Program" !== displayItem.Type }); if (!displayName) { displayItem.Type; } titleElement.innerHTML = displayName; if (displayName) { titleElement.classList.remove("hide"); } else { titleElement.classList.add("hide"); } var mediaInfoHtml = mediaInfo.getPrimaryMediaInfoHtml(displayItem, { runtime: false, subtitles: false, tomatoes: false, endsAt: false, episodeTitle: false, originalAirDate: "Program" !== displayItem.Type, episodeTitleIndexNumber: "Program" !== displayItem.Type, programIndicator: false }); var osdMediaInfo = view.querySelector(".osdMediaInfo"); osdMediaInfo.innerHTML = mediaInfoHtml; if (mediaInfoHtml) { osdMediaInfo.classList.remove("hide"); } else { osdMediaInfo.classList.add("hide"); } var secondaryMediaInfo = view.querySelector(".osdSecondaryMediaInfo"); var secondaryMediaInfoHtml = mediaInfo.getSecondaryMediaInfoHtml(displayItem, { startDate: false, programTime: false }); secondaryMediaInfo.innerHTML = secondaryMediaInfoHtml; if (secondaryMediaInfoHtml) { secondaryMediaInfo.classList.remove("hide"); } else { secondaryMediaInfo.classList.add("hide"); } if (displayName) { view.querySelector(".osdMainTextContainer").classList.remove("hide"); } else { view.querySelector(".osdMainTextContainer").classList.add("hide"); } if (enableProgressByTimeOfDay) { setDisplayTime(startTimeText, displayItem.StartDate); setDisplayTime(endTimeText, displayItem.EndDate); startTimeText.classList.remove("hide"); endTimeText.classList.remove("hide"); programStartDateMs = displayItem.StartDate ? datetime.parseISO8601Date(displayItem.StartDate).getTime() : 0; programEndDateMs = displayItem.EndDate ? 
datetime.parseISO8601Date(displayItem.EndDate).getTime() : 0; } else { startTimeText.classList.add("hide"); endTimeText.classList.add("hide"); startTimeText.innerHTML = ""; endTimeText.innerHTML = ""; programStartDateMs = 0; programEndDateMs = 0; } } function getDisplayTimeWithoutAmPm(date, showSeconds) { if (showSeconds) { return datetime.toLocaleTimeString(date, { hour: "numeric", minute: "2-digit", second: "2-digit" }).toLowerCase().replace("am", "").replace("pm", "").trim(); } return datetime.getDisplayTime(date).toLowerCase().replace("am", "").replace("pm", "").trim(); } function setDisplayTime(elem, date) { var html; if (date) { date = datetime.parseISO8601Date(date); html = getDisplayTimeWithoutAmPm(date); } elem.innerHTML = html || ""; } function shouldEnableProgressByTimeOfDay(item) { return !("TvChannel" !== item.Type || !item.CurrentProgram); } function updateNowPlayingInfo(player, state) { var item = state.NowPlayingItem; currentItem = item; if (!item) { setPoster(null); updateRecordingButton(null); Emby.Page.setTitle(""); nowPlayingVolumeSlider.disabled = true; nowPlayingPositionSlider.disabled = true; btnFastForward.disabled = true; btnRewind.disabled = true; view.querySelector(".btnSubtitles").classList.add("hide"); view.querySelector(".btnAudio").classList.add("hide"); view.querySelector(".osdTitle").innerHTML = ""; view.querySelector(".osdMediaInfo").innerHTML = ""; return; } enableProgressByTimeOfDay = shouldEnableProgressByTimeOfDay(item); getDisplayItem(item).then(updateDisplayItem); nowPlayingVolumeSlider.disabled = false; nowPlayingPositionSlider.disabled = false; btnFastForward.disabled = false; btnRewind.disabled = false; if (playbackManager.subtitleTracks(player).length) { view.querySelector(".btnSubtitles").classList.remove("hide"); toggleSubtitleSync(); } else { view.querySelector(".btnSubtitles").classList.add("hide"); toggleSubtitleSync("forceToHide"); } if (playbackManager.audioTracks(player).length > 1) { view.querySelector(".btnAudio").classList.remove("hide"); } else { view.querySelector(".btnAudio").classList.add("hide"); } } function setTitle(item, parentName) { var url = logoImageUrl(item, connectionManager.getApiClient(item.ServerId), {}); if (url) { Emby.Page.setTitle(""); var pageTitle = document.querySelector(".pageTitle"); pageTitle.style.backgroundImage = "url('" + url + "')"; pageTitle.classList.add("pageTitleWithLogo"); pageTitle.classList.remove("pageTitleWithDefaultLogo"); pageTitle.innerHTML = ""; } else { Emby.Page.setTitle(parentName || ""); } var documentTitle = parentName || (item ? 
item.Name : null); if (documentTitle) { document.title = documentTitle; } } function setPoster(item, secondaryItem) { var osdPoster = view.querySelector(".osdPoster"); if (item) { var imgUrl = seriesImageUrl(item, { type: "Primary" }) || seriesImageUrl(item, { type: "Thumb" }) || imageUrl(item, { type: "Primary" }); if (!imgUrl && secondaryItem && (imgUrl = seriesImageUrl(secondaryItem, { type: "Primary" }) || seriesImageUrl(secondaryItem, { type: "Thumb" }) || imageUrl(secondaryItem, { type: "Primary" })), imgUrl) { return void (osdPoster.innerHTML = '<img src="' + imgUrl + '" />'); } } osdPoster.innerHTML = ""; } function showOsd() { slideDownToShow(headerElement); showMainOsdControls(); startOsdHideTimer(); } function hideOsd() { slideUpToHide(headerElement); hideMainOsdControls(); } function toggleOsd() { if ("osd" === currentVisibleMenu) { hideOsd(); } else if (!currentVisibleMenu) { showOsd(); } } function startOsdHideTimer() { stopOsdHideTimer(); osdHideTimeout = setTimeout(hideOsd, 5e3); } function stopOsdHideTimer() { if (osdHideTimeout) { clearTimeout(osdHideTimeout); osdHideTimeout = null; } } function slideDownToShow(elem) { elem.classList.remove("osdHeader-hidden"); } function slideUpToHide(elem) { elem.classList.add("osdHeader-hidden"); } function clearHideAnimationEventListeners(elem) { dom.removeEventListener(elem, transitionEndEventName, onHideAnimationComplete, { once: true }); } function onHideAnimationComplete(e) { var elem = e.target; if (elem != osdBottomElement) return; elem.classList.add("hide"); dom.removeEventListener(elem, transitionEndEventName, onHideAnimationComplete, { once: true }); } function showMainOsdControls() { if (!currentVisibleMenu) { var elem = osdBottomElement; currentVisibleMenu = "osd"; clearHideAnimationEventListeners(elem); elem.classList.remove("hide"); elem.classList.remove("videoOsdBottom-hidden"); if (!layoutManager.mobile) { setTimeout(function () { focusManager.focus(elem.querySelector(".btnPause")); }, 50); } toggleSubtitleSync(); } } function hideMainOsdControls() { if ("osd" === currentVisibleMenu) { var elem = osdBottomElement; clearHideAnimationEventListeners(elem); elem.classList.add("videoOsdBottom-hidden"); dom.addEventListener(elem, transitionEndEventName, onHideAnimationComplete, { once: true }); currentVisibleMenu = null; toggleSubtitleSync("hide"); // Firefox does not blur by itself if (document.activeElement) { document.activeElement.blur(); } } } function onPointerMove(e) { if ("mouse" === (e.pointerType || (layoutManager.mobile ? 
"touch" : "mouse"))) { var eventX = e.screenX || 0; var eventY = e.screenY || 0; var obj = lastPointerMoveData; if (!obj) { lastPointerMoveData = { x: eventX, y: eventY }; return; } if (Math.abs(eventX - obj.x) < 10 && Math.abs(eventY - obj.y) < 10) { return; } obj.x = eventX; obj.y = eventY; showOsd(); } } function onInputCommand(e) { var player = currentPlayer; switch (e.detail.command) { case "left": if ("osd" === currentVisibleMenu) { showOsd(); } else { if (!currentVisibleMenu) { e.preventDefault(); playbackManager.rewind(player); } } break; case "right": if ("osd" === currentVisibleMenu) { showOsd(); } else if (!currentVisibleMenu) { e.preventDefault(); playbackManager.fastForward(player); } break; case "pageup": playbackManager.nextChapter(player); break; case "pagedown": playbackManager.previousChapter(player); break; case "up": case "down": case "select": case "menu": case "info": case "play": case "playpause": case "pause": case "fastforward": case "rewind": case "next": case "previous": showOsd(); break; case "record": onRecordingCommand(); showOsd(); break; case "togglestats": toggleStats(); } } function onRecordingCommand() { var btnRecord = view.querySelector(".btnRecord"); if (!btnRecord.classList.contains("hide")) { btnRecord.click(); } } function updateFullscreenIcon() { if (playbackManager.isFullscreen(currentPlayer)) { view.querySelector(".btnFullscreen").setAttribute("title", globalize.translate("ExitFullscreen")); view.querySelector(".btnFullscreen i").innerHTML = "fullscreen_exit"; } else { view.querySelector(".btnFullscreen").setAttribute("title", globalize.translate("Fullscreen") + " (f)"); view.querySelector(".btnFullscreen i").innerHTML = "fullscreen"; } } function onPlayerChange() { bindToPlayer(playbackManager.getCurrentPlayer()); } function onStateChanged(event, state) { var player = this; if (state.NowPlayingItem) { isEnabled = true; updatePlayerStateInternal(event, player, state); updatePlaylist(player); enableStopOnBack(true); } } function onPlayPauseStateChanged(e) { if (isEnabled) { updatePlayPauseState(this.paused()); } } function onVolumeChanged(e) { if (isEnabled) { var player = this; updatePlayerVolumeState(player, player.isMuted(), player.getVolume()); } } function onPlaybackStart(e, state) { console.log("nowplaying event: " + e.type); var player = this; onStateChanged.call(player, e, state); resetUpNextDialog(); } function resetUpNextDialog() { comingUpNextDisplayed = false; var dlg = currentUpNextDialog; if (dlg) { dlg.destroy(); currentUpNextDialog = null; } } function onPlaybackStopped(e, state) { currentRuntimeTicks = null; resetUpNextDialog(); console.log("nowplaying event: " + e.type); if ("Video" !== state.NextMediaType) { view.removeEventListener("viewbeforehide", onViewHideStopPlayback); Emby.Page.back(); } } function onMediaStreamsChanged(e) { var player = this; var state = playbackManager.getPlayerState(player); onStateChanged.call(player, { type: "init" }, state); } function onBeginFetch() { document.querySelector(".osdMediaStatus").classList.remove("hide"); } function onEndFetch() { document.querySelector(".osdMediaStatus").classList.add("hide"); } function bindToPlayer(player) { if (player !== currentPlayer) { releaseCurrentPlayer(); currentPlayer = player; if (!player) return; } var state = playbackManager.getPlayerState(player); onStateChanged.call(player, { type: "init" }, state); events.on(player, "playbackstart", onPlaybackStart); events.on(player, "playbackstop", onPlaybackStopped); events.on(player, "volumechange", 
onVolumeChanged); events.on(player, "pause", onPlayPauseStateChanged); events.on(player, "unpause", onPlayPauseStateChanged); events.on(player, "timeupdate", onTimeUpdate); events.on(player, "fullscreenchange", updateFullscreenIcon); events.on(player, "mediastreamschange", onMediaStreamsChanged); events.on(player, "beginFetch", onBeginFetch); events.on(player, "endFetch", onEndFetch); resetUpNextDialog(); if (player.isFetching) { onBeginFetch(); } } function releaseCurrentPlayer() { destroyStats(); destroySubtitleSync(); resetUpNextDialog(); var player = currentPlayer; if (player) { events.off(player, "playbackstart", onPlaybackStart); events.off(player, "playbackstop", onPlaybackStopped); events.off(player, "volumechange", onVolumeChanged); events.off(player, "pause", onPlayPauseStateChanged); events.off(player, "unpause", onPlayPauseStateChanged); events.off(player, "timeupdate", onTimeUpdate); events.off(player, "fullscreenchange", updateFullscreenIcon); events.off(player, "mediastreamschange", onMediaStreamsChanged); currentPlayer = null; } } function onTimeUpdate(e) { if (isEnabled) { var now = new Date().getTime(); if (!(now - lastUpdateTime < 700)) { lastUpdateTime = now; var player = this; currentRuntimeTicks = playbackManager.duration(player); var currentTime = playbackManager.currentTime(player); updateTimeDisplay(currentTime, currentRuntimeTicks, playbackManager.playbackStartTime(player), playbackManager.getBufferedRanges(player)); var item = currentItem; refreshProgramInfoIfNeeded(player, item); showComingUpNextIfNeeded(player, item, currentTime, currentRuntimeTicks); } } } function showComingUpNextIfNeeded(player, currentItem, currentTimeTicks, runtimeTicks) { if (runtimeTicks && currentTimeTicks && !comingUpNextDisplayed && !currentVisibleMenu && "Episode" === currentItem.Type && userSettings.enableNextVideoInfoOverlay()) { var showAtSecondsLeft = runtimeTicks >= 3e10 ? 40 : runtimeTicks >= 24e9 ? 
35 : 30; var showAtTicks = runtimeTicks - 1e3 * showAtSecondsLeft * 1e4; var timeRemainingTicks = runtimeTicks - currentTimeTicks; if (currentTimeTicks >= showAtTicks && runtimeTicks >= 6e9 && timeRemainingTicks >= 2e8) { showComingUpNext(player); } } } function onUpNextHidden() { if ("upnext" === currentVisibleMenu) { currentVisibleMenu = null; } } function showComingUpNext(player) { require(["upNextDialog"], function (UpNextDialog) { if (!(currentVisibleMenu || currentUpNextDialog)) { currentVisibleMenu = "upnext"; comingUpNextDisplayed = true; playbackManager.nextItem(player).then(function (nextItem) { currentUpNextDialog = new UpNextDialog({ parent: view.querySelector(".upNextContainer"), player: player, nextItem: nextItem }); events.on(currentUpNextDialog, "hide", onUpNextHidden); }, onUpNextHidden); } }); } function refreshProgramInfoIfNeeded(player, item) { if ("TvChannel" === item.Type) { var program = item.CurrentProgram; if (program && program.EndDate) { try { var endDate = datetime.parseISO8601Date(program.EndDate); if (new Date().getTime() >= endDate.getTime()) { console.log("program info needs to be refreshed"); var state = playbackManager.getPlayerState(player); onStateChanged.call(player, { type: "init" }, state); } } catch (e) { console.log("Error parsing date: " + program.EndDate); } } } } function updatePlayPauseState(isPaused) { var button = view.querySelector(".btnPause i"); if (isPaused) { button.innerHTML = "play_arrow"; button.setAttribute("title", globalize.translate("ButtonPlay") + " (k)"); } else { button.innerHTML = "pause"; button.setAttribute("title", globalize.translate("ButtonPause") + " (k)"); } } function updatePlayerStateInternal(event, player, state) { var playState = state.PlayState || {}; updatePlayPauseState(playState.IsPaused); var supportedCommands = playbackManager.getSupportedCommands(player); currentPlayerSupportedCommands = supportedCommands; supportsBrightnessChange = -1 !== supportedCommands.indexOf("SetBrightness"); updatePlayerVolumeState(player, playState.IsMuted, playState.VolumeLevel); if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) { nowPlayingPositionSlider.disabled = !playState.CanSeek; } btnFastForward.disabled = !playState.CanSeek; btnRewind.disabled = !playState.CanSeek; var nowPlayingItem = state.NowPlayingItem || {}; playbackStartTimeTicks = playState.PlaybackStartTimeTicks; updateTimeDisplay(playState.PositionTicks, nowPlayingItem.RunTimeTicks, playState.PlaybackStartTimeTicks, playState.BufferedRanges || []); updateNowPlayingInfo(player, state); if (state.MediaSource && state.MediaSource.SupportsTranscoding && -1 !== supportedCommands.indexOf("SetMaxStreamingBitrate")) { view.querySelector(".btnVideoOsdSettings").classList.remove("hide"); } else { view.querySelector(".btnVideoOsdSettings").classList.add("hide"); } var isProgressClear = state.MediaSource && null == state.MediaSource.RunTimeTicks; nowPlayingPositionSlider.setIsClear(isProgressClear); if (nowPlayingItem.RunTimeTicks) { nowPlayingPositionSlider.setKeyboardSteps(userSettings.skipBackLength() * 1000000 / nowPlayingItem.RunTimeTicks, userSettings.skipForwardLength() * 1000000 / nowPlayingItem.RunTimeTicks); } if (-1 === supportedCommands.indexOf("ToggleFullscreen") || player.isLocalPlayer && layoutManager.tv && playbackManager.isFullscreen(player)) { view.querySelector(".btnFullscreen").classList.add("hide"); } else { view.querySelector(".btnFullscreen").classList.remove("hide"); } if (-1 === supportedCommands.indexOf("PictureInPicture")) { 
view.querySelector(".btnPip").classList.add("hide"); } else { view.querySelector(".btnPip").classList.remove("hide"); } if (-1 === supportedCommands.indexOf("AirPlay")) { view.querySelector(".btnAirPlay").classList.add("hide"); } else { view.querySelector(".btnAirPlay").classList.remove("hide"); } updateFullscreenIcon(); } function getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, currentTimeMs) { return (currentTimeMs - programStartDateMs) / programRuntimeMs * 100; } function updateTimeDisplay(positionTicks, runtimeTicks, playbackStartTimeTicks, bufferedRanges) { if (enableProgressByTimeOfDay) { if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) { if (programStartDateMs && programEndDateMs) { var currentTimeMs = (playbackStartTimeTicks + (positionTicks || 0)) / 1e4; var programRuntimeMs = programEndDateMs - programStartDateMs; if (nowPlayingPositionSlider.value = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, currentTimeMs), bufferedRanges.length) { var rangeStart = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, (playbackStartTimeTicks + (bufferedRanges[0].start || 0)) / 1e4); var rangeEnd = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, (playbackStartTimeTicks + (bufferedRanges[0].end || 0)) / 1e4); nowPlayingPositionSlider.setBufferedRanges([{ start: rangeStart, end: rangeEnd }]); } else { nowPlayingPositionSlider.setBufferedRanges([]); } } else { nowPlayingPositionSlider.value = 0; nowPlayingPositionSlider.setBufferedRanges([]); } } nowPlayingPositionText.innerHTML = ""; nowPlayingDurationText.innerHTML = ""; } else { if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) { if (runtimeTicks) { var pct = positionTicks / runtimeTicks; pct *= 100; nowPlayingPositionSlider.value = pct; } else { nowPlayingPositionSlider.value = 0; } if (runtimeTicks && null != positionTicks && currentRuntimeTicks && !enableProgressByTimeOfDay && currentItem.RunTimeTicks && "Recording" !== currentItem.Type) { endsAtText.innerHTML = "&nbsp;&nbsp;-&nbsp;&nbsp;" + mediaInfo.getEndsAtFromPosition(runtimeTicks, positionTicks, true); } else { endsAtText.innerHTML = ""; } } if (nowPlayingPositionSlider) { nowPlayingPositionSlider.setBufferedRanges(bufferedRanges, runtimeTicks, positionTicks); } updateTimeText(nowPlayingPositionText, positionTicks); updateTimeText(nowPlayingDurationText, runtimeTicks, true); } } function updatePlayerVolumeState(player, isMuted, volumeLevel) { var supportedCommands = currentPlayerSupportedCommands; var showMuteButton = true; var showVolumeSlider = true; var volumeSlider = view.querySelector('.osdVolumeSliderContainer'); var progressElement = volumeSlider.querySelector('.mdl-slider-background-lower'); if (-1 === supportedCommands.indexOf("Mute")) { showMuteButton = false; } if (-1 === supportedCommands.indexOf("SetVolume")) { showVolumeSlider = false; } if (player.isLocalPlayer && appHost.supports("physicalvolumecontrol")) { showMuteButton = false; showVolumeSlider = false; } if (isMuted) { view.querySelector(".buttonMute").setAttribute("title", globalize.translate("Unmute") + " (m)"); view.querySelector(".buttonMute i").innerHTML = "volume_off"; } else { view.querySelector(".buttonMute").setAttribute("title", globalize.translate("Mute") + " (m)"); view.querySelector(".buttonMute i").innerHTML = "volume_up"; } if (progressElement) { progressElement.style.width = (volumeLevel || 0) + '%'; } if (showMuteButton) { view.querySelector(".buttonMute").classList.remove("hide"); } else { 
view.querySelector(".buttonMute").classList.add("hide"); } if (nowPlayingVolumeSlider) { if (showVolumeSlider) { nowPlayingVolumeSliderContainer.classList.remove("hide"); } else { nowPlayingVolumeSliderContainer.classList.add("hide"); } if (!nowPlayingVolumeSlider.dragging) { nowPlayingVolumeSlider.value = volumeLevel || 0; } } } function updatePlaylist(player) { var btnPreviousTrack = view.querySelector(".btnPreviousTrack"); var btnNextTrack = view.querySelector(".btnNextTrack"); btnPreviousTrack.classList.remove("hide"); btnNextTrack.classList.remove("hide"); btnNextTrack.disabled = false; btnPreviousTrack.disabled = false; } function updateTimeText(elem, ticks, divider) { if (null == ticks) { elem.innerHTML = ""; return; } var html = datetime.getDisplayRunningTime(ticks); if (divider) { html = "&nbsp;/&nbsp;" + html; } elem.innerHTML = html; } function onSettingsButtonClick(e) { var btn = this; require(["playerSettingsMenu"], function (playerSettingsMenu) { var player = currentPlayer; if (player) { // show subtitle offset feature only if player and media support it var showSubOffset = playbackManager.supportSubtitleOffset(player) && playbackManager.canHandleOffsetOnCurrentSubtitle(player); playerSettingsMenu.show({ mediaType: "Video", player: player, positionTo: btn, stats: true, suboffset: showSubOffset, onOption: onSettingsOption }); } }); } function onSettingsOption(selectedOption) { if ("stats" === selectedOption) { toggleStats(); } else if ("suboffset" === selectedOption) { var player = currentPlayer; if (player) { playbackManager.enableShowingSubtitleOffset(player); toggleSubtitleSync(); } } } function toggleStats() { require(["playerStats"], function (PlayerStats) { var player = currentPlayer; if (player) { if (statsOverlay) { statsOverlay.toggle(); } else { statsOverlay = new PlayerStats({ player: player }); } } }); } function destroyStats() { if (statsOverlay) { statsOverlay.destroy(); statsOverlay = null; } } function showAudioTrackSelection() { var player = currentPlayer; var audioTracks = playbackManager.audioTracks(player); var currentIndex = playbackManager.getAudioStreamIndex(player); var menuItems = audioTracks.map(function (stream) { var opt = { name: stream.DisplayTitle, id: stream.Index }; if (stream.Index === currentIndex) { opt.selected = true; } return opt; }); var positionTo = this; require(["actionsheet"], function (actionsheet) { actionsheet.show({ items: menuItems, title: globalize.translate("Audio"), positionTo: positionTo }).then(function (id) { var index = parseInt(id); if (index !== currentIndex) { playbackManager.setAudioStreamIndex(index, player); } }); }); } function showSubtitleTrackSelection() { var player = currentPlayer; var streams = playbackManager.subtitleTracks(player); var currentIndex = playbackManager.getSubtitleStreamIndex(player); if (null == currentIndex) { currentIndex = -1; } streams.unshift({ Index: -1, DisplayTitle: globalize.translate("Off") }); var menuItems = streams.map(function (stream) { var opt = { name: stream.DisplayTitle, id: stream.Index }; if (stream.Index === currentIndex) { opt.selected = true; } return opt; }); var positionTo = this; require(["actionsheet"], function (actionsheet) { actionsheet.show({ title: globalize.translate("Subtitles"), items: menuItems, positionTo: positionTo }).then(function (id) { var index = parseInt(id); if (index !== currentIndex) { playbackManager.setSubtitleStreamIndex(index, player); } toggleSubtitleSync(); }); }); } function toggleSubtitleSync(action) { require(["subtitleSync"], function 
(SubtitleSync) { var player = currentPlayer; if (subtitleSyncOverlay) { subtitleSyncOverlay.toggle(action); } else if (player) { subtitleSyncOverlay = new SubtitleSync(player); } }); } function destroySubtitleSync() { if (subtitleSyncOverlay) { subtitleSyncOverlay.destroy(); subtitleSyncOverlay = null; } } /** * Clicked element. * To skip 'click' handling on Firefox/Edge. */ var clickedElement; function onWindowKeyDown(e) { clickedElement = e.srcElement; var key = keyboardnavigation.getKeyName(e); if (!currentVisibleMenu && 32 === e.keyCode) { playbackManager.playPause(currentPlayer); showOsd(); return; } if (layoutManager.tv && keyboardnavigation.isNavigationKey(key)) { showOsd(); return; } switch (key) { case "Enter": showOsd(); break; case "Escape": case "Back": // Ignore key when some dialog is opened if (currentVisibleMenu === "osd" && !document.querySelector(".dialogContainer")) { hideOsd(); e.stopPropagation(); } break; case "k": playbackManager.playPause(currentPlayer); showOsd(); break; case "l": case "ArrowRight": case "Right": playbackManager.fastForward(currentPlayer); showOsd(); break; case "j": case "ArrowLeft": case "Left": playbackManager.rewind(currentPlayer); showOsd(); break; case "f": if (!e.ctrlKey && !e.metaKey) { playbackManager.toggleFullscreen(currentPlayer); showOsd(); } break; case "m": playbackManager.toggleMute(currentPlayer); showOsd(); break; case "NavigationLeft": case "GamepadDPadLeft": case "GamepadLeftThumbstickLeft": // Ignores gamepad events that are always triggered, even when not focused. if (document.hasFocus()) { playbackManager.rewind(currentPlayer); showOsd(); } break; case "NavigationRight": case "GamepadDPadRight": case "GamepadLeftThumbstickRight": // Ignores gamepad events that are always triggered, even when not focused. 
if (document.hasFocus()) { playbackManager.fastForward(currentPlayer); showOsd(); } } } function onWindowMouseDown(e) { clickedElement = e.srcElement; } function onWindowTouchStart(e) { clickedElement = e.srcElement; } function getImgUrl(item, chapter, index, maxWidth, apiClient) { if (chapter.ImageTag) { return apiClient.getScaledImageUrl(item.Id, { maxWidth: maxWidth, tag: chapter.ImageTag, type: "Chapter", index: index }); } return null; } function getChapterBubbleHtml(apiClient, item, chapters, positionTicks) { var chapter; var index = -1; for (var i = 0, length = chapters.length; i < length; i++) { var currentChapter = chapters[i]; if (positionTicks >= currentChapter.StartPositionTicks) { chapter = currentChapter; index = i; } } if (!chapter) { return null; } var src = getImgUrl(item, chapter, index, 400, apiClient); if (src) { var html = '<div class="chapterThumbContainer">'; html += '<img class="chapterThumb" src="' + src + '" />'; html += '<div class="chapterThumbTextContainer">'; html += '<div class="chapterThumbText chapterThumbText-dim">'; html += chapter.Name; html += "</div>"; html += '<h2 class="chapterThumbText">'; html += datetime.getDisplayRunningTime(positionTicks); html += "</h2>"; html += "</div>"; return html + "</div>"; } return null; } function onViewHideStopPlayback() { if (playbackManager.isPlayingVideo()) { require(['shell'], function (shell) { shell.disableFullscreen(); }); var player = currentPlayer; view.removeEventListener("viewbeforehide", onViewHideStopPlayback); releaseCurrentPlayer(); playbackManager.stop(player); } } function enableStopOnBack(enabled) { view.removeEventListener("viewbeforehide", onViewHideStopPlayback); if (enabled && playbackManager.isPlayingVideo(currentPlayer)) { view.addEventListener("viewbeforehide", onViewHideStopPlayback); } } require(['shell'], function (shell) { shell.enableFullscreen(); }); var currentPlayer; var comingUpNextDisplayed; var currentUpNextDialog; var isEnabled; var currentItem; var recordingButtonManager; var enableProgressByTimeOfDay; var supportsBrightnessChange; var currentVisibleMenu; var statsOverlay; var osdHideTimeout; var lastPointerMoveData; var self = this; var currentPlayerSupportedCommands = []; var currentRuntimeTicks = 0; var lastUpdateTime = 0; var programStartDateMs = 0; var programEndDateMs = 0; var playbackStartTimeTicks = 0; var subtitleSyncOverlay; var volumeSliderTimer; var nowPlayingVolumeSlider = view.querySelector(".osdVolumeSlider"); var nowPlayingVolumeSliderContainer = view.querySelector(".osdVolumeSliderContainer"); var nowPlayingPositionSlider = view.querySelector(".osdPositionSlider"); var nowPlayingPositionText = view.querySelector(".osdPositionText"); var nowPlayingDurationText = view.querySelector(".osdDurationText"); var startTimeText = view.querySelector(".startTimeText"); var endTimeText = view.querySelector(".endTimeText"); var endsAtText = view.querySelector(".endsAtText"); var btnRewind = view.querySelector(".btnRewind"); var btnFastForward = view.querySelector(".btnFastForward"); var transitionEndEventName = dom.whichTransitionEvent(); var headerElement = document.querySelector(".skinHeader"); var osdBottomElement = document.querySelector(".videoOsdBottom-maincontrols"); if (layoutManager.tv) { nowPlayingPositionSlider.classList.add("focusable"); nowPlayingPositionSlider.enableKeyboardDragging(); } view.addEventListener("viewbeforeshow", function (e) { headerElement.classList.add("osdHeader"); Emby.Page.setTransparency("full"); }); view.addEventListener("viewshow", function 
(e) { try { events.on(playbackManager, "playerchange", onPlayerChange); bindToPlayer(playbackManager.getCurrentPlayer()); dom.addEventListener(document, window.PointerEvent ? "pointermove" : "mousemove", onPointerMove, { passive: true }); showOsd(); inputManager.on(window, onInputCommand); dom.addEventListener(window, "keydown", onWindowKeyDown, { capture: true }); dom.addEventListener(window, window.PointerEvent ? "pointerdown" : "mousedown", onWindowMouseDown, { passive: true }); dom.addEventListener(window, "touchstart", onWindowTouchStart, { passive: true }); } catch (e) { require(['appRouter'], function(appRouter) { appRouter.showDirect('/'); }); } }); view.addEventListener("viewbeforehide", function () { if (statsOverlay) { statsOverlay.enabled(false); } dom.removeEventListener(window, "keydown", onWindowKeyDown, { capture: true }); dom.removeEventListener(window, window.PointerEvent ? "pointerdown" : "mousedown", onWindowMouseDown, { passive: true }); dom.removeEventListener(window, "touchstart", onWindowTouchStart, { passive: true }); stopOsdHideTimer(); headerElement.classList.remove("osdHeader"); headerElement.classList.remove("osdHeader-hidden"); dom.removeEventListener(document, window.PointerEvent ? "pointermove" : "mousemove", onPointerMove, { passive: true }); inputManager.off(window, onInputCommand); events.off(playbackManager, "playerchange", onPlayerChange); releaseCurrentPlayer(); }); view.querySelector(".btnFullscreen").addEventListener("click", function () { playbackManager.toggleFullscreen(currentPlayer); }); view.querySelector(".btnPip").addEventListener("click", function () { playbackManager.togglePictureInPicture(currentPlayer); }); view.querySelector(".btnAirPlay").addEventListener("click", function () { playbackManager.toggleAirPlay(currentPlayer); }); view.querySelector(".btnVideoOsdSettings").addEventListener("click", onSettingsButtonClick); view.addEventListener("viewhide", function () { headerElement.classList.remove("hide"); }); view.addEventListener("viewdestroy", function () { if (self.touchHelper) { self.touchHelper.destroy(); self.touchHelper = null; } if (recordingButtonManager) { recordingButtonManager.destroy(); recordingButtonManager = null; } destroyStats(); destroySubtitleSync(); }); var lastPointerDown = 0; dom.addEventListener(view, window.PointerEvent ? "pointerdown" : "click", function (e) { if (dom.parentWithClass(e.target, ["videoOsdBottom", "upNextContainer"])) { return void showOsd(); } var pointerType = e.pointerType || (layoutManager.mobile ? 
"touch" : "mouse"); var now = new Date().getTime(); switch (pointerType) { case "touch": if (now - lastPointerDown > 300) { lastPointerDown = now; toggleOsd(); } break; case "mouse": if (!e.button) { playbackManager.playPause(currentPlayer); showOsd(); } break; default: playbackManager.playPause(currentPlayer); showOsd(); } }, { passive: true }); if (browser.touch) { dom.addEventListener(view, "dblclick", onDoubleClick, {}); } else { var options = { passive: true }; dom.addEventListener(view, "dblclick", function () { playbackManager.toggleFullscreen(currentPlayer); }, options); } view.querySelector(".buttonMute").addEventListener("click", function () { playbackManager.toggleMute(currentPlayer); }); nowPlayingVolumeSlider.addEventListener("change", function () { if (volumeSliderTimer) { // interupt and remove existing timer clearTimeout(volumeSliderTimer); volumeSliderTimer = null; } playbackManager.setVolume(this.value, currentPlayer); }); nowPlayingVolumeSlider.addEventListener("mousemove", function () { if (!volumeSliderTimer) { var that = this; // register new timer volumeSliderTimer = setTimeout(function() { playbackManager.setVolume(that.value, currentPlayer); // delete timer after completion volumeSliderTimer = null; }, 700); } }); nowPlayingVolumeSlider.addEventListener("touchmove", function () { if (!volumeSliderTimer) { var that = this; // register new timer volumeSliderTimer = setTimeout(function() { playbackManager.setVolume(that.value, currentPlayer); // delete timer after completion volumeSliderTimer = null; }, 700); } }); nowPlayingPositionSlider.addEventListener("change", function () { var player = currentPlayer; if (player) { var newPercent = parseFloat(this.value); if (enableProgressByTimeOfDay) { var seekAirTimeTicks = newPercent / 100 * (programEndDateMs - programStartDateMs) * 1e4; seekAirTimeTicks += 1e4 * programStartDateMs; seekAirTimeTicks -= playbackStartTimeTicks; playbackManager.seek(seekAirTimeTicks, player); } else { playbackManager.seekPercent(newPercent, player); } } }); nowPlayingPositionSlider.getBubbleHtml = function (value) { showOsd(); if (enableProgressByTimeOfDay) { if (programStartDateMs && programEndDateMs) { var ms = programEndDateMs - programStartDateMs; ms /= 100; ms *= value; ms += programStartDateMs; return '<h1 class="sliderBubbleText">' + getDisplayTimeWithoutAmPm(new Date(parseInt(ms)), true) + "</h1>"; } return "--:--"; } if (!currentRuntimeTicks) { return "--:--"; } var ticks = currentRuntimeTicks; ticks /= 100; ticks *= value; var item = currentItem; if (item && item.Chapters && item.Chapters.length && item.Chapters[0].ImageTag) { var html = getChapterBubbleHtml(connectionManager.getApiClient(item.ServerId), item, item.Chapters, ticks); if (html) { return html; } } return '<h1 class="sliderBubbleText">' + datetime.getDisplayRunningTime(ticks) + "</h1>"; }; view.querySelector(".btnPreviousTrack").addEventListener("click", function () { playbackManager.previousTrack(currentPlayer); }); view.querySelector(".btnPause").addEventListener("click", function () { // Ignore 'click' if another element was originally clicked (Firefox/Edge issue) if (this.contains(clickedElement)) { playbackManager.playPause(currentPlayer); } }); view.querySelector(".btnNextTrack").addEventListener("click", function () { playbackManager.nextTrack(currentPlayer); }); btnRewind.addEventListener("click", function () { playbackManager.rewind(currentPlayer); }); btnFastForward.addEventListener("click", function () { playbackManager.fastForward(currentPlayer); }); 
view.querySelector(".btnAudio").addEventListener("click", showAudioTrackSelection); view.querySelector(".btnSubtitles").addEventListener("click", showSubtitleTrackSelection); if (browser.touch) { (function () { require(["touchHelper"], function (TouchHelper) { self.touchHelper = new TouchHelper(view, { swipeYThreshold: 30, triggerOnMove: true, preventDefaultOnMove: true, ignoreTagNames: ["BUTTON", "INPUT", "TEXTAREA"] }); events.on(self.touchHelper, "swipeup", onVerticalSwipe); events.on(self.touchHelper, "swipedown", onVerticalSwipe); }); })(); } }; });
1
13,140
Are we aware that this breaks older browsers like WebOS? :) They don't handle `let`.
jellyfin-jellyfin-web
js
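The review comment on the jellyfin-web record above rests on a real constraint: `let` is ES2015 syntax, and engines that cannot parse it (the reviewer names WebOS) fail at script load time, so the whole module breaks rather than degrading. A minimal TypeScript sketch of what is at stake; the function names are invented for illustration:

```typescript
// With `let`, each loop iteration gets its own binding, so every closure
// captures a distinct value of `i`.
function handlersWithLet(): Array<() => number> {
    const out: Array<() => number> = [];
    for (let i = 0; i < 3; i++) {
        out.push(() => i); // handlers return 0, 1, 2
    }
    return out;
}

// The ES5-safe equivalent: `var` is function-scoped, so an IIFE is needed
// to capture the current value. This is roughly what a transpiler emits
// when targeting ES5, and why sources that must run on old engines
// avoid bare `let`.
function handlersWithVar(): Array<() => number> {
    var out: Array<() => number> = [];
    for (var i = 0; i < 3; i++) {
        out.push((function (captured: number) {
            return function (): number { return captured; };
        })(i)); // handlers also return 0, 1, 2
    }
    return out;
}
```

Without the IIFE in the second version, every handler would return 3, the classic closure-in-a-loop bug that `let` removes; this is why transpiling to an ES5 target, rather than hand-writing `let`, is the usual fix for old engines.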
@@ -86,7 +86,7 @@ class Cache(CacheBase): if ttl == 0: self.delete(key) else: - # We can't use touch here because we need to update the TTL value in the record. + # We can't use touch here because we need to update the TTL value in the object. value = self.get(key) self.set(key, value, ttl)
1
import logging from functools import wraps from math import ceil, floor from time import time from pyramid.settings import aslist from kinto.core.cache import CacheBase from kinto.core.storage import exceptions from kinto.core.utils import json, memcache logger = logging.getLogger(__name__) def wrap_memcached_error(func): @wraps(func) def wrapped(*args, **kwargs): try: return func(*args, **kwargs) except TypeError: raise except ( memcache.Client.MemcachedKeyError, memcache.Client.MemcachedStringEncodingError, ) as e: logger.exception(e) raise exceptions.BackendError(original=e) return wrapped def create_from_config(config, prefix=""): """Memcached client instantiation from settings. """ settings = config.get_settings() hosts = aslist(settings[prefix + "hosts"]) return memcache.Client(hosts) class Cache(CacheBase): """Cache backend implementation using Memcached. Enable in configuration:: kinto.cache_backend = kinto.core.cache.memcached *(Optional)* Instance location URI can be customized:: kinto.cache_hosts = 127.0.0.1:11211 127.0.0.1:11212 :noindex: """ def __init__(self, client, *args, **kwargs): super(Cache, self).__init__(*args, **kwargs) self._client = client def initialize_schema(self, dry_run=False): # Nothing to do. pass @wrap_memcached_error def flush(self): self._client.flush_all() @wrap_memcached_error def _get(self, key): value = self._client.get(self.prefix + key) if not value: return None, 0 data = json.loads(value) return data["value"], data["ttl"] def ttl(self, key): _, ttl = self._get(key) val = ttl - time() return floor(val) def get(self, key): value, _ = self._get(key) return value @wrap_memcached_error def expire(self, key, ttl): if ttl == 0: self.delete(key) else: # We can't use touch here because we need to update the TTL value in the record. value = self.get(key) self.set(key, value, ttl) @wrap_memcached_error def set(self, key, value, ttl): if isinstance(value, bytes): raise TypeError("a string-like object is required, not 'bytes'") value = json.dumps({"value": value, "ttl": ceil(time() + ttl)}) self._client.set(self.prefix + key, value, int(ttl)) @wrap_memcached_error def delete(self, key): value = self.get(key) self._client.delete(self.prefix + key) return value def load_from_config(config): settings = config.get_settings() client = create_from_config(config, prefix="cache_") return Cache(client, cache_prefix=settings["cache_prefix"])
1
11,965
This isn't really a Kinto record but a Memcached record. I could kind of go either way on this.
Kinto-kinto
py
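The Kinto patch above changes only a comment, but that comment documents a real design constraint: the backend serializes the TTL inside the cached JSON envelope, so memcached's `touch` (which bumps only the server-side expiry) would leave the embedded TTL stale. A hedged TypeScript sketch of that read-modify-write pattern; `CacheClient` is a hypothetical minimal interface for this example, not a real memcached binding:

```typescript
// Hypothetical minimal client surface; a real memcached client would be async.
interface CacheClient {
    get(key: string): string | undefined;
    set(key: string, value: string, ttlSeconds: number): void;
    delete(key: string): void;
}

// The stored envelope mirrors the backend above: the absolute expiry time
// travels with the value itself.
interface Envelope {
    value: unknown;
    ttl: number; // absolute expiry, epoch seconds
}

function expire(client: CacheClient, key: string, ttlSeconds: number): void {
    if (ttlSeconds === 0) {
        client.delete(key);
        return;
    }
    // A bare `touch` would move only the server-side expiry; the `ttl` field
    // inside the envelope would still hold the old deadline, so any TTL
    // computed from it would be wrong. Hence: read, rebuild, rewrite.
    const raw = client.get(key);
    const value = raw !== undefined ? (JSON.parse(raw) as Envelope).value : null;
    const envelope: Envelope = {
        value,
        ttl: Math.ceil(Date.now() / 1000 + ttlSeconds),
    };
    client.set(key, JSON.stringify(envelope), ttlSeconds);
}
```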
@@ -582,6 +582,7 @@ public class OAuthWebviewHelper implements KeyChainAliasCallback { displayName(accountOptions.displayName).email(accountOptions.email). photoUrl(accountOptions.photoUrl).thumbnailUrl(accountOptions.thumbnailUrl). additionalOauthValues(accountOptions.additionalOauthValues).build(); + account.downloadProfilePhoto(); if (id.customAttributes != null) { mgr.getAdminSettingsManager().setPrefs(id.customAttributes, account); }
1
/* * Copyright (c) 2011-present, salesforce.com, inc. * All rights reserved. * Redistribution and use of this software in source and binary forms, with or * without modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of salesforce.com, inc. nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission of salesforce.com, inc. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.salesforce.androidsdk.ui; import android.app.Activity; import android.app.PendingIntent; import android.content.Context; import android.content.Intent; import android.content.pm.ApplicationInfo; import android.content.pm.PackageManager; import android.content.res.Resources; import android.graphics.BitmapFactory; import android.net.Uri; import android.net.http.SslError; import android.os.AsyncTask; import android.os.Bundle; import android.security.KeyChain; import android.security.KeyChainAliasCallback; import android.security.KeyChainException; import android.support.customtabs.CustomTabsIntent; import android.text.TextUtils; import android.webkit.ClientCertRequest; import android.webkit.SslErrorHandler; import android.webkit.WebChromeClient; import android.webkit.WebSettings; import android.webkit.WebView; import android.webkit.WebViewClient; import android.widget.Toast; import com.salesforce.androidsdk.R; import com.salesforce.androidsdk.accounts.UserAccount; import com.salesforce.androidsdk.accounts.UserAccountBuilder; import com.salesforce.androidsdk.accounts.UserAccountManager; import com.salesforce.androidsdk.analytics.EventBuilderHelper; import com.salesforce.androidsdk.app.SalesforceSDKManager; import com.salesforce.androidsdk.auth.HttpAccess; import com.salesforce.androidsdk.auth.OAuth2; import com.salesforce.androidsdk.auth.OAuth2.IdServiceResponse; import com.salesforce.androidsdk.auth.OAuth2.TokenEndpointResponse; import com.salesforce.androidsdk.config.BootConfig; import com.salesforce.androidsdk.config.LoginServerManager; import com.salesforce.androidsdk.config.RuntimeConfig; import com.salesforce.androidsdk.push.PushMessaging; import com.salesforce.androidsdk.rest.ClientManager; import com.salesforce.androidsdk.rest.ClientManager.LoginOptions; import com.salesforce.androidsdk.security.PasscodeManager; import com.salesforce.androidsdk.util.EventsObservable; import 
com.salesforce.androidsdk.util.EventsObservable.EventType; import com.salesforce.androidsdk.util.MapUtil; import com.salesforce.androidsdk.util.SalesforceSDKLogger; import com.salesforce.androidsdk.util.UriFragmentParser; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import java.net.URI; import java.net.URISyntaxException; import java.security.PrivateKey; import java.security.cert.X509Certificate; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; /** * Helper class to manage a WebView instance that is going through the OAuth login process. * Basic flow is * a) load and show the login page to the user * b) user logs in and authorizes the app * c) we see the navigation to the auth complete Url, and grab the tokens * d) we call the Id service to obtain additional info about the user * e) we create a local account, and return an authentication result bundle. * f) done! * */ public class OAuthWebviewHelper implements KeyChainAliasCallback { // Set a custom permission on your connected application with that name if you want // the application to be restricted to managed devices public static final String MUST_BE_MANAGED_APP_PERM = "must_be_managed_app"; public static final String AUTHENTICATION_FAILED_INTENT = "com.salesforce.auth.intent.AUTHENTICATION_ERROR"; public static final String HTTP_ERROR_RESPONSE_CODE_INTENT = "com.salesforce.auth.intent.HTTP_RESPONSE_CODE"; public static final String RESPONSE_ERROR_INTENT = "com.salesforce.auth.intent.RESPONSE_ERROR"; public static final String RESPONSE_ERROR_DESCRIPTION_INTENT = "com.salesforce.auth.intent.RESPONSE_ERROR_DESCRIPTION"; private static final String TAG = "OAuthWebViewHelper"; private static final String ACCOUNT_OPTIONS = "accountOptions"; // background executor private final ExecutorService threadPool = Executors.newFixedThreadPool(1); /** * the host activity/fragment should pass in an implementation of this * interface so that it can notify it of things it needs to do as part of * the oauth process. */ public interface OAuthWebviewHelperEvents { /** we're starting to load this login page into the webview */ void loadingLoginPage(String loginUrl); /** We've completed the auth process and here's the resulting Authentication Result bundle to return to the Authenticator */ void onAccountAuthenticatorResult(Bundle authResult); /** we're in some end state and requesting that the host activity be finished/closed. */ void finish(UserAccount userAccount); } /** * Construct a new OAuthWebviewHelper and perform the initial configuration of the Webview. */ public OAuthWebviewHelper(Activity activity, OAuthWebviewHelperEvents callback, LoginOptions options, WebView webview, Bundle savedInstanceState) { assert options != null && callback != null && webview != null && activity != null; this.activity = activity; this.callback = callback; this.loginOptions = options; this.webview = webview; final WebSettings webSettings = webview.getSettings(); webSettings.setJavaScriptEnabled(true); webSettings.setUserAgentString(SalesforceSDKManager.getInstance().getUserAgent()); webview.setWebViewClient(makeWebViewClient()); webview.setWebChromeClient(makeWebChromeClient()); /* * Restores WebView's state if available. * This ensures the user is not forced to type in credentials again * once the auth process has been kicked off.
*/ if (savedInstanceState != null) { webview.restoreState(savedInstanceState); accountOptions = AccountOptions.fromBundle(savedInstanceState.getBundle(ACCOUNT_OPTIONS)); } else { clearCookies(); } } private final OAuthWebviewHelperEvents callback; protected final LoginOptions loginOptions; private final WebView webview; private AccountOptions accountOptions; private Activity activity; private PrivateKey key; private X509Certificate[] certChain; public void saveState(Bundle outState) { webview.saveState(outState); if (accountOptions != null) { // we have completed the auth flow but not created the account, because we need to create a pin outState.putBundle(ACCOUNT_OPTIONS, accountOptions.asBundle()); } } public WebView getWebView() { return webview; } public void clearCookies() { SalesforceSDKManager.getInstance().removeAllCookies(); } public void clearView() { webview.loadUrl("about:blank"); } /** * Method called by login activity when it resumes after the passcode activity * * When the server has a mobile policy requiring a passcode, we start the passcode activity after completing the * auth flow (see onAuthFlowComplete). * When the passcode activity completes, the login activity's onActivityResult gets invoked, and it calls this method * to finalize the account creation. */ public void onNewPasscode() { /* * Re-encryption of existing accounts with the new passcode is taken * care of in the 'Confirm Passcode' step in PasscodeActivity. */ if (accountOptions != null) { final UserAccount addedAccount = addAccount(); callback.finish(addedAccount); } } /** Factory method for the WebViewClient, you can replace this with something else if you need to */ protected WebViewClient makeWebViewClient() { return new AuthWebViewClient(); } /** Factory method for the WebChromeClient, you can replace this with something else if you need to */ protected WebChromeClient makeWebChromeClient() { return new WebChromeClient(); } protected Context getContext() { return webview.getContext(); } /** * Called when the user facing part of the auth flow completed with an error. * We show the user an error and end the activity. * * @param error Error. * @param errorDesc Error description. * @param e Exception. */ protected void onAuthFlowError(String error, String errorDesc, Exception e) { SalesforceSDKLogger.w(TAG, error + ": " + errorDesc, e); // look for deny. 
kick them back to login, so clear cookies and repoint browser if ("access_denied".equals(error) && "end-user denied authorization".equals(errorDesc)) { webview.post(new Runnable() { @Override public void run() { clearCookies(); loadLoginPage(); } }); } else { Toast t = Toast.makeText(webview.getContext(), error + " : " + errorDesc, Toast.LENGTH_LONG); webview.postDelayed(new Runnable() { @Override public void run() { callback.finish(null); } }, t.getDuration()); t.show(); } final Intent intent = new Intent(AUTHENTICATION_FAILED_INTENT); if (e != null && e instanceof OAuth2.OAuthFailedException) { final OAuth2.OAuthFailedException exception = (OAuth2.OAuthFailedException) e; int statusCode = exception.getHttpStatusCode(); intent.putExtra(HTTP_ERROR_RESPONSE_CODE_INTENT, statusCode); final OAuth2.TokenErrorResponse errorResponse = exception.getTokenErrorResponse(); if (errorResponse != null) { final String tokenError = errorResponse.error; final String tokenErrorDesc = errorResponse.errorDescription; intent.putExtra(RESPONSE_ERROR_INTENT, tokenError); intent.putExtra(RESPONSE_ERROR_DESCRIPTION_INTENT, tokenErrorDesc); } } SalesforceSDKManager.getInstance().getAppContext().sendBroadcast(intent); } protected void showError(Exception exception) { Toast.makeText(getContext(), getContext().getString(R.string.sf__generic_error, exception.toString()), Toast.LENGTH_LONG).show(); } /** * Tells the webview to load the authorization page. * We also update the window title, so it's easier to * see which system you're logging in to */ public void loadLoginPage() { if (TextUtils.isEmpty(loginOptions.getJwt())) { loginOptions.setLoginUrl(getLoginUrl()); doLoadPage(false); } else { new SwapJWTForAccessTokenTask().execute(loginOptions); } } private void doLoadPage(boolean jwtFlow) { try { URI uri = getAuthorizationUrl(jwtFlow); callback.loadingLoginPage(loginOptions.getLoginUrl()); if (SalesforceSDKManager.getInstance().isBrowserLoginEnabled()) { loadLoginPageInChrome(uri); } else { webview.loadUrl(uri.toString()); } } catch (URISyntaxException ex) { showError(ex); } } private void loadLoginPageInChrome(URI uri) { final Uri url = Uri.parse(uri.toString()); final CustomTabsIntent.Builder intentBuilder = new CustomTabsIntent.Builder(); /* * Sets custom animation to slide in and out for Chrome custom tab so that * it doesn't look like a swizzle out of the app and back in. */ intentBuilder.setStartAnimations(activity, android.R.anim.slide_in_left, android.R.anim.slide_out_right); intentBuilder.setExitAnimations(activity, android.R.anim.slide_in_left, android.R.anim.slide_out_right); // Replaces default 'Close Tab' button with a custom back arrow instead of 'x'. final Resources resources = activity.getResources(); intentBuilder.setCloseButtonIcon(BitmapFactory.decodeResource(resources, R.drawable.sf__action_back)); intentBuilder.setToolbarColor(resources.getColor(R.color.sf__chrome_nav_bar_azure)); // Adds a menu item to change server. final Intent changeServerIntent = new Intent(activity, ServerPickerActivity.class); final PendingIntent changeServerPendingIntent = PendingIntent.getActivity(activity, LoginActivity.PICK_SERVER_REQUEST_CODE, changeServerIntent, PendingIntent.FLAG_CANCEL_CURRENT); intentBuilder.addMenuItem(activity.getString(R.string.sf__pick_server), changeServerPendingIntent); final CustomTabsIntent customTabsIntent = intentBuilder.build(); /* * Sets the package explicitly to Google Chrome to avoid other browsers.
This * ensures that we don't display a popup allowing the user to select a browser * because some browsers don't support certain authentication schemes. If Chrome * is not available, we will use the default browser that the device uses. */ if (doesChromeExist()) { customTabsIntent.intent.setPackage("com.android.chrome"); } /* * Prevents Chrome custom tab from staying in the activity history stack. This flag * ensures that Chrome custom tab is dismissed once the login process is complete. */ customTabsIntent.intent.setFlags(Intent.FLAG_ACTIVITY_NO_HISTORY); customTabsIntent.launchUrl(activity, url); } private boolean doesChromeExist() { boolean exists = false; final PackageManager packageManager = activity.getPackageManager(); ApplicationInfo applicationInfo = null; try { applicationInfo = packageManager.getApplicationInfo("com.android.chrome", 0); } catch (PackageManager.NameNotFoundException e) { SalesforceSDKLogger.w(TAG, "Chrome does not exist on this device", e); } if (applicationInfo != null) { exists = true; } return exists; } protected String getOAuthClientId() { return loginOptions.getOauthClientId(); } protected URI getAuthorizationUrl(Boolean jwtFlow) throws URISyntaxException { if (jwtFlow) { return OAuth2.getAuthorizationUrl(new URI(loginOptions.getLoginUrl()), getOAuthClientId(), loginOptions.getOauthCallbackUrl(), loginOptions.getOauthScopes(), getAuthorizationDisplayType(), loginOptions.getJwt(), loginOptions.getLoginUrl(), loginOptions.getAdditionalParameters()); } return OAuth2.getAuthorizationUrl(new URI(loginOptions.getLoginUrl()), getOAuthClientId(), loginOptions.getOauthCallbackUrl(), loginOptions.getOauthScopes(), getAuthorizationDisplayType(), loginOptions.getAdditionalParameters()); } protected URI getAuthorizationUrl() throws URISyntaxException { return getAuthorizationUrl(false); } /** * Override this to replace the default login webview's display param with * your custom display param. You can override this by either subclassing this class, * or adding "<string name="sf__oauth_display_type">desiredDisplayParam</string>" * to your app's resource so that it overrides the default value in the SDK library. * * @return the OAuth login display type, e.g. 'mobile', 'touch', * see the OAuth docs for the complete list of valid values. */ protected String getAuthorizationDisplayType() { return this.getContext().getString(R.string.oauth_display_type); } /** * Override this method to customize the login url. * @return login url */ protected String getLoginUrl() { return SalesforceSDKManager.getInstance().getLoginServerManager().getSelectedLoginServer().url.trim(); } /** * WebViewClient which intercepts the redirect to the oauth callback url. * That redirect marks the end of the user facing portion of the authentication flow. */ protected class AuthWebViewClient extends WebViewClient { @Override public void onPageFinished(WebView view, String url) { EventsObservable.get().notifyEvent(EventType.AuthWebViewPageFinished, url); super.onPageFinished(view, url); } @Override public boolean shouldOverrideUrlLoading(WebView view, String url) { boolean isDone = url.replace("///", "/").toLowerCase(Locale.US).startsWith(loginOptions.getOauthCallbackUrl().replace("///", "/").toLowerCase(Locale.US)); if (isDone) { Uri callbackUri = Uri.parse(url); Map<String, String> params = UriFragmentParser.parse(callbackUri); String error = params.get("error"); // Did we fail? 
if (error != null) { String errorDesc = params.get("error_description"); onAuthFlowError(error, errorDesc, null); } // Or succeed? else { TokenEndpointResponse tr = new TokenEndpointResponse(params); onAuthFlowComplete(tr); } } return isDone; } @Override public void onReceivedSslError(WebView view, SslErrorHandler handler, SslError error) { int primError = error.getPrimaryError(); int primErrorStringId = R.string.sf__ssl_unknown_error; switch (primError) { case SslError.SSL_EXPIRED: primErrorStringId = R.string.sf__ssl_expired; break; case SslError.SSL_IDMISMATCH: primErrorStringId = R.string.sf__ssl_id_mismatch; break; case SslError.SSL_NOTYETVALID: primErrorStringId = R.string.sf__ssl_not_yet_valid; break; case SslError.SSL_UNTRUSTED: primErrorStringId = R.string.sf__ssl_untrusted; break; } // Building text message to show String text = getContext().getString(R.string.sf__ssl_error, getContext().getString(primErrorStringId)); SalesforceSDKLogger.e(TAG, "Received SSL error for server: " + text); // Bringing up toast Toast.makeText(getContext(), text, Toast.LENGTH_LONG).show(); handler.cancel(); } @Override public void onReceivedClientCertRequest(WebView view, ClientCertRequest request) { SalesforceSDKLogger.d(TAG, "Received client certificate request from server"); request.proceed(key, certChain); } } /** * Called when the user facing part of the auth flow completed successfully. * The last step is to call the identity service to get the username. */ protected void onAuthFlowComplete(TokenEndpointResponse tr) { FinishAuthTask t = new FinishAuthTask(); t.execute(tr); } private class SwapJWTForAccessTokenTask extends BaseFinishAuthFlowTask<LoginOptions> { @Override protected TokenEndpointResponse performRequest(LoginOptions options) { try { return OAuth2.swapJWTForTokens(HttpAccess.DEFAULT, new URI(options.getLoginUrl()), options.getJwt()); } catch (Exception e) { backgroundException = e; } return null; } @Override protected void onPostExecute(TokenEndpointResponse tr) { if (backgroundException != null) { handleJWTError(); loginOptions.setJwt(null); return; } if (tr != null && tr.authToken != null) { loginOptions.setJwt(tr.authToken); doLoadPage(true); } else { doLoadPage(false); handleJWTError(); } loginOptions.setJwt(null); } private void handleJWTError() { final SalesforceSDKManager mgr = SalesforceSDKManager.getInstance(); onAuthFlowError(getContext().getString(R.string.sf__generic_authentication_error_title), getContext().getString(R.string.sf__jwt_authentication_error), backgroundException); } } /** * Base class with common code for the background task that finishes off the auth process. */ protected abstract class BaseFinishAuthFlowTask<RequestType> extends AsyncTask<RequestType, Boolean, TokenEndpointResponse> { protected volatile Exception backgroundException; protected volatile IdServiceResponse id = null; public BaseFinishAuthFlowTask() { } @SafeVarargs @Override protected final TokenEndpointResponse doInBackground(RequestType ... params) { try { publishProgress(true); return performRequest(params[0]); } catch (Exception ex) { handleException(ex); } return null; } protected abstract TokenEndpointResponse performRequest(RequestType param) throws Exception; @Override protected void onPostExecute(OAuth2.TokenEndpointResponse tr) { final SalesforceSDKManager mgr = SalesforceSDKManager.getInstance(); // Failure cases. 
if (backgroundException != null) { SalesforceSDKLogger.w(TAG, "Exception thrown while retrieving token response", backgroundException); onAuthFlowError(getContext().getString(R.string.sf__generic_authentication_error_title), getContext().getString(R.string.sf__generic_authentication_error), backgroundException); callback.finish(null); return; } if (id.customPermissions != null) { final boolean mustBeManagedApp = id.customPermissions.optBoolean(MUST_BE_MANAGED_APP_PERM); if (mustBeManagedApp && !RuntimeConfig.getRuntimeConfig(getContext()).isManagedApp()) { onAuthFlowError(getContext().getString(R.string.sf__generic_authentication_error_title), getContext().getString(R.string.sf__managed_app_error), backgroundException); callback.finish(null); return; } } // Putting together all the information needed to create the new account. accountOptions = new AccountOptions(id.username, tr.refreshToken, tr.authToken, tr.idUrl, tr.instanceUrl, tr.orgId, tr.userId, tr.communityId, tr.communityUrl, id.firstName, id.lastName, id.displayName, id.email, id.pictureUrl, id.thumbnailUrl, tr.additionalOauthValues); // Sets additional admin prefs, if they exist. final UserAccount account = UserAccountBuilder.getInstance().authToken(accountOptions.authToken). refreshToken(accountOptions.refreshToken).loginServer(loginOptions.getLoginUrl()). idUrl(accountOptions.identityUrl).instanceServer(accountOptions.instanceUrl). orgId(accountOptions.orgId).userId(accountOptions.userId).username(accountOptions.username). accountName(buildAccountName(accountOptions.username, accountOptions.instanceUrl)). communityId(accountOptions.communityId).communityUrl(accountOptions.communityUrl). firstName(accountOptions.firstName).lastName(accountOptions.lastName). displayName(accountOptions.displayName).email(accountOptions.email). photoUrl(accountOptions.photoUrl).thumbnailUrl(accountOptions.thumbnailUrl). additionalOauthValues(accountOptions.additionalOauthValues).build(); if (id.customAttributes != null) { mgr.getAdminSettingsManager().setPrefs(id.customAttributes, account); } if (id.customPermissions != null) { mgr.getAdminPermsManager().setPrefs(id.customPermissions, account); } // Screen lock required by mobile policy. if (id.screenLockTimeout > 0) { // Stores the mobile policy for the org. final PasscodeManager passcodeManager = mgr.getPasscodeManager(); passcodeManager.storeMobilePolicyForOrg(account, id.screenLockTimeout * 1000 * 60, id.pinLength); passcodeManager.setTimeoutMs(id.screenLockTimeout * 1000 * 60); boolean changeRequired = passcodeManager.setMinPasscodeLength((Activity) getContext(), id.pinLength); /* * Checks if a passcode already exists. If a passcode has NOT * been created yet, the user is taken through the passcode * creation flow, at the end of which account data is encrypted. */ if (!passcodeManager.hasStoredPasscode(mgr.getAppContext())) { // This will bring up the create passcode screen - we will create the account in onResume. passcodeManager.setEnabled(true); passcodeManager.lockIfNeeded((Activity) getContext(), true); } else if (!changeRequired) { // If a passcode change is required, the lock screen will have already been set in setMinPasscodeLength. final UserAccount addedAccount = addAccount(); callback.finish(addedAccount); } } // No screen lock required or no mobile policy specified. 
else { final PasscodeManager passcodeManager = mgr.getPasscodeManager(); passcodeManager.storeMobilePolicyForOrg(account, 0, PasscodeManager.MIN_PASSCODE_LENGTH); final UserAccount addedAccount = addAccount(); callback.finish(addedAccount); } } protected void handleException(Exception ex) { if (ex.getMessage() != null) { SalesforceSDKLogger.w(TAG, "Exception thrown", ex); } backgroundException = ex; } } /** * This is a background process that will call the identity service to get the info we need from * the Identity service, and finally wrap up and create account. */ private class FinishAuthTask extends BaseFinishAuthFlowTask<TokenEndpointResponse> { @Override protected TokenEndpointResponse performRequest(TokenEndpointResponse tr) throws Exception { try { id = OAuth2.callIdentityService( HttpAccess.DEFAULT, tr.idUrlWithInstance, tr.authToken); } catch(Exception e) { backgroundException = e; } return tr; } } protected UserAccount addAccount() { ClientManager clientManager = new ClientManager(getContext(), SalesforceSDKManager.getInstance().getAccountType(), loginOptions, SalesforceSDKManager.getInstance().shouldLogoutWhenTokenRevoked()); // Create account name (shown in Settings -> Accounts & sync) String accountName = buildAccountName(accountOptions.username, accountOptions.instanceUrl); // New account Bundle extras = clientManager.createNewAccount(accountName, accountOptions.username, accountOptions.refreshToken, accountOptions.authToken, accountOptions.instanceUrl, loginOptions.getLoginUrl(), accountOptions.identityUrl, getOAuthClientId(), accountOptions.orgId, accountOptions.userId, accountOptions.communityId, accountOptions.communityUrl, accountOptions.firstName, accountOptions.lastName, accountOptions.displayName, accountOptions.email, accountOptions.photoUrl, accountOptions.thumbnailUrl, accountOptions.additionalOauthValues); /* * Registers for push notifications, if push notification client ID is present. * This step needs to happen after the account has been added by client * manager, so that the push service has all the account info it needs. */ final Context appContext = SalesforceSDKManager.getInstance().getAppContext(); final String pushNotificationId = BootConfig.getBootConfig(appContext).getPushNotificationClientId(); final UserAccount account = UserAccountBuilder.getInstance().authToken(accountOptions.authToken). refreshToken(accountOptions.refreshToken).loginServer(loginOptions.getLoginUrl()). idUrl(accountOptions.identityUrl).instanceServer(accountOptions.instanceUrl). orgId(accountOptions.orgId).userId(accountOptions.userId).username(accountOptions.username). accountName(accountName).communityId(accountOptions.communityId). communityUrl(accountOptions.communityUrl).firstName(accountOptions.firstName). lastName(accountOptions.lastName).displayName(accountOptions.displayName). email(accountOptions.email).photoUrl(accountOptions.photoUrl). thumbnailUrl(accountOptions.thumbnailUrl). additionalOauthValues(accountOptions.additionalOauthValues).build(); if (!TextUtils.isEmpty(pushNotificationId)) { PushMessaging.register(appContext, account); } callback.onAccountAuthenticatorResult(extras); if (SalesforceSDKManager.getInstance().getIsTestRun()) { logAddAccount(account); } else { threadPool.execute(new Runnable() { @Override public void run() { logAddAccount(account); } }); } return account; } /** * Log the addition of a new account. 
* @param account */ private void logAddAccount(UserAccount account) { final JSONObject attributes = new JSONObject(); try { final List<UserAccount> users = UserAccountManager.getInstance().getAuthenticatedUsers(); attributes.put("numUsers", (users == null) ? 0 : users.size()); final List<LoginServerManager.LoginServer> servers = SalesforceSDKManager.getInstance().getLoginServerManager().getLoginServers(); attributes.put("numLoginServers", (servers == null) ? 0 : servers.size()); if (servers != null) { final JSONArray serversJson = new JSONArray(); for (final LoginServerManager.LoginServer server : servers) { if (server != null) { serversJson.put(server.url); } } attributes.put("loginServers", serversJson); } EventBuilderHelper.createAndStoreEventSync("addUser", account, TAG, attributes); } catch (JSONException e) { SalesforceSDKLogger.e(TAG, "Exception thrown while creating JSON", e); } } /** * @return name to be shown for account in Settings -> Accounts & Sync */ protected String buildAccountName(String username, String instanceServer) { return String.format("%s (%s) (%s)", username, instanceServer, SalesforceSDKManager.getInstance().getApplicationName()); } /** * Class encapsulating the parameters required to create a new account. */ public static class AccountOptions { private static final String USER_ID = "userId"; private static final String ORG_ID = "orgId"; private static final String IDENTITY_URL = "identityUrl"; private static final String INSTANCE_URL = "instanceUrl"; private static final String AUTH_TOKEN = "authToken"; private static final String REFRESH_TOKEN = "refreshToken"; private static final String USERNAME = "username"; private static final String COMMUNITY_ID = "communityId"; private static final String COMMUNITY_URL = "communityUrl"; private static final String FIRST_NAME = "firstName"; private static final String LAST_NAME = "lastName"; private static final String DISPLAY_NAME = "displayName"; private static final String EMAIL = "email"; private static final String PHOTO_URL = "photoUrl"; private static final String THUMBNAIL_URL = "thumbnailUrl"; public final String username; public final String refreshToken; public final String authToken; public final String identityUrl; public final String instanceUrl; public final String orgId; public final String userId; public final String communityId; public final String communityUrl; public final String firstName; public final String lastName; public final String displayName; public final String email; public final String photoUrl; public final String thumbnailUrl; public final Map<String, String> additionalOauthValues; private Bundle bundle; public AccountOptions(String username, String refreshToken, String authToken, String identityUrl, String instanceUrl, String orgId, String userId, String communityId, String communityUrl, String firstName, String lastName, String displayName, String email, String photoUrl, String thumbnailUrl, Map<String, String> additionalOauthValues) { super(); this.username = username; this.refreshToken = refreshToken; this.authToken = authToken; this.identityUrl = identityUrl; this.instanceUrl = instanceUrl; this.orgId = orgId; this.userId = userId; this.communityId = communityId; this.communityUrl = communityUrl; this.firstName = firstName; this.lastName = lastName; this.displayName = displayName; this.email = email; this.photoUrl = photoUrl; this.thumbnailUrl = thumbnailUrl; this.additionalOauthValues = additionalOauthValues; bundle = new Bundle(); bundle.putString(USERNAME, username); 
bundle.putString(REFRESH_TOKEN, refreshToken); bundle.putString(AUTH_TOKEN, authToken); bundle.putString(IDENTITY_URL, identityUrl); bundle.putString(INSTANCE_URL, instanceUrl); bundle.putString(ORG_ID, orgId); bundle.putString(USER_ID, userId); bundle.putString(COMMUNITY_ID, communityId); bundle.putString(COMMUNITY_URL, communityUrl); bundle.putString(FIRST_NAME, firstName); bundle.putString(LAST_NAME, lastName); bundle.putString(DISPLAY_NAME, displayName); bundle.putString(EMAIL, email); bundle.putString(PHOTO_URL, photoUrl); bundle.putString(THUMBNAIL_URL, thumbnailUrl); bundle = MapUtil.addMapToBundle(additionalOauthValues, SalesforceSDKManager.getInstance().getAdditionalOauthKeys(), bundle); } public Bundle asBundle() { return bundle; } public static AccountOptions fromBundle(Bundle options) { if (options == null) { return null; } return new AccountOptions( options.getString(USERNAME), options.getString(REFRESH_TOKEN), options.getString(AUTH_TOKEN), options.getString(IDENTITY_URL), options.getString(INSTANCE_URL), options.getString(ORG_ID), options.getString(USER_ID), options.getString(COMMUNITY_ID), options.getString(COMMUNITY_URL), options.getString(FIRST_NAME), options.getString(LAST_NAME), options.getString(DISPLAY_NAME), options.getString(EMAIL), options.getString(PHOTO_URL), options.getString(THUMBNAIL_URL), getAdditionalOauthValues(options) ); } private static Map<String, String> getAdditionalOauthValues(Bundle options) { return MapUtil.addBundleToMap(options, SalesforceSDKManager.getInstance().getAdditionalOauthKeys(), null); } } @Override public void alias(String alias) { try { SalesforceSDKLogger.d(TAG, "Keychain alias callback received"); certChain = KeyChain.getCertificateChain(activity, alias); key = KeyChain.getPrivateKey(activity, alias); activity.runOnUiThread(new Runnable() { @Override public void run() { loadLoginPage(); } }); } catch (KeyChainException e) { SalesforceSDKLogger.e(TAG, "Exception thrown while retrieving X.509 certificate", e); } catch (InterruptedException e) { SalesforceSDKLogger.e(TAG, "Exception thrown while retrieving X.509 certificate", e); } } }
1
16,959
Kick off the download after login.
forcedotcom-SalesforceMobileSDK-Android
java
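The Salesforce patch above is a single call, `account.downloadProfilePhoto()`, placed right after the account is built, which matches the review note "Kick off the download after login." A hedged TypeScript sketch of that ordering; the types and method names here are stand-ins for illustration, not the SDK's actual API:

```typescript
// Stand-in for the freshly built account; only the fields this sketch needs.
interface UserAccount {
    username: string;
    photoUrl?: string;
    // Assumed to fetch the image at photoUrl and cache it locally.
    downloadProfilePhoto(): Promise<void>;
}

async function onAuthFlowFinished(account: UserAccount): Promise<UserAccount> {
    // Eager: start the download as soon as the account object exists, so the
    // avatar is already cached by the time any screen asks for it, instead of
    // each screen triggering its own lazy fetch.
    if (account.photoUrl) {
        void account.downloadProfilePhoto().catch(() => {
            // A missing avatar should never fail login; swallow the error and
            // let the UI fall back to a placeholder image.
        });
    }
    return account;
}
```

Not awaiting the download keeps login latency unchanged, which is presumably why the patch fires it and moves on rather than blocking account creation.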
@@ -286,7 +286,7 @@ class ErrorBaseline $xml = preg_replace_callback( '/<files (psalm-version="[^"]+") (?:php-version="(.+)"(\/?>)\n)/', /** - * @param array<int, string> $matches + * @param string[] $matches */ function (array $matches) : string { return
1
<?php namespace Psalm; use function array_filter; use function array_intersect; use function array_map; use function array_merge; use function array_reduce; use function get_loaded_extensions; use function implode; use function ksort; use const LIBXML_NOBLANKS; use function min; use const PHP_VERSION; use function phpversion; use function preg_replace_callback; use Psalm\Internal\Analyzer\IssueData; use Psalm\Internal\Provider\FileProvider; use RuntimeException; use function str_replace; use function strpos; use function usort; use function array_values; class ErrorBaseline { /** * @param array<string,array<string,array{o:int, s:array<int, string>}>> $existingIssues * * * @psalm-pure */ public static function countTotalIssues(array $existingIssues): int { $totalIssues = 0; foreach ($existingIssues as $existingIssue) { $totalIssues += array_reduce( $existingIssue, /** * @param array{o:int, s:array<int, string>} $existingIssue */ function (int $carry, array $existingIssue): int { return $carry + $existingIssue['o']; }, 0 ); } return $totalIssues; } /** * @param array<string, list<IssueData>> $issues * */ public static function create( FileProvider $fileProvider, string $baselineFile, array $issues, bool $include_php_versions ): void { $groupedIssues = self::countIssueTypesByFile($issues); self::writeToFile($fileProvider, $baselineFile, $groupedIssues, $include_php_versions); } /** * @return array<string,array<string,array{o:int, s: list<string>}>> * * @throws Exception\ConfigException */ public static function read(FileProvider $fileProvider, string $baselineFile): array { if (!$fileProvider->fileExists($baselineFile)) { throw new Exception\ConfigException("{$baselineFile} does not exist or is not readable"); } $xmlSource = $fileProvider->getContents($baselineFile); $baselineDoc = new \DOMDocument(); $baselineDoc->loadXML($xmlSource, LIBXML_NOBLANKS); $filesElement = $baselineDoc->getElementsByTagName('files'); if ($filesElement->length === 0) { throw new Exception\ConfigException('Baseline file does not contain <files>'); } $files = []; /** @var \DOMElement $filesElement */ $filesElement = $filesElement[0]; foreach ($filesElement->getElementsByTagName('file') as $file) { $fileName = $file->getAttribute('src'); $fileName = str_replace('\\', '/', $fileName); $files[$fileName] = []; foreach ($file->childNodes as $issue) { if (!$issue instanceof \DOMElement) { continue; } $issueType = $issue->tagName; $files[$fileName][$issueType] = [ 'o' => (int)$issue->getAttribute('occurrences'), 's' => [], ]; $codeSamples = $issue->getElementsByTagName('code'); foreach ($codeSamples as $codeSample) { $files[$fileName][$issueType]['s'][] = $codeSample->textContent; } } } return $files; } /** * @param array<string, list<IssueData>> $issues * * @return array<string, array<string, array{o: int, s: list<string>}>> * * @throws Exception\ConfigException */ public static function update( FileProvider $fileProvider, string $baselineFile, array $issues, bool $include_php_versions ): array { $existingIssues = self::read($fileProvider, $baselineFile); $newIssues = self::countIssueTypesByFile($issues); foreach ($existingIssues as $file => &$existingIssuesCount) { if (!isset($newIssues[$file])) { unset($existingIssues[$file]); continue; } foreach ($existingIssuesCount as $issueType => $existingIssueType) { if (!isset($newIssues[$file][$issueType])) { unset($existingIssuesCount[$issueType]); continue; } $existingIssuesCount[$issueType]['o'] = min( $existingIssueType['o'], $newIssues[$file][$issueType]['o'] ); 
$existingIssuesCount[$issueType]['s'] = array_intersect( $existingIssueType['s'], $newIssues[$file][$issueType]['s'] ); } } $groupedIssues = array_filter($existingIssues); self::writeToFile($fileProvider, $baselineFile, $groupedIssues, $include_php_versions); return $groupedIssues; } /** * @param array<string, list<IssueData>> $issues * * @return array<string,array<string,array{o:int, s:array<int, string>}>> */ private static function countIssueTypesByFile(array $issues): array { if ($issues === []) { return []; } $groupedIssues = array_reduce( array_merge(...array_values($issues)), /** * @param array<string,array<string,array{o:int, s:array<int, string>}>> $carry * * @return array<string,array<string,array{o:int, s:array<int, string>}>> */ function (array $carry, IssueData $issue): array { if ($issue->severity !== Config::REPORT_ERROR) { return $carry; } $fileName = $issue->file_name; $fileName = str_replace('\\', '/', $fileName); $issueType = $issue->type; if (!isset($carry[$fileName])) { $carry[$fileName] = []; } if (!isset($carry[$fileName][$issueType])) { $carry[$fileName][$issueType] = ['o' => 0, 's' => []]; } ++$carry[$fileName][$issueType]['o']; if (!strpos($issue->selected_text, "\n")) { $carry[$fileName][$issueType]['s'][] = $issue->selected_text; } return $carry; }, [] ); // Sort files first ksort($groupedIssues); foreach ($groupedIssues as &$issues) { ksort($issues); } return $groupedIssues; } /** * @param array<string,array<string,array{o:int, s:array<int, string>}>> $groupedIssues * */ private static function writeToFile( FileProvider $fileProvider, string $baselineFile, array $groupedIssues, bool $include_php_versions ): void { $baselineDoc = new \DOMDocument('1.0', 'UTF-8'); $filesNode = $baselineDoc->createElement('files'); $filesNode->setAttribute('psalm-version', PSALM_VERSION); if ($include_php_versions) { $extensions = array_merge(get_loaded_extensions(), get_loaded_extensions(true)); usort($extensions, 'strnatcasecmp'); $filesNode->setAttribute('php-version', implode(';' . "\n\t", array_merge( [ ('php:' . PHP_VERSION), ], array_map( function (string $extension) : string { return $extension . ':' . phpversion($extension); }, $extensions ) ))); } foreach ($groupedIssues as $file => $issueTypes) { $fileNode = $baselineDoc->createElement('file'); $fileNode->setAttribute('src', $file); foreach ($issueTypes as $issueType => $existingIssueType) { $issueNode = $baselineDoc->createElement($issueType); $issueNode->setAttribute('occurrences', (string)$existingIssueType['o']); \sort($existingIssueType['s']); foreach ($existingIssueType['s'] as $selection) { $codeNode = $baselineDoc->createElement('code'); $codeNode->textContent = $selection; $issueNode->appendChild($codeNode); } $fileNode->appendChild($issueNode); } $filesNode->appendChild($fileNode); } $baselineDoc->appendChild($filesNode); $baselineDoc->formatOutput = true; $xml = preg_replace_callback( '/<files (psalm-version="[^"]+") (?:php-version="(.+)"(\/?>)\n)/', /** * @param array<int, string> $matches */ function (array $matches) : string { return '<files' . "\n " . $matches[1] . "\n" . ' php-version="' . "\n " . str_replace('&#10;&#9;', "\n ", $matches[2]). "\n" . ' "' . "\n" . $matches[3] . "\n"; }, $baselineDoc->saveXML() ); if ($xml === null) { throw new RuntimeException('Failed to reformat opening attributes!'); } $fileProvider->setContents($baselineFile, $xml); } }
1
10,318
I don't think you need to change this, `array<int, string>` should be accepted wherever `string[]` is expected. (`string[]` is an alias of `array<string>`, itself an alias of `array<array-key, string>`, which is a parent type of `array<int, string>`.) See the sketch below.
vimeo-psalm
php
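To make the aliasing chain in the review comment above concrete, here is a minimal sketch of how Psalm should treat the two annotations interchangeably. The function name `takesStrings` and the sample values are hypothetical, not part of the psalm codebase:

```php
<?php
/**
 * @param string[] $matches
 */
function takesStrings(array $matches): string
{
    return implode(', ', $matches);
}

/** @var array<int, string> $m */
$m = ['foo', 'bar'];

// array<int, string> is a subtype of array<array-key, string>, which is what
// string[] expands to, so Psalm accepts this call without reporting an issue.
echo takesStrings($m);
```

Either annotation therefore admits the same callers; `array<int, string>` is simply the more precise of the two, which is why the docblock change in the patch is unnecessary.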
@@ -1257,8 +1257,9 @@ bool wlr_seat_touch_has_grab(struct wlr_seat *seat) { } bool wlr_seat_validate_grab_serial(struct wlr_seat *seat, uint32_t serial) { - return serial == seat->pointer_state.grab_serial || - serial == seat->touch_state.grab_serial; + return true; + //return serial == seat->pointer_state.grab_serial || + // serial == seat->touch_state.grab_serial; } struct wlr_seat_client *wlr_seat_client_from_resource(
1
#define _POSIX_C_SOURCE 200809L #include <assert.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <wayland-server.h> #include <wlr/types/wlr_data_device.h> #include <wlr/types/wlr_input_device.h> #include <wlr/types/wlr_primary_selection.h> #include <wlr/types/wlr_seat.h> #include <wlr/util/log.h> #include "util/signal.h" static void resource_destroy(struct wl_client *client, struct wl_resource *resource) { wl_resource_destroy(resource); } static void pointer_send_frame(struct wl_resource *resource) { if (wl_resource_get_version(resource) >= WL_POINTER_FRAME_SINCE_VERSION) { wl_pointer_send_frame(resource); } } static const struct wl_pointer_interface wl_pointer_impl; static struct wlr_seat_client *seat_client_from_pointer_resource( struct wl_resource *resource) { assert(wl_resource_instance_of(resource, &wl_pointer_interface, &wl_pointer_impl)); return wl_resource_get_user_data(resource); } static void wl_pointer_set_cursor(struct wl_client *client, struct wl_resource *pointer_resource, uint32_t serial, struct wl_resource *surface_resource, int32_t hotspot_x, int32_t hotspot_y) { struct wlr_seat_client *seat_client = seat_client_from_pointer_resource(pointer_resource); struct wlr_surface *surface = NULL; if (surface_resource != NULL) { surface = wlr_surface_from_resource(surface_resource); if (wlr_surface_set_role(surface, "wl_pointer-cursor", surface_resource, WL_POINTER_ERROR_ROLE) < 0) { return; } } struct wlr_seat_pointer_request_set_cursor_event *event = calloc(1, sizeof(struct wlr_seat_pointer_request_set_cursor_event)); if (event == NULL) { return; } event->seat_client = seat_client; event->surface = surface; event->serial = serial; event->hotspot_x = hotspot_x; event->hotspot_y = hotspot_y; wlr_signal_emit_safe(&seat_client->seat->events.request_set_cursor, event); free(event); } static const struct wl_pointer_interface wl_pointer_impl = { .set_cursor = wl_pointer_set_cursor, .release = resource_destroy, }; static void wl_pointer_destroy(struct wl_resource *resource) { wl_list_remove(wl_resource_get_link(resource)); } static void wl_seat_get_pointer(struct wl_client *client, struct wl_resource *seat_resource, uint32_t id) { struct wlr_seat_client *seat_client = wlr_seat_client_from_resource(seat_resource); if (!(seat_client->seat->capabilities & WL_SEAT_CAPABILITY_POINTER)) { return; } struct wl_resource *resource = wl_resource_create(client, &wl_pointer_interface, wl_resource_get_version(seat_resource), id); if (resource == NULL) { wl_resource_post_no_memory(seat_resource); return; } wl_resource_set_implementation(resource, &wl_pointer_impl, seat_client, &wl_pointer_destroy); wl_list_insert(&seat_client->pointers, wl_resource_get_link(resource)); } static const struct wl_keyboard_interface wl_keyboard_impl = { .release = resource_destroy, }; static void wl_keyboard_destroy(struct wl_resource *resource) { wl_list_remove(wl_resource_get_link(resource)); } static void seat_client_send_keymap(struct wlr_seat_client *client, struct wlr_keyboard *keyboard) { if (!keyboard) { return; } // TODO: We should probably lift all of the keys set by the other // keyboard struct wl_resource *resource; wl_resource_for_each(resource, &client->keyboards) { wl_keyboard_send_keymap(resource, WL_KEYBOARD_KEYMAP_FORMAT_XKB_V1, keyboard->keymap_fd, keyboard->keymap_size); } } static void seat_client_send_repeat_info(struct wlr_seat_client *client, struct wlr_keyboard *keyboard) { if (!keyboard) { return; } struct wl_resource *resource; wl_resource_for_each(resource, 
&client->keyboards) { if (wl_resource_get_version(resource) >= WL_KEYBOARD_REPEAT_INFO_SINCE_VERSION) { wl_keyboard_send_repeat_info(resource, keyboard->repeat_info.rate, keyboard->repeat_info.delay); } } } static void wl_seat_get_keyboard(struct wl_client *client, struct wl_resource *seat_resource, uint32_t id) { struct wlr_seat_client *seat_client = wlr_seat_client_from_resource(seat_resource); if (!(seat_client->seat->capabilities & WL_SEAT_CAPABILITY_KEYBOARD)) { return; } struct wl_resource *resource = wl_resource_create(client, &wl_keyboard_interface, wl_resource_get_version(seat_resource), id); if (resource == NULL) { wl_resource_post_no_memory(seat_resource); return; } wl_resource_set_implementation(resource, &wl_keyboard_impl, seat_client, &wl_keyboard_destroy); wl_list_insert(&seat_client->keyboards, wl_resource_get_link(resource)); struct wlr_keyboard *keyboard = seat_client->seat->keyboard_state.keyboard; seat_client_send_keymap(seat_client, keyboard); seat_client_send_repeat_info(seat_client, keyboard); // TODO possibly handle the case where this keyboard needs an enter // right away } static const struct wl_touch_interface wl_touch_impl = { .release = resource_destroy, }; static void wl_touch_destroy(struct wl_resource *resource) { wl_list_remove(wl_resource_get_link(resource)); } static void wl_seat_get_touch(struct wl_client *client, struct wl_resource *seat_resource, uint32_t id) { struct wlr_seat_client *seat_client = wlr_seat_client_from_resource(seat_resource); if (!(seat_client->seat->capabilities & WL_SEAT_CAPABILITY_TOUCH)) { return; } struct wl_resource *resource = wl_resource_create(client, &wl_touch_interface, wl_resource_get_version(seat_resource), id); if (resource == NULL) { wl_resource_post_no_memory(seat_resource); return; } wl_resource_set_implementation(resource, &wl_touch_impl, seat_client, &wl_touch_destroy); wl_list_insert(&seat_client->touches, wl_resource_get_link(resource)); } static void wlr_seat_client_resource_destroy(struct wl_resource *seat_resource) { struct wlr_seat_client *client = wlr_seat_client_from_resource(seat_resource); wlr_signal_emit_safe(&client->events.destroy, client); if (client == client->seat->pointer_state.focused_client) { client->seat->pointer_state.focused_client = NULL; } if (client == client->seat->keyboard_state.focused_client) { client->seat->keyboard_state.focused_client = NULL; } struct wl_resource *resource, *tmp; wl_resource_for_each_safe(resource, tmp, &client->pointers) { wl_resource_destroy(resource); } wl_resource_for_each_safe(resource, tmp, &client->keyboards) { wl_resource_destroy(resource); } wl_resource_for_each_safe(resource, tmp, &client->touches) { wl_resource_destroy(resource); } wl_resource_for_each_safe(resource, tmp, &client->data_devices) { wl_resource_destroy(resource); } wl_resource_for_each_safe(resource, tmp, &client->primary_selection_devices) { wl_resource_destroy(resource); } wl_list_remove(&client->link); free(client); } struct wl_seat_interface wl_seat_impl = { .get_pointer = wl_seat_get_pointer, .get_keyboard = wl_seat_get_keyboard, .get_touch = wl_seat_get_touch, .release = resource_destroy, }; static void wl_seat_bind(struct wl_client *client, void *_wlr_seat, uint32_t version, uint32_t id) { struct wlr_seat *wlr_seat = _wlr_seat; assert(client && wlr_seat); struct wlr_seat_client *seat_client = calloc(1, sizeof(struct wlr_seat_client)); if (seat_client == NULL) { wl_client_post_no_memory(client); return; } seat_client->wl_resource = wl_resource_create(client, &wl_seat_interface, version, 
id); if (seat_client->wl_resource == NULL) { free(seat_client); wl_client_post_no_memory(client); return; } seat_client->client = client; seat_client->seat = wlr_seat; wl_list_init(&seat_client->pointers); wl_list_init(&seat_client->keyboards); wl_list_init(&seat_client->touches); wl_list_init(&seat_client->data_devices); wl_list_init(&seat_client->primary_selection_devices); wl_resource_set_implementation(seat_client->wl_resource, &wl_seat_impl, seat_client, wlr_seat_client_resource_destroy); wl_list_insert(&wlr_seat->clients, &seat_client->link); if (version >= WL_SEAT_NAME_SINCE_VERSION) { wl_seat_send_name(seat_client->wl_resource, wlr_seat->name); } wl_seat_send_capabilities(seat_client->wl_resource, wlr_seat->capabilities); wl_signal_init(&seat_client->events.destroy); } static void default_pointer_enter(struct wlr_seat_pointer_grab *grab, struct wlr_surface *surface, double sx, double sy) { wlr_seat_pointer_enter(grab->seat, surface, sx, sy); } static void default_pointer_motion(struct wlr_seat_pointer_grab *grab, uint32_t time, double sx, double sy) { wlr_seat_pointer_send_motion(grab->seat, time, sx, sy); } static uint32_t default_pointer_button(struct wlr_seat_pointer_grab *grab, uint32_t time, uint32_t button, uint32_t state) { return wlr_seat_pointer_send_button(grab->seat, time, button, state); } static void default_pointer_axis(struct wlr_seat_pointer_grab *grab, uint32_t time, enum wlr_axis_orientation orientation, double value) { wlr_seat_pointer_send_axis(grab->seat, time, orientation, value); } static void default_pointer_cancel(struct wlr_seat_pointer_grab *grab) { // cannot be cancelled } static const struct wlr_pointer_grab_interface default_pointer_grab_impl = { .enter = default_pointer_enter, .motion = default_pointer_motion, .button = default_pointer_button, .axis = default_pointer_axis, .cancel = default_pointer_cancel, }; static void default_keyboard_enter(struct wlr_seat_keyboard_grab *grab, struct wlr_surface *surface, uint32_t keycodes[], size_t num_keycodes, struct wlr_keyboard_modifiers *modifiers) { wlr_seat_keyboard_enter(grab->seat, surface, keycodes, num_keycodes, modifiers); } static void default_keyboard_key(struct wlr_seat_keyboard_grab *grab, uint32_t time, uint32_t key, uint32_t state) { wlr_seat_keyboard_send_key(grab->seat, time, key, state); } static void default_keyboard_modifiers(struct wlr_seat_keyboard_grab *grab, struct wlr_keyboard_modifiers *modifiers) { wlr_seat_keyboard_send_modifiers(grab->seat, modifiers); } static void default_keyboard_cancel(struct wlr_seat_keyboard_grab *grab) { // cannot be cancelled } static const struct wlr_keyboard_grab_interface default_keyboard_grab_impl = { .enter = default_keyboard_enter, .key = default_keyboard_key, .modifiers = default_keyboard_modifiers, .cancel = default_keyboard_cancel, }; static uint32_t default_touch_down(struct wlr_seat_touch_grab *grab, uint32_t time, struct wlr_touch_point *point) { return wlr_seat_touch_send_down(grab->seat, point->surface, time, point->touch_id, point->sx, point->sy); } static void default_touch_up(struct wlr_seat_touch_grab *grab, uint32_t time, struct wlr_touch_point *point) { wlr_seat_touch_send_up(grab->seat, time, point->touch_id); } static void default_touch_motion(struct wlr_seat_touch_grab *grab, uint32_t time, struct wlr_touch_point *point) { if (!point->focus_surface || point->focus_surface == point->surface) { wlr_seat_touch_send_motion(grab->seat, time, point->touch_id, point->sx, point->sy); } } static void default_touch_enter(struct 
wlr_seat_touch_grab *grab, uint32_t time, struct wlr_touch_point *point) { // not handled by default } static void default_touch_cancel(struct wlr_seat_touch_grab *grab) { // cannot be cancelled } static const struct wlr_touch_grab_interface default_touch_grab_impl = { .down = default_touch_down, .up = default_touch_up, .motion = default_touch_motion, .enter = default_touch_enter, .cancel = default_touch_cancel, }; void wlr_seat_destroy(struct wlr_seat *seat) { if (!seat) { return; } wlr_signal_emit_safe(&seat->events.destroy, seat); wl_list_remove(&seat->display_destroy.link); if (seat->selection_data_source) { seat->selection_data_source->cancel(seat->selection_data_source); seat->selection_data_source = NULL; wl_list_remove(&seat->selection_data_source_destroy.link); } if (seat->primary_selection_source) { seat->primary_selection_source->cancel(seat->primary_selection_source); seat->primary_selection_source = NULL; wl_list_remove(&seat->primary_selection_source_destroy.link); } struct wlr_seat_client *client, *tmp; wl_list_for_each_safe(client, tmp, &seat->clients, link) { // will destroy other resources as well wl_resource_destroy(client->wl_resource); } wl_global_destroy(seat->wl_global); free(seat->pointer_state.default_grab); free(seat->keyboard_state.default_grab); free(seat->touch_state.default_grab); free(seat->name); free(seat); } static void handle_display_destroy(struct wl_listener *listener, void *data) { struct wlr_seat *seat = wl_container_of(listener, seat, display_destroy); wlr_seat_destroy(seat); } struct wlr_seat *wlr_seat_create(struct wl_display *display, const char *name) { struct wlr_seat *wlr_seat = calloc(1, sizeof(struct wlr_seat)); if (!wlr_seat) { return NULL; } // pointer state wlr_seat->pointer_state.seat = wlr_seat; wl_list_init(&wlr_seat->pointer_state.surface_destroy.link); wl_list_init(&wlr_seat->pointer_state.resource_destroy.link); struct wlr_seat_pointer_grab *pointer_grab = calloc(1, sizeof(struct wlr_seat_pointer_grab)); if (!pointer_grab) { free(wlr_seat); return NULL; } pointer_grab->interface = &default_pointer_grab_impl; pointer_grab->seat = wlr_seat; wlr_seat->pointer_state.default_grab = pointer_grab; wlr_seat->pointer_state.grab = pointer_grab; // keyboard state struct wlr_seat_keyboard_grab *keyboard_grab = calloc(1, sizeof(struct wlr_seat_keyboard_grab)); if (!keyboard_grab) { free(pointer_grab); free(wlr_seat); return NULL; } keyboard_grab->interface = &default_keyboard_grab_impl; keyboard_grab->seat = wlr_seat; wlr_seat->keyboard_state.default_grab = keyboard_grab; wlr_seat->keyboard_state.grab = keyboard_grab; wlr_seat->keyboard_state.seat = wlr_seat; wl_list_init(&wlr_seat->keyboard_state.resource_destroy.link); wl_list_init( &wlr_seat->keyboard_state.surface_destroy.link); // touch state struct wlr_seat_touch_grab *touch_grab = calloc(1, sizeof(struct wlr_seat_touch_grab)); if (!touch_grab) { free(pointer_grab); free(keyboard_grab); free(wlr_seat); return NULL; } touch_grab->interface = &default_touch_grab_impl; touch_grab->seat = wlr_seat; wlr_seat->touch_state.default_grab = touch_grab; wlr_seat->touch_state.grab = touch_grab; wlr_seat->touch_state.seat = wlr_seat; wl_list_init(&wlr_seat->touch_state.touch_points); struct wl_global *wl_global = wl_global_create(display, &wl_seat_interface, 6, wlr_seat, wl_seat_bind); if (!wl_global) { free(wlr_seat); return NULL; } wlr_seat->wl_global = wl_global; wlr_seat->display = display; wlr_seat->name = strdup(name); wl_list_init(&wlr_seat->clients); wl_list_init(&wlr_seat->drag_icons); 
wl_signal_init(&wlr_seat->events.new_drag_icon); wl_signal_init(&wlr_seat->events.request_set_cursor); wl_signal_init(&wlr_seat->events.selection); wl_signal_init(&wlr_seat->events.primary_selection); wl_signal_init(&wlr_seat->events.pointer_grab_begin); wl_signal_init(&wlr_seat->events.pointer_grab_end); wl_signal_init(&wlr_seat->events.keyboard_grab_begin); wl_signal_init(&wlr_seat->events.keyboard_grab_end); wl_signal_init(&wlr_seat->events.touch_grab_begin); wl_signal_init(&wlr_seat->events.touch_grab_end); wl_signal_init(&wlr_seat->events.destroy); wlr_seat->display_destroy.notify = handle_display_destroy; wl_display_add_destroy_listener(display, &wlr_seat->display_destroy); return wlr_seat; } struct wlr_seat_client *wlr_seat_client_for_wl_client(struct wlr_seat *wlr_seat, struct wl_client *wl_client) { assert(wlr_seat); struct wlr_seat_client *seat_client; wl_list_for_each(seat_client, &wlr_seat->clients, link) { if (seat_client->client == wl_client) { return seat_client; } } return NULL; } void wlr_seat_set_capabilities(struct wlr_seat *wlr_seat, uint32_t capabilities) { wlr_seat->capabilities = capabilities; struct wlr_seat_client *client; wl_list_for_each(client, &wlr_seat->clients, link) { wl_seat_send_capabilities(client->wl_resource, capabilities); } } void wlr_seat_set_name(struct wlr_seat *wlr_seat, const char *name) { free(wlr_seat->name); wlr_seat->name = strdup(name); struct wlr_seat_client *client; wl_list_for_each(client, &wlr_seat->clients, link) { wl_seat_send_name(client->wl_resource, name); } } bool wlr_seat_pointer_surface_has_focus(struct wlr_seat *wlr_seat, struct wlr_surface *surface) { return surface == wlr_seat->pointer_state.focused_surface; } static void pointer_surface_destroy_notify(struct wl_listener *listener, void *data) { struct wlr_seat_pointer_state *state = wl_container_of( listener, state, surface_destroy); wl_list_remove(&state->surface_destroy.link); wl_list_init(&state->surface_destroy.link); wlr_seat_pointer_clear_focus(state->seat); } static void pointer_resource_destroy_notify(struct wl_listener *listener, void *data) { struct wlr_seat_pointer_state *state = wl_container_of( listener, state, resource_destroy); wl_list_remove(&state->resource_destroy.link); wl_list_init(&state->resource_destroy.link); wlr_seat_pointer_clear_focus(state->seat); } void wlr_seat_pointer_enter(struct wlr_seat *wlr_seat, struct wlr_surface *surface, double sx, double sy) { assert(wlr_seat); if (wlr_seat->pointer_state.focused_surface == surface) { // this surface already got an enter notify return; } struct wlr_seat_client *client = NULL; if (surface) { struct wl_client *wl_client = wl_resource_get_client(surface->resource); client = wlr_seat_client_for_wl_client(wlr_seat, wl_client); } struct wlr_seat_client *focused_client = wlr_seat->pointer_state.focused_client; struct wlr_surface *focused_surface = wlr_seat->pointer_state.focused_surface; // leave the previously entered surface if (focused_client != NULL && focused_surface != NULL) { uint32_t serial = wl_display_next_serial(wlr_seat->display); struct wl_resource *resource; wl_resource_for_each(resource, &focused_client->pointers) { wl_pointer_send_leave(resource, serial, focused_surface->resource); pointer_send_frame(resource); } } // enter the current surface if (client != NULL && surface != NULL) { uint32_t serial = wl_display_next_serial(wlr_seat->display); struct wl_resource *resource; wl_resource_for_each(resource, &client->pointers) { wl_pointer_send_enter(resource, serial, surface->resource, 
wl_fixed_from_double(sx), wl_fixed_from_double(sy)); pointer_send_frame(resource); } } // reinitialize the focus destroy events wl_list_remove(&wlr_seat->pointer_state.surface_destroy.link); wl_list_init(&wlr_seat->pointer_state.surface_destroy.link); wl_list_remove(&wlr_seat->pointer_state.resource_destroy.link); wl_list_init(&wlr_seat->pointer_state.resource_destroy.link); if (surface != NULL) { wl_signal_add(&surface->events.destroy, &wlr_seat->pointer_state.surface_destroy); wl_resource_add_destroy_listener(surface->resource, &wlr_seat->pointer_state.resource_destroy); wlr_seat->pointer_state.resource_destroy.notify = pointer_resource_destroy_notify; wlr_seat->pointer_state.surface_destroy.notify = pointer_surface_destroy_notify; } wlr_seat->pointer_state.focused_client = client; wlr_seat->pointer_state.focused_surface = surface; // TODO: send focus change event } void wlr_seat_pointer_clear_focus(struct wlr_seat *wlr_seat) { wlr_seat_pointer_enter(wlr_seat, NULL, 0, 0); } void wlr_seat_pointer_send_motion(struct wlr_seat *wlr_seat, uint32_t time, double sx, double sy) { struct wlr_seat_client *client = wlr_seat->pointer_state.focused_client; if (client == NULL) { return; } struct wl_resource *resource; wl_resource_for_each(resource, &client->pointers) { wl_pointer_send_motion(resource, time, wl_fixed_from_double(sx), wl_fixed_from_double(sy)); pointer_send_frame(resource); } } uint32_t wlr_seat_pointer_send_button(struct wlr_seat *wlr_seat, uint32_t time, uint32_t button, uint32_t state) { struct wlr_seat_client *client = wlr_seat->pointer_state.focused_client; if (client == NULL) { return 0; } uint32_t serial = wl_display_next_serial(wlr_seat->display); struct wl_resource *resource; wl_resource_for_each(resource, &client->pointers) { wl_pointer_send_button(resource, serial, time, button, state); pointer_send_frame(resource); } return serial; } void wlr_seat_pointer_send_axis(struct wlr_seat *wlr_seat, uint32_t time, enum wlr_axis_orientation orientation, double value) { struct wlr_seat_client *client = wlr_seat->pointer_state.focused_client; if (client == NULL) { return; } struct wl_resource *resource; wl_resource_for_each(resource, &client->pointers) { if (value) { wl_pointer_send_axis(resource, time, orientation, wl_fixed_from_double(value)); } else if (wl_resource_get_version(resource) >= WL_POINTER_AXIS_STOP_SINCE_VERSION) { wl_pointer_send_axis_stop(resource, time, orientation); } pointer_send_frame(resource); } } void wlr_seat_pointer_start_grab(struct wlr_seat *wlr_seat, struct wlr_seat_pointer_grab *grab) { assert(wlr_seat); grab->seat = wlr_seat; assert(grab->seat); wlr_seat->pointer_state.grab = grab; wlr_signal_emit_safe(&wlr_seat->events.pointer_grab_begin, grab); } void wlr_seat_pointer_end_grab(struct wlr_seat *wlr_seat) { struct wlr_seat_pointer_grab *grab = wlr_seat->pointer_state.grab; if (grab != wlr_seat->pointer_state.default_grab) { wlr_seat->pointer_state.grab = wlr_seat->pointer_state.default_grab; wlr_signal_emit_safe(&wlr_seat->events.pointer_grab_end, grab); if (grab->interface->cancel) { grab->interface->cancel(grab); } } } void wlr_seat_pointer_notify_enter(struct wlr_seat *wlr_seat, struct wlr_surface *surface, double sx, double sy) { struct wlr_seat_pointer_grab *grab = wlr_seat->pointer_state.grab; grab->interface->enter(grab, surface, sx, sy); } void wlr_seat_pointer_notify_motion(struct wlr_seat *wlr_seat, uint32_t time, double sx, double sy) { clock_gettime(CLOCK_MONOTONIC, &wlr_seat->last_event); struct wlr_seat_pointer_grab *grab = 
wlr_seat->pointer_state.grab; grab->interface->motion(grab, time, sx, sy); } uint32_t wlr_seat_pointer_notify_button(struct wlr_seat *wlr_seat, uint32_t time, uint32_t button, uint32_t state) { clock_gettime(CLOCK_MONOTONIC, &wlr_seat->last_event); if (state == WL_POINTER_BUTTON_STATE_PRESSED) { if (wlr_seat->pointer_state.button_count == 0) { wlr_seat->pointer_state.grab_button = button; wlr_seat->pointer_state.grab_time = time; } wlr_seat->pointer_state.button_count++; } else { wlr_seat->pointer_state.button_count--; } struct wlr_seat_pointer_grab *grab = wlr_seat->pointer_state.grab; uint32_t serial = grab->interface->button(grab, time, button, state); if (serial && wlr_seat->pointer_state.button_count == 1) { wlr_seat->pointer_state.grab_serial = serial; } return serial; } void wlr_seat_pointer_notify_axis(struct wlr_seat *wlr_seat, uint32_t time, enum wlr_axis_orientation orientation, double value) { clock_gettime(CLOCK_MONOTONIC, &wlr_seat->last_event); struct wlr_seat_pointer_grab *grab = wlr_seat->pointer_state.grab; grab->interface->axis(grab, time, orientation, value); } bool wlr_seat_pointer_has_grab(struct wlr_seat *seat) { return seat->pointer_state.grab->interface != &default_pointer_grab_impl; } void wlr_seat_keyboard_send_key(struct wlr_seat *wlr_seat, uint32_t time, uint32_t key, uint32_t state) { struct wlr_seat_client *client = wlr_seat->keyboard_state.focused_client; if (!client) { return; } uint32_t serial = wl_display_next_serial(wlr_seat->display); struct wl_resource *resource; wl_resource_for_each(resource, &client->keyboards) { wl_keyboard_send_key(resource, serial, time, key, state); } } static void handle_keyboard_keymap(struct wl_listener *listener, void *data) { struct wlr_seat_keyboard_state *state = wl_container_of(listener, state, keyboard_keymap); struct wlr_seat_client *client; struct wlr_keyboard *keyboard = data; if (keyboard == state->keyboard) { wl_list_for_each(client, &state->seat->clients, link) { seat_client_send_keymap(client, state->keyboard); } } } static void handle_keyboard_repeat_info(struct wl_listener *listener, void *data) { struct wlr_seat_keyboard_state *state = wl_container_of(listener, state, keyboard_repeat_info); struct wlr_seat_client *client; wl_list_for_each(client, &state->seat->clients, link) { seat_client_send_repeat_info(client, state->keyboard); } } static void handle_keyboard_destroy(struct wl_listener *listener, void *data) { struct wlr_seat_keyboard_state *state = wl_container_of(listener, state, keyboard_destroy); state->keyboard = NULL; } void wlr_seat_set_keyboard(struct wlr_seat *seat, struct wlr_input_device *device) { // TODO call this on device key event before the event reaches the // compositor and set a pending keyboard and then send the new keyboard // state on the next keyboard notify event. struct wlr_keyboard *keyboard = (device ? 
device->keyboard : NULL); if (seat->keyboard_state.keyboard == keyboard) { return; } if (seat->keyboard_state.keyboard) { wl_list_remove(&seat->keyboard_state.keyboard_destroy.link); wl_list_remove(&seat->keyboard_state.keyboard_keymap.link); wl_list_remove(&seat->keyboard_state.keyboard_repeat_info.link); seat->keyboard_state.keyboard = NULL; } if (keyboard) { assert(device->type == WLR_INPUT_DEVICE_KEYBOARD); seat->keyboard_state.keyboard = keyboard; wl_signal_add(&device->events.destroy, &seat->keyboard_state.keyboard_destroy); seat->keyboard_state.keyboard_destroy.notify = handle_keyboard_destroy; wl_signal_add(&device->keyboard->events.keymap, &seat->keyboard_state.keyboard_keymap); seat->keyboard_state.keyboard_keymap.notify = handle_keyboard_keymap; wl_signal_add(&device->keyboard->events.repeat_info, &seat->keyboard_state.keyboard_repeat_info); seat->keyboard_state.keyboard_repeat_info.notify = handle_keyboard_repeat_info; struct wlr_seat_client *client; wl_list_for_each(client, &seat->clients, link) { seat_client_send_keymap(client, keyboard); seat_client_send_repeat_info(client, keyboard); } wlr_seat_keyboard_send_modifiers(seat, &keyboard->modifiers); } else { seat->keyboard_state.keyboard = NULL; } } struct wlr_keyboard *wlr_seat_get_keyboard(struct wlr_seat *seat) { return seat->keyboard_state.keyboard; } void wlr_seat_keyboard_start_grab(struct wlr_seat *wlr_seat, struct wlr_seat_keyboard_grab *grab) { grab->seat = wlr_seat; wlr_seat->keyboard_state.grab = grab; wlr_signal_emit_safe(&wlr_seat->events.keyboard_grab_begin, grab); } void wlr_seat_keyboard_end_grab(struct wlr_seat *wlr_seat) { struct wlr_seat_keyboard_grab *grab = wlr_seat->keyboard_state.grab; if (grab != wlr_seat->keyboard_state.default_grab) { wlr_seat->keyboard_state.grab = wlr_seat->keyboard_state.default_grab; wlr_signal_emit_safe(&wlr_seat->events.keyboard_grab_end, grab); if (grab->interface->cancel) { grab->interface->cancel(grab); } } } static void keyboard_surface_destroy_notify(struct wl_listener *listener, void *data) { struct wlr_seat_keyboard_state *state = wl_container_of( listener, state, surface_destroy); wl_list_remove(&state->surface_destroy.link); wl_list_init(&state->surface_destroy.link); wlr_seat_keyboard_clear_focus(state->seat); } static void keyboard_resource_destroy_notify(struct wl_listener *listener, void *data) { struct wlr_seat_keyboard_state *state = wl_container_of( listener, state, resource_destroy); wl_list_remove(&state->resource_destroy.link); wl_list_init(&state->resource_destroy.link); wlr_seat_keyboard_clear_focus(state->seat); } void wlr_seat_keyboard_send_modifiers(struct wlr_seat *seat, struct wlr_keyboard_modifiers *modifiers) { struct wlr_seat_client *client = seat->keyboard_state.focused_client; if (client == NULL) { return; } uint32_t serial = wl_display_next_serial(seat->display); struct wl_resource *resource; wl_resource_for_each(resource, &client->keyboards) { if (modifiers == NULL) { wl_keyboard_send_modifiers(resource, serial, 0, 0, 0, 0); } else { wl_keyboard_send_modifiers(resource, serial, modifiers->depressed, modifiers->latched, modifiers->locked, modifiers->group); } } } void wlr_seat_keyboard_enter(struct wlr_seat *seat, struct wlr_surface *surface, uint32_t keycodes[], size_t num_keycodes, struct wlr_keyboard_modifiers *modifiers) { if (seat->keyboard_state.focused_surface == surface) { // this surface already got an enter notify return; } struct wlr_seat_client *client = NULL; if (surface) { struct wl_client *wl_client = 
wl_resource_get_client(surface->resource); client = wlr_seat_client_for_wl_client(seat, wl_client); } struct wlr_seat_client *focused_client = seat->keyboard_state.focused_client; struct wlr_surface *focused_surface = seat->keyboard_state.focused_surface; // leave the previously entered surface if (focused_client != NULL && focused_surface != NULL) { uint32_t serial = wl_display_next_serial(seat->display); struct wl_resource *resource; wl_resource_for_each(resource, &focused_client->keyboards) { wl_keyboard_send_leave(resource, serial, focused_surface->resource); } } // enter the current surface if (client != NULL) { struct wl_array keys; wl_array_init(&keys); for (size_t i = 0; i < num_keycodes; ++i) { uint32_t *p = wl_array_add(&keys, sizeof(uint32_t)); if (!p) { wlr_log(L_ERROR, "Cannot allocate memory, skipping keycode: %d\n", keycodes[i]); continue; } *p = keycodes[i]; } uint32_t serial = wl_display_next_serial(seat->display); struct wl_resource *resource; wl_resource_for_each(resource, &client->keyboards) { wl_keyboard_send_enter(resource, serial, surface->resource, &keys); } wl_array_release(&keys); wlr_seat_client_send_selection(client); wlr_seat_client_send_primary_selection(client); } // reinitialize the focus destroy events wl_list_remove(&seat->keyboard_state.surface_destroy.link); wl_list_init(&seat->keyboard_state.surface_destroy.link); wl_list_remove(&seat->keyboard_state.resource_destroy.link); wl_list_init(&seat->keyboard_state.resource_destroy.link); if (surface) { wl_signal_add(&surface->events.destroy, &seat->keyboard_state.surface_destroy); wl_resource_add_destroy_listener(surface->resource, &seat->keyboard_state.resource_destroy); seat->keyboard_state.resource_destroy.notify = keyboard_resource_destroy_notify; seat->keyboard_state.surface_destroy.notify = keyboard_surface_destroy_notify; } seat->keyboard_state.focused_client = client; seat->keyboard_state.focused_surface = surface; if (client != NULL) { // tell new client about any modifier change last, // as it targets seat->keyboard_state.focused_client wlr_seat_keyboard_send_modifiers(seat, modifiers); } } void wlr_seat_keyboard_notify_enter(struct wlr_seat *seat, struct wlr_surface *surface, uint32_t keycodes[], size_t num_keycodes, struct wlr_keyboard_modifiers *modifiers) { struct wlr_seat_keyboard_grab *grab = seat->keyboard_state.grab; grab->interface->enter(grab, surface, keycodes, num_keycodes, modifiers); } void wlr_seat_keyboard_clear_focus(struct wlr_seat *seat) { // TODO respect grabs here? 
wlr_seat_keyboard_enter(seat, NULL, NULL, 0, NULL); } bool wlr_seat_keyboard_has_grab(struct wlr_seat *seat) { return seat->keyboard_state.grab->interface != &default_keyboard_grab_impl; } void wlr_seat_keyboard_notify_modifiers(struct wlr_seat *seat, struct wlr_keyboard_modifiers *modifiers) { clock_gettime(CLOCK_MONOTONIC, &seat->last_event); struct wlr_seat_keyboard_grab *grab = seat->keyboard_state.grab; grab->interface->modifiers(grab, modifiers); } void wlr_seat_keyboard_notify_key(struct wlr_seat *seat, uint32_t time, uint32_t key, uint32_t state) { clock_gettime(CLOCK_MONOTONIC, &seat->last_event); struct wlr_seat_keyboard_grab *grab = seat->keyboard_state.grab; grab->interface->key(grab, time, key, state); } void wlr_seat_touch_start_grab(struct wlr_seat *wlr_seat, struct wlr_seat_touch_grab *grab) { grab->seat = wlr_seat; wlr_seat->touch_state.grab = grab; wlr_signal_emit_safe(&wlr_seat->events.touch_grab_begin, grab); } void wlr_seat_touch_end_grab(struct wlr_seat *wlr_seat) { struct wlr_seat_touch_grab *grab = wlr_seat->touch_state.grab; if (grab != wlr_seat->touch_state.default_grab) { wlr_seat->touch_state.grab = wlr_seat->touch_state.default_grab; wlr_signal_emit_safe(&wlr_seat->events.touch_grab_end, grab); if (grab->interface->cancel) { grab->interface->cancel(grab); } } } static void touch_point_clear_focus(struct wlr_touch_point *point) { if (point->focus_surface) { wl_list_remove(&point->focus_surface_destroy.link); point->focus_client = NULL; point->focus_surface = NULL; } } static void touch_point_destroy(struct wlr_touch_point *point) { wlr_signal_emit_safe(&point->events.destroy, point); touch_point_clear_focus(point); wl_list_remove(&point->surface_destroy.link); wl_list_remove(&point->resource_destroy.link); wl_list_remove(&point->link); free(point); } static void handle_touch_point_resource_destroy(struct wl_listener *listener, void *data) { struct wlr_touch_point *point = wl_container_of(listener, point, resource_destroy); touch_point_destroy(point); } static void handle_touch_point_surface_destroy(struct wl_listener *listener, void *data) { struct wlr_touch_point *point = wl_container_of(listener, point, surface_destroy); touch_point_destroy(point); } static struct wlr_touch_point *touch_point_create( struct wlr_seat *seat, int32_t touch_id, struct wlr_surface *surface, double sx, double sy) { struct wl_client *wl_client = wl_resource_get_client(surface->resource); struct wlr_seat_client *client = wlr_seat_client_for_wl_client(seat, wl_client); if (client == NULL || wl_list_empty(&client->touches)) { // touch points are not valid without a connected client with touch return NULL; } struct wlr_touch_point *point = calloc(1, sizeof(struct wlr_touch_point)); if (!point) { return NULL; } point->touch_id = touch_id; point->surface = surface; point->client = client; point->sx = sx; point->sy = sy; wl_signal_init(&point->events.destroy); wl_signal_add(&surface->events.destroy, &point->surface_destroy); point->surface_destroy.notify = handle_touch_point_surface_destroy; wl_resource_add_destroy_listener(surface->resource, &point->resource_destroy); point->resource_destroy.notify = handle_touch_point_resource_destroy; wl_list_insert(&seat->touch_state.touch_points, &point->link); return point; } struct wlr_touch_point *wlr_seat_touch_get_point( struct wlr_seat *seat, int32_t touch_id) { struct wlr_touch_point *point = NULL; wl_list_for_each(point, &seat->touch_state.touch_points, link) { if (point->touch_id == touch_id) { return point; } } return NULL; } uint32_t 
wlr_seat_touch_notify_down(struct wlr_seat *seat, struct wlr_surface *surface, uint32_t time, int32_t touch_id, double sx, double sy) { clock_gettime(CLOCK_MONOTONIC, &seat->last_event); struct wlr_seat_touch_grab *grab = seat->touch_state.grab; struct wlr_touch_point *point = touch_point_create(seat, touch_id, surface, sx, sy); if (!point) { wlr_log(L_ERROR, "could not create touch point"); return 0; } uint32_t serial = grab->interface->down(grab, time, point); if (serial && wlr_seat_touch_num_points(seat) == 1) { seat->touch_state.grab_serial = serial; seat->touch_state.grab_id = touch_id; } return serial; } void wlr_seat_touch_notify_up(struct wlr_seat *seat, uint32_t time, int32_t touch_id) { clock_gettime(CLOCK_MONOTONIC, &seat->last_event); struct wlr_seat_touch_grab *grab = seat->touch_state.grab; struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(L_ERROR, "got touch up for unknown touch point"); return; } grab->interface->up(grab, time, point); touch_point_destroy(point); } void wlr_seat_touch_notify_motion(struct wlr_seat *seat, uint32_t time, int32_t touch_id, double sx, double sy) { clock_gettime(CLOCK_MONOTONIC, &seat->last_event); struct wlr_seat_touch_grab *grab = seat->touch_state.grab; struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(L_ERROR, "got touch motion for unknown touch point"); return; } point->sx = sx; point->sy = sy; grab->interface->motion(grab, time, point); } static void handle_point_focus_destroy(struct wl_listener *listener, void *data) { struct wlr_touch_point *point = wl_container_of(listener, point, focus_surface_destroy); touch_point_clear_focus(point); } static void touch_point_set_focus(struct wlr_touch_point *point, struct wlr_surface *surface, double sx, double sy) { if (point->focus_surface == surface) { return; } touch_point_clear_focus(point); if (surface && surface->resource) { struct wlr_seat_client *client = wlr_seat_client_for_wl_client(point->client->seat, wl_resource_get_client(surface->resource)); if (client && !wl_list_empty(&client->touches)) { wl_signal_add(&surface->events.destroy, &point->focus_surface_destroy); point->focus_surface_destroy.notify = handle_point_focus_destroy; point->focus_surface = surface; point->focus_client = client; point->sx = sx; point->sy = sy; } } } void wlr_seat_touch_point_focus(struct wlr_seat *seat, struct wlr_surface *surface, uint32_t time, int32_t touch_id, double sx, double sy) { assert(surface); struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(L_ERROR, "got touch point focus for unknown touch point"); return; } struct wlr_surface *focus = point->focus_surface; touch_point_set_focus(point, surface, sx, sy); if (focus != point->focus_surface) { struct wlr_seat_touch_grab *grab = seat->touch_state.grab; grab->interface->enter(grab, time, point); } } void wlr_seat_touch_point_clear_focus(struct wlr_seat *seat, uint32_t time, int32_t touch_id) { struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(L_ERROR, "got touch point focus for unknown touch point"); return; } touch_point_clear_focus(point); } uint32_t wlr_seat_touch_send_down(struct wlr_seat *seat, struct wlr_surface *surface, uint32_t time, int32_t touch_id, double sx, double sy) { struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(L_ERROR, "got touch down for unknown touch point"); return 0; } uint32_t serial = 
wl_display_next_serial(seat->display); struct wl_resource *resource; wl_resource_for_each(resource, &point->client->touches) { wl_touch_send_down(resource, serial, time, surface->resource, touch_id, wl_fixed_from_double(sx), wl_fixed_from_double(sy)); wl_touch_send_frame(resource); } return serial; } void wlr_seat_touch_send_up(struct wlr_seat *seat, uint32_t time, int32_t touch_id) { struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(L_ERROR, "got touch up for unknown touch point"); return; } uint32_t serial = wl_display_next_serial(seat->display); struct wl_resource *resource; wl_resource_for_each(resource, &point->client->touches) { wl_touch_send_up(resource, serial, time, touch_id); wl_touch_send_frame(resource); } } void wlr_seat_touch_send_motion(struct wlr_seat *seat, uint32_t time, int32_t touch_id, double sx, double sy) { struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(L_ERROR, "got touch motion for unknown touch point"); return; } struct wl_resource *resource; wl_resource_for_each(resource, &point->client->touches) { wl_touch_send_motion(resource, time, touch_id, wl_fixed_from_double(sx), wl_fixed_from_double(sy)); wl_touch_send_frame(resource); } } int wlr_seat_touch_num_points(struct wlr_seat *seat) { return wl_list_length(&seat->touch_state.touch_points); } bool wlr_seat_touch_has_grab(struct wlr_seat *seat) { return seat->touch_state.grab->interface != &default_touch_grab_impl; } bool wlr_seat_validate_grab_serial(struct wlr_seat *seat, uint32_t serial) { return serial == seat->pointer_state.grab_serial || serial == seat->touch_state.grab_serial; } struct wlr_seat_client *wlr_seat_client_from_resource( struct wl_resource *resource) { assert(wl_resource_instance_of(resource, &wl_seat_interface, &wl_seat_impl)); return wl_resource_get_user_data(resource); }
1
10,419
TODO: fix this, probably in another PR
swaywm-wlroots
c
@@ -591,6 +591,17 @@ class HintManager(QObject): message.error("No elements found.") return + # Because _start_cb is called asynchronously, it's possible that the + # user switched to another tab or closed the tab/window. In that case + # we should not start hinting. + tabbed_browser = objreg.get('tabbed-browser', default=None, + scope='window', window=self._win_id) + if tabbed_browser is None: + return + tab = tabbed_browser.widget.currentWidget() + if tab is None or tab.tab_id != self._tab_id: + return + strings = self._hint_strings(elems) log.hints.debug("hints: {}".format(', '.join(strings)))
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2019 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """A HintManager to draw hints over links.""" import collections import functools import os import re import html import enum from string import ascii_lowercase import attr from PyQt5.QtCore import pyqtSlot, QObject, Qt, QUrl from PyQt5.QtWidgets import QLabel from qutebrowser.config import config, configexc from qutebrowser.keyinput import modeman, modeparsers from qutebrowser.browser import webelem from qutebrowser.commands import userscripts, runners from qutebrowser.api import cmdutils from qutebrowser.utils import usertypes, log, qtutils, message, objreg, utils Target = enum.Enum('Target', ['normal', 'current', 'tab', 'tab_fg', 'tab_bg', 'window', 'yank', 'yank_primary', 'run', 'fill', 'hover', 'download', 'userscript', 'spawn']) class HintingError(Exception): """Exception raised on errors during hinting.""" def on_mode_entered(mode, win_id): """Stop hinting when insert mode was entered.""" if mode == usertypes.KeyMode.insert: modeman.leave(win_id, usertypes.KeyMode.hint, 'insert mode', maybe=True) class HintLabel(QLabel): """A label for a link. Attributes: elem: The element this label belongs to. _context: The current hinting context. """ def __init__(self, elem, context): super().__init__(parent=context.tab) self._context = context self.elem = elem # Make sure we can style the background via a style sheet, and we don't # get any extra text indent from Qt. # The real stylesheet lives in mainwindow.py for performance reasons.. self.setAttribute(Qt.WA_StyledBackground, True) self.setIndent(0) self._context.tab.contents_size_changed.connect(self._move_to_elem) self._move_to_elem() self.show() def __repr__(self): try: text = self.text() except RuntimeError: text = '<deleted>' return utils.get_repr(self, elem=self.elem, text=text) def update_text(self, matched, unmatched): """Set the text for the hint. Args: matched: The part of the text which was typed. unmatched: The part of the text which was not typed yet. """ if (config.cache['hints.uppercase'] and self._context.hint_mode in ['letter', 'word']): matched = html.escape(matched.upper()) unmatched = html.escape(unmatched.upper()) else: matched = html.escape(matched) unmatched = html.escape(unmatched) match_color = html.escape(config.cache['colors.hints.match.fg']) if matched: self.setText('<font color="{}">{}</font>{}'.format( match_color, matched, unmatched)) else: self.setText(unmatched) self.adjustSize() @pyqtSlot() def _move_to_elem(self): """Reposition the label to its element.""" if not self.elem.has_frame(): # This sometimes happens for some reason... 
log.hints.debug("Frame for {!r} vanished!".format(self)) self.hide() return no_js = config.cache['hints.find_implementation'] != 'javascript' rect = self.elem.rect_on_view(no_js=no_js) self.move(rect.x(), rect.y()) def cleanup(self): """Clean up this element and hide it.""" self.hide() self.deleteLater() @attr.s class HintContext: """Context namespace used for hinting. Attributes: all_labels: A list of all HintLabel objects ever created. labels: A mapping from key strings to HintLabel objects. May contain less elements than `all_labels` due to filtering. baseurl: The URL of the current page. target: What to do with the opened links. normal/current/tab/tab_fg/tab_bg/window: Get passed to BrowserTab. yank/yank_primary: Yank to clipboard/primary selection. run: Run a command. fill: Fill commandline with link. download: Download the link. userscript: Call a custom userscript. spawn: Spawn a simple command. to_follow: The link to follow when enter is pressed. args: Custom arguments for userscript/spawn rapid: Whether to do rapid hinting. first_run: Whether the action is run for the 1st time in rapid hinting. add_history: Whether to add yanked or spawned link to the history. filterstr: Used to save the filter string for restoring in rapid mode. tab: The WebTab object we started hinting in. group: The group of web elements to hint. """ all_labels = attr.ib(attr.Factory(list)) labels = attr.ib(attr.Factory(dict)) target = attr.ib(None) baseurl = attr.ib(None) to_follow = attr.ib(None) rapid = attr.ib(False) first_run = attr.ib(True) add_history = attr.ib(False) filterstr = attr.ib(None) args = attr.ib(attr.Factory(list)) tab = attr.ib(None) group = attr.ib(None) hint_mode = attr.ib(None) first = attr.ib(False) def get_args(self, urlstr): """Get the arguments, with {hint-url} replaced by the given URL.""" args = [] for arg in self.args: arg = arg.replace('{hint-url}', urlstr) args.append(arg) return args class HintActions: """Actions which can be done after selecting a hint.""" def __init__(self, win_id): self._win_id = win_id def click(self, elem, context): """Click an element. Args: elem: The QWebElement to click. context: The HintContext to use. """ target_mapping = { Target.normal: usertypes.ClickTarget.normal, Target.current: usertypes.ClickTarget.normal, Target.tab_fg: usertypes.ClickTarget.tab, Target.tab_bg: usertypes.ClickTarget.tab_bg, Target.window: usertypes.ClickTarget.window, Target.hover: usertypes.ClickTarget.normal, } if config.val.tabs.background: target_mapping[Target.tab] = usertypes.ClickTarget.tab_bg else: target_mapping[Target.tab] = usertypes.ClickTarget.tab if context.target in [Target.normal, Target.current]: # Set the pre-jump mark ', so we can jump back here after following context.tab.scroller.before_jump_requested.emit() try: if context.target == Target.hover: elem.hover() elif context.target == Target.current: elem.remove_blank_target() elem.click(target_mapping[context.target]) else: elem.click(target_mapping[context.target]) except webelem.Error as e: raise HintingError(str(e)) def yank(self, url, context): """Yank an element to the clipboard or primary selection. Args: url: The URL to open as a QUrl. context: The HintContext to use. 
""" sel = (context.target == Target.yank_primary and utils.supports_selection()) flags = QUrl.FullyEncoded | QUrl.RemovePassword if url.scheme() == 'mailto': flags |= QUrl.RemoveScheme urlstr = url.toString(flags) new_content = urlstr # only second and consecutive yanks are to append to the clipboard if context.rapid and not context.first_run: try: old_content = utils.get_clipboard(selection=sel) except utils.ClipboardEmptyError: pass else: new_content = os.linesep.join([old_content, new_content]) utils.set_clipboard(new_content, selection=sel) msg = "Yanked URL to {}: {}".format( "primary selection" if sel else "clipboard", urlstr) message.info(msg) def run_cmd(self, url, context): """Run the command based on a hint URL. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ urlstr = url.toString(QUrl.FullyEncoded) args = context.get_args(urlstr) commandrunner = runners.CommandRunner(self._win_id) commandrunner.run_safely(' '.join(args)) def preset_cmd_text(self, url, context): """Preset a commandline text based on a hint URL. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ urlstr = url.toDisplayString(QUrl.FullyEncoded) args = context.get_args(urlstr) text = ' '.join(args) if text[0] not in modeparsers.STARTCHARS: raise HintingError("Invalid command text '{}'.".format(text)) cmd = objreg.get('status-command', scope='window', window=self._win_id) cmd.set_cmd_text(text) def download(self, elem, context): """Download a hint URL. Args: elem: The QWebElement to download. _context: The HintContext to use. """ url = elem.resolve_url(context.baseurl) if url is None: raise HintingError("No suitable link found for this element.") prompt = False if context.rapid else None qnam = context.tab.private_api.networkaccessmanager() user_agent = context.tab.private_api.user_agent() # FIXME:qtwebengine do this with QtWebEngine downloads? download_manager = objreg.get('qtnetwork-download-manager') download_manager.get(url, qnam=qnam, user_agent=user_agent, prompt_download_directory=prompt) def call_userscript(self, elem, context): """Call a userscript from a hint. Args: elem: The QWebElement to use in the userscript. context: The HintContext to use. """ cmd = context.args[0] args = context.args[1:] env = { 'QUTE_MODE': 'hints', 'QUTE_SELECTED_TEXT': str(elem), 'QUTE_SELECTED_HTML': elem.outer_xml(), } url = elem.resolve_url(context.baseurl) if url is not None: env['QUTE_URL'] = url.toString(QUrl.FullyEncoded) try: userscripts.run_async(context.tab, cmd, *args, win_id=self._win_id, env=env) except userscripts.Error as e: raise HintingError(str(e)) def spawn(self, url, context): """Spawn a simple command from a hint. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ urlstr = url.toString(QUrl.FullyEncoded | QUrl.RemovePassword) args = context.get_args(urlstr) commandrunner = runners.CommandRunner(self._win_id) commandrunner.run_safely('spawn ' + ' '.join(args)) class HintManager(QObject): """Manage drawing hints over links or other elements. Class attributes: HINT_TEXTS: Text displayed for different hinting modes. Attributes: _context: The HintContext for the current invocation. _win_id: The window ID this HintManager is associated with. _tab_id: The tab ID this HintManager is associated with. 
Signals: See HintActions """ HINT_TEXTS = { Target.normal: "Follow hint", Target.current: "Follow hint in current tab", Target.tab: "Follow hint in new tab", Target.tab_fg: "Follow hint in foreground tab", Target.tab_bg: "Follow hint in background tab", Target.window: "Follow hint in new window", Target.yank: "Yank hint to clipboard", Target.yank_primary: "Yank hint to primary selection", Target.run: "Run a command on a hint", Target.fill: "Set hint in commandline", Target.hover: "Hover over a hint", Target.download: "Download hint", Target.userscript: "Call userscript via hint", Target.spawn: "Spawn command via hint", } def __init__(self, win_id, tab_id, parent=None): """Constructor.""" super().__init__(parent) self._win_id = win_id self._tab_id = tab_id self._context = None self._word_hinter = WordHinter() self._actions = HintActions(win_id) mode_manager = objreg.get('mode-manager', scope='window', window=win_id) mode_manager.left.connect(self.on_mode_left) def _get_text(self): """Get a hint text based on the current context.""" text = self.HINT_TEXTS[self._context.target] if self._context.rapid: text += ' (rapid mode)' text += '...' return text def _cleanup(self): """Clean up after hinting.""" for label in self._context.all_labels: label.cleanup() text = self._get_text() message_bridge = objreg.get('message-bridge', scope='window', window=self._win_id) message_bridge.maybe_reset_text(text) self._context = None def _hint_strings(self, elems): """Calculate the hint strings for elems. Inspired by Vimium. Args: elems: The elements to get hint strings for. Return: A list of hint strings, in the same order as the elements. """ if not elems: return [] hint_mode = self._context.hint_mode if hint_mode == 'word': try: return self._word_hinter.hint(elems) except HintingError as e: message.error(str(e)) # falls back on letter hints if hint_mode == 'number': chars = '0123456789' else: chars = config.val.hints.chars min_chars = config.val.hints.min_chars if config.val.hints.scatter and hint_mode != 'number': return self._hint_scattered(min_chars, chars, elems) else: return self._hint_linear(min_chars, chars, elems) def _hint_scattered(self, min_chars, chars, elems): """Produce scattered hint labels with variable length (like Vimium). Args: min_chars: The minimum length of labels. chars: The alphabet to use for labels. elems: The elements to generate labels for. """ # Determine how many digits the link hints will require in the worst # case. Usually we do not need all of these digits for every link # single hint, so we can show shorter hints for a few of the links. needed = max(min_chars, utils.ceil_log(len(elems), len(chars))) # Short hints are the number of hints we can possibly show which are # (needed - 1) digits in length. if needed > min_chars and needed > 1: total_space = len(chars) ** needed # For each 1 short link being added, len(chars) long links are # removed, therefore the space removed is len(chars) - 1. short_count = (total_space - len(elems)) // (len(chars) - 1) else: short_count = 0 long_count = len(elems) - short_count strings = [] if needed > 1: for i in range(short_count): strings.append(self._number_to_hint_str(i, chars, needed - 1)) start = short_count * len(chars) for i in range(start, start + long_count): strings.append(self._number_to_hint_str(i, chars, needed)) return self._shuffle_hints(strings, len(chars)) def _hint_linear(self, min_chars, chars, elems): """Produce linear hint labels with constant length (like dwb). Args: min_chars: The minimum length of labels. 
chars: The alphabet to use for labels. elems: The elements to generate labels for. """ strings = [] needed = max(min_chars, utils.ceil_log(len(elems), len(chars))) for i in range(len(elems)): strings.append(self._number_to_hint_str(i, chars, needed)) return strings def _shuffle_hints(self, hints, length): """Shuffle the given set of hints so that they're scattered. Hints starting with the same character will be spread evenly throughout the array. Inspired by Vimium. Args: hints: A list of hint strings. length: Length of the available charset. Return: A list of shuffled hint strings. """ buckets = [[] for i in range(length)] for i, hint in enumerate(hints): buckets[i % len(buckets)].append(hint) result = [] for bucket in buckets: result += bucket return result def _number_to_hint_str(self, number, chars, digits=0): """Convert a number like "8" into a hint string like "JK". This is used to sequentially generate all of the hint text. The hint string will be "padded with zeroes" to ensure its length is >= digits. Inspired by Vimium. Args: number: The hint number. chars: The charset to use. digits: The minimum output length. Return: A hint string. """ base = len(chars) hintstr = [] remainder = 0 while True: remainder = number % base hintstr.insert(0, chars[remainder]) number -= remainder number //= base if number <= 0: break # Pad the hint string we're returning so that it matches digits. for _ in range(0, digits - len(hintstr)): hintstr.insert(0, chars[0]) return ''.join(hintstr) def _check_args(self, target, *args): """Check the arguments passed to start() and raise if they're wrong. Args: target: A Target enum member. args: Arguments for userscript/download """ if not isinstance(target, Target): raise TypeError("Target {} is no Target member!".format(target)) if target in [Target.userscript, Target.spawn, Target.run, Target.fill]: if not args: raise cmdutils.CommandError( "'args' is required with target userscript/spawn/run/" "fill.") else: if args: raise cmdutils.CommandError( "'args' is only allowed with target userscript/spawn.") def _filter_matches(self, filterstr, elemstr): """Return True if `filterstr` matches `elemstr`.""" # Empty string and None always match if not filterstr: return True filterstr = filterstr.casefold() elemstr = elemstr.casefold() # Do multi-word matching return all(word in elemstr for word in filterstr.split()) def _filter_matches_exactly(self, filterstr, elemstr): """Return True if `filterstr` exactly matches `elemstr`.""" # Empty string and None never match if not filterstr: return False filterstr = filterstr.casefold() elemstr = elemstr.casefold() return filterstr == elemstr def _start_cb(self, elems): """Initialize the elements and labels based on the context set.""" if self._context is None: log.hints.debug("In _start_cb without context!") return if not elems: message.error("No elements found.") return strings = self._hint_strings(elems) log.hints.debug("hints: {}".format(', '.join(strings))) for elem, string in zip(elems, strings): label = HintLabel(elem, self._context) label.update_text('', string) self._context.all_labels.append(label) self._context.labels[string] = label keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) keyparser = keyparsers[usertypes.KeyMode.hint] keyparser.update_bindings(strings) message_bridge = objreg.get('message-bridge', scope='window', window=self._win_id) message_bridge.set_text(self._get_text()) modeman.enter(self._win_id, usertypes.KeyMode.hint, 'HintManager.start') if self._context.first: 
self._fire(strings[0]) return # to make auto_follow == 'always' work self._handle_auto_follow() @cmdutils.register(instance='hintmanager', scope='tab', name='hint', star_args_optional=True, maxsplit=2) def start(self, # pylint: disable=keyword-arg-before-vararg group='all', target=Target.normal, *args, mode=None, add_history=False, rapid=False, first=False): """Start hinting. Args: rapid: Whether to do rapid hinting. With rapid hinting, the hint mode isn't left after a hint is followed, so you can easily open multiple links. This is only possible with targets `tab` (with `tabs.background=true`), `tab-bg`, `window`, `run`, `hover`, `userscript` and `spawn`. add_history: Whether to add the spawned or yanked link to the browsing history. first: Click the first hinted element without prompting. group: The element types to hint. - `all`: All clickable elements. - `links`: Only links. - `images`: Only images. - `inputs`: Only input fields. Custom groups can be added via the `hints.selectors` setting and also used here. target: What to do with the selected element. - `normal`: Open the link. - `current`: Open the link in the current tab. - `tab`: Open the link in a new tab (honoring the `tabs.background` setting). - `tab-fg`: Open the link in a new foreground tab. - `tab-bg`: Open the link in a new background tab. - `window`: Open the link in a new window. - `hover` : Hover over the link. - `yank`: Yank the link to the clipboard. - `yank-primary`: Yank the link to the primary selection. - `run`: Run the argument as command. - `fill`: Fill the commandline with the command given as argument. - `download`: Download the link. - `userscript`: Call a userscript with `$QUTE_URL` set to the link. - `spawn`: Spawn a command. mode: The hinting mode to use. - `number`: Use numeric hints. - `letter`: Use the chars in the hints.chars setting. - `word`: Use hint words based on the html elements and the extra words. *args: Arguments for spawn/userscript/run/fill. - With `spawn`: The executable and arguments to spawn. `{hint-url}` will get replaced by the selected URL. - With `userscript`: The userscript to execute. Either store the userscript in `~/.local/share/qutebrowser/userscripts` (or `$XDG_DATA_HOME`), or use an absolute path. - With `fill`: The command to fill the statusbar with. `{hint-url}` will get replaced by the selected URL. - With `run`: Same as `fill`. 
""" tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self._win_id) tab = tabbed_browser.widget.currentWidget() if tab is None: raise cmdutils.CommandError("No WebView available yet!") mode_manager = objreg.get('mode-manager', scope='window', window=self._win_id) if mode_manager.mode == usertypes.KeyMode.hint: modeman.leave(self._win_id, usertypes.KeyMode.hint, 're-hinting') if rapid: if target in [Target.tab_bg, Target.window, Target.run, Target.hover, Target.userscript, Target.spawn, Target.download, Target.normal, Target.current, Target.yank, Target.yank_primary]: pass elif target == Target.tab and config.val.tabs.background: pass else: name = target.name.replace('_', '-') raise cmdutils.CommandError("Rapid hinting makes no sense " "with target {}!".format(name)) self._check_args(target, *args) self._context = HintContext() self._context.tab = tab self._context.target = target self._context.rapid = rapid self._context.hint_mode = self._get_hint_mode(mode) self._context.add_history = add_history self._context.first = first try: self._context.baseurl = tabbed_browser.current_url() except qtutils.QtValueError: raise cmdutils.CommandError("No URL set for this page yet!") self._context.args = list(args) self._context.group = group try: selector = webelem.css_selector(self._context.group, self._context.baseurl) except webelem.Error as e: raise cmdutils.CommandError(str(e)) self._context.tab.elements.find_css( selector, callback=self._start_cb, error_cb=lambda err: message.error(str(err)), only_visible=True) def _get_hint_mode(self, mode): """Get the hinting mode to use based on a mode argument.""" if mode is None: return config.val.hints.mode opt = config.instance.get_opt('hints.mode') try: opt.typ.to_py(mode) except configexc.ValidationError as e: raise cmdutils.CommandError("Invalid mode: {}".format(e)) return mode def current_mode(self): """Return the currently active hinting mode (or None otherwise).""" if self._context is None: return None return self._context.hint_mode def _handle_auto_follow(self, keystr="", filterstr="", visible=None): """Handle the auto_follow option.""" if visible is None: visible = {string: label for string, label in self._context.labels.items() if label.isVisible()} if len(visible) != 1: return auto_follow = config.val.hints.auto_follow if auto_follow == "always": follow = True elif auto_follow == "unique-match": follow = keystr or filterstr elif auto_follow == "full-match": elemstr = str(list(visible.values())[0].elem) filter_match = self._filter_matches_exactly(filterstr, elemstr) follow = (keystr in visible) or filter_match else: follow = False # save the keystr of the only one visible hint to be picked up # later by self.follow_hint self._context.to_follow = list(visible.keys())[0] if follow: # apply auto_follow_timeout timeout = config.val.hints.auto_follow_timeout keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) normal_parser = keyparsers[usertypes.KeyMode.normal] normal_parser.set_inhibited_timeout(timeout) # unpacking gets us the first (and only) key in the dict. 
self._fire(*visible) def handle_partial_key(self, keystr): """Handle a new partial keypress.""" if self._context is None: log.hints.debug("Got key without context!") return log.hints.debug("Handling new keystring: '{}'".format(keystr)) for string, label in self._context.labels.items(): try: if string.startswith(keystr): matched = string[:len(keystr)] rest = string[len(keystr):] label.update_text(matched, rest) # Show label again if it was hidden before label.show() else: # element doesn't match anymore -> hide it, unless in rapid # mode and hide_unmatched_rapid_hints is false (see #1799) if (not self._context.rapid or config.val.hints.hide_unmatched_rapid_hints): label.hide() except webelem.Error: pass self._handle_auto_follow(keystr=keystr) def filter_hints(self, filterstr): """Filter displayed hints according to a text. Args: filterstr: The string to filter with, or None to use the filter from previous call (saved in `self._context.filterstr`). If `filterstr` is an empty string or if both `filterstr` and `self._context.filterstr` are None, all hints are shown. """ if filterstr is None: filterstr = self._context.filterstr else: self._context.filterstr = filterstr log.hints.debug("Filtering hints on {!r}".format(filterstr)) visible = [] for label in self._context.all_labels: try: if self._filter_matches(filterstr, str(label.elem)): visible.append(label) # Show label again if it was hidden before label.show() else: # element doesn't match anymore -> hide it label.hide() except webelem.Error: pass if not visible: # Whoops, filtered all hints modeman.leave(self._win_id, usertypes.KeyMode.hint, 'all filtered') return if self._context.hint_mode == 'number': # renumber filtered hints strings = self._hint_strings(visible) self._context.labels = {} for label, string in zip(visible, strings): label.update_text('', string) self._context.labels[string] = label keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) keyparser = keyparsers[usertypes.KeyMode.hint] keyparser.update_bindings(strings, preserve_filter=True) # Note: filter_hints can be called with non-None filterstr only # when number mode is active if filterstr is not None: # pass self._context.labels as the dict of visible hints self._handle_auto_follow(filterstr=filterstr, visible=self._context.labels) def _fire(self, keystr): """Fire a completed hint. Args: keystr: The keychain string to follow. """ # Handlers which take a QWebElement elem_handlers = { Target.normal: self._actions.click, Target.current: self._actions.click, Target.tab: self._actions.click, Target.tab_fg: self._actions.click, Target.tab_bg: self._actions.click, Target.window: self._actions.click, Target.hover: self._actions.click, # _download needs a QWebElement to get the frame. 
Target.download: self._actions.download, Target.userscript: self._actions.call_userscript, } # Handlers which take a QUrl url_handlers = { Target.yank: self._actions.yank, Target.yank_primary: self._actions.yank, Target.run: self._actions.run_cmd, Target.fill: self._actions.preset_cmd_text, Target.spawn: self._actions.spawn, } elem = self._context.labels[keystr].elem if not elem.has_frame(): message.error("This element has no webframe.") return if self._context.target in elem_handlers: handler = functools.partial(elem_handlers[self._context.target], elem, self._context) elif self._context.target in url_handlers: url = elem.resolve_url(self._context.baseurl) if url is None: message.error("No suitable link found for this element.") return handler = functools.partial(url_handlers[self._context.target], url, self._context) if self._context.add_history: objreg.get('web-history').add_url(url, "") else: raise ValueError("No suitable handler found!") if not self._context.rapid: modeman.leave(self._win_id, usertypes.KeyMode.hint, 'followed', maybe=True) else: # Reset filtering self.filter_hints(None) # Undo keystring highlighting for string, label in self._context.labels.items(): label.update_text('', string) try: handler() except HintingError as e: message.error(str(e)) if self._context is not None: self._context.first_run = False @cmdutils.register(instance='hintmanager', scope='tab', modes=[usertypes.KeyMode.hint]) def follow_hint(self, select=False, keystring=None): """Follow a hint. Args: select: Only select the given hint, don't necessarily follow it. keystring: The hint to follow, or None. """ if keystring is None: if self._context.to_follow is None: raise cmdutils.CommandError("No hint to follow") if select: raise cmdutils.CommandError("Can't use --select without hint.") keystring = self._context.to_follow elif keystring not in self._context.labels: raise cmdutils.CommandError("No hint {}!".format(keystring)) if select: self.handle_partial_key(keystring) else: self._fire(keystring) @pyqtSlot(usertypes.KeyMode) def on_mode_left(self, mode): """Stop hinting when hinting mode was left.""" if mode != usertypes.KeyMode.hint or self._context is None: # We have one HintManager per tab, so when this gets called, # self._context might be None, because the current tab is not # hinting. return self._cleanup() class WordHinter: """Generator for word hints. Attributes: words: A set of words to be used when no "smart hint" can be derived from the hinted element. """ def __init__(self): # will be initialized on first use. 
self.words = set() self.dictionary = None def ensure_initialized(self): """Generate the used words if yet uninitialized.""" dictionary = config.val.hints.dictionary if not self.words or self.dictionary != dictionary: self.words.clear() self.dictionary = dictionary try: with open(dictionary, encoding="UTF-8") as wordfile: alphabet = set(ascii_lowercase) hints = set() lines = (line.rstrip().lower() for line in wordfile) for word in lines: if set(word) - alphabet: # contains none-alphabetic chars continue if len(word) > 4: # we don't need words longer than 4 continue for i in range(len(word)): # remove all prefixes of this word hints.discard(word[:i + 1]) hints.add(word) self.words.update(hints) except IOError as e: error = "Word hints requires reading the file at {}: {}" raise HintingError(error.format(dictionary, str(e))) def extract_tag_words(self, elem): """Extract tag words form the given element.""" attr_extractors = { "alt": lambda elem: elem["alt"], "name": lambda elem: elem["name"], "title": lambda elem: elem["title"], "placeholder": lambda elem: elem["placeholder"], "src": lambda elem: elem["src"].split('/')[-1], "href": lambda elem: elem["href"].split('/')[-1], "text": str, } extractable_attrs = collections.defaultdict(list, { "img": ["alt", "title", "src"], "a": ["title", "href", "text"], "input": ["name", "placeholder"], "textarea": ["name", "placeholder"], "button": ["text"] }) return (attr_extractors[attr](elem) for attr in extractable_attrs[elem.tag_name()] if attr in elem or attr == "text") def tag_words_to_hints(self, words): """Take words and transform them to proper hints if possible.""" for candidate in words: if not candidate: continue match = re.search('[A-Za-z]{3,}', candidate) if not match: continue if 4 < match.end() - match.start() < 8: yield candidate[match.start():match.end()].lower() def any_prefix(self, hint, existing): return any(hint.startswith(e) or e.startswith(hint) for e in existing) def filter_prefixes(self, hints, existing): return (h for h in hints if not self.any_prefix(h, existing)) def new_hint_for(self, elem, existing, fallback): """Return a hint for elem, not conflicting with the existing.""" new = self.tag_words_to_hints(self.extract_tag_words(elem)) new_no_prefixes = self.filter_prefixes(new, existing) fallback_no_prefixes = self.filter_prefixes(fallback, existing) # either the first good, or None return (next(new_no_prefixes, None) or next(fallback_no_prefixes, None)) def hint(self, elems): """Produce hint labels based on the html tags. Produce hint words based on the link text and random words from the words arg as fallback. Args: words: Words to use as fallback when no link text can be used. elems: The elements to get hint strings for. Return: A list of hint strings, in the same order as the elements. """ self.ensure_initialized() hints = [] used_hints = set() words = iter(self.words) for elem in elems: hint = self.new_hint_for(elem, used_hints, words) if not hint: raise HintingError("Not enough words in the dictionary.") used_hints.add(hint) hints.append(hint) return hints
1
23,562
Is there any case where `tab` is None?
qutebrowser-qutebrowser
py
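The question above refers to the guard near the top of `HintManager.start` in this record: `tabbed_browser.widget.currentWidget()` can return no widget before a web view exists, which is why the code raises `CommandError("No WebView available yet!")`. Below is a minimal sketch of the same guard-first pattern in Go; the `browser`/`widget` types and names are illustrative stand-ins, not qutebrowser API.

```go
package main

import (
	"errors"
	"fmt"
)

// widget stands in for a web view; browser.CurrentWidget may return nil
// before any view exists, mirroring currentWidget() returning None above.
type widget struct{ title string }

type browser struct{ current *widget }

func (b *browser) CurrentWidget() *widget { return b.current }

// startHinting checks for a missing widget up front, the same way
// HintManager.start raises "No WebView available yet!".
func startHinting(b *browser) error {
	tab := b.CurrentWidget()
	if tab == nil {
		return errors.New("no web view available yet")
	}
	fmt.Println("hinting in", tab.title)
	return nil
}

func main() {
	fmt.Println(startHinting(&browser{}))                          // error path: no current widget
	fmt.Println(startHinting(&browser{current: &widget{"tab 1"}})) // happy path
}
```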
@@ -132,8 +132,14 @@ func (e *endpoint) Apply(ctx context.Context, req ctrl.Request, chaos v1alpha1.I Namespace: pod.Namespace, }) - t.Append(sourceSet) - t.Append(targetSet) + e := t.Append(sourceSet) + if e != nil { + return e + } + e = t.Append(targetSet) + if e != nil { + return e + } } sourcesChains := []v1alpha1.RawIptables{}
1
// Copyright 2019 Chaos Mesh Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package partition import ( "context" "errors" "fmt" "github.com/hashicorp/go-multierror" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" ctrl "sigs.k8s.io/controller-runtime" "github.com/chaos-mesh/chaos-mesh/api/v1alpha1" "github.com/chaos-mesh/chaos-mesh/controllers/common" "github.com/chaos-mesh/chaos-mesh/controllers/config" "github.com/chaos-mesh/chaos-mesh/controllers/networkchaos/podnetworkchaosmanager" "github.com/chaos-mesh/chaos-mesh/controllers/podnetworkchaos/ipset" "github.com/chaos-mesh/chaos-mesh/controllers/podnetworkchaos/iptable" "github.com/chaos-mesh/chaos-mesh/controllers/podnetworkchaos/netutils" pb "github.com/chaos-mesh/chaos-mesh/pkg/chaosdaemon/pb" "github.com/chaos-mesh/chaos-mesh/pkg/events" "github.com/chaos-mesh/chaos-mesh/pkg/finalizer" "github.com/chaos-mesh/chaos-mesh/pkg/router" ctx "github.com/chaos-mesh/chaos-mesh/pkg/router/context" end "github.com/chaos-mesh/chaos-mesh/pkg/router/endpoint" "github.com/chaos-mesh/chaos-mesh/pkg/selector" ) const ( networkPartitionActionMsg = " partition network duration %s" networkChaosSourceMsg = "This is a source pod." networkChaosTargetMsg = "This is a target pod." sourceIPSetPostFix = "src" targetIPSetPostFix = "tgt" ) type endpoint struct { ctx.Context } // Object implements the reconciler.InnerReconciler.Object func (e *endpoint) Object() v1alpha1.InnerObject { return &v1alpha1.NetworkChaos{} } // Apply applies the chaos operation func (e *endpoint) Apply(ctx context.Context, req ctrl.Request, chaos v1alpha1.InnerObject) error { e.Log.Info("Applying network partition") networkchaos, ok := chaos.(*v1alpha1.NetworkChaos) if !ok { err := errors.New("chaos is not NetworkChaos") e.Log.Error(err, "chaos is not NetworkChaos", "chaos", chaos) return err } source := networkchaos.Namespace + "/" + networkchaos.Name m := podnetworkchaosmanager.New(source, e.Log, e.Client) sources, err := selector.SelectAndFilterPods(ctx, e.Client, e.Reader, &networkchaos.Spec, config.ControllerCfg.ClusterScoped, config.ControllerCfg.TargetNamespace, config.ControllerCfg.EnableFilterNamespace) if err != nil { e.Log.Error(err, "failed to select and filter source pods") return err } var targets []v1.Pod if networkchaos.Spec.Target != nil { targets, err = selector.SelectAndFilterPods(ctx, e.Client, e.Reader, networkchaos.Spec.Target, config.ControllerCfg.ClusterScoped, config.ControllerCfg.TargetNamespace, config.ControllerCfg.EnableFilterNamespace) if err != nil { e.Log.Error(err, "failed to select and filter target pods") return err } } sourceSet := ipset.BuildIPSet(sources, []string{}, networkchaos, sourceIPSetPostFix, source) externalCidrs, err := netutils.ResolveCidrs(networkchaos.Spec.ExternalTargets) if err != nil { e.Log.Error(err, "failed to resolve external targets") return err } targetSet := ipset.BuildIPSet(targets, externalCidrs, networkchaos, targetIPSetPostFix, source) allPods := append(sources, targets...) 
type podPositionTuple struct { Pod v1.Pod Position string } keyPodMap := make(map[types.NamespacedName]podPositionTuple) for index, pod := range allPods { position := "" if index < len(sources) { position = "source" } else { position = "target" } keyPodMap[types.NamespacedName{ Name: pod.Name, Namespace: pod.Namespace, }] = podPositionTuple{ Pod: pod, Position: position, } } // Set up ipset in every related pods for index := range allPods { pod := allPods[index] e.Log.Info("PODS", "name", pod.Name, "namespace", pod.Namespace) t := m.WithInit(types.NamespacedName{ Name: pod.Name, Namespace: pod.Namespace, }) t.Append(sourceSet) t.Append(targetSet) } sourcesChains := []v1alpha1.RawIptables{} targetsChains := []v1alpha1.RawIptables{} if networkchaos.Spec.Direction == v1alpha1.To || networkchaos.Spec.Direction == v1alpha1.Both { sourcesChains = append(sourcesChains, v1alpha1.RawIptables{ Name: iptable.GenerateName(pb.Chain_OUTPUT, networkchaos), Direction: v1alpha1.Output, IPSets: []string{targetSet.Name}, RawRuleSource: v1alpha1.RawRuleSource{ Source: source, }, }) targetsChains = append(targetsChains, v1alpha1.RawIptables{ Name: iptable.GenerateName(pb.Chain_INPUT, networkchaos), Direction: v1alpha1.Input, IPSets: []string{sourceSet.Name}, RawRuleSource: v1alpha1.RawRuleSource{ Source: source, }, }) } if networkchaos.Spec.Direction == v1alpha1.From || networkchaos.Spec.Direction == v1alpha1.Both { sourcesChains = append(sourcesChains, v1alpha1.RawIptables{ Name: iptable.GenerateName(pb.Chain_INPUT, networkchaos), Direction: v1alpha1.Input, IPSets: []string{targetSet.Name}, RawRuleSource: v1alpha1.RawRuleSource{ Source: source, }, }) targetsChains = append(targetsChains, v1alpha1.RawIptables{ Name: iptable.GenerateName(pb.Chain_OUTPUT, networkchaos), Direction: v1alpha1.Output, IPSets: []string{sourceSet.Name}, RawRuleSource: v1alpha1.RawRuleSource{ Source: source, }, }) } e.Log.Info("chains prepared", "sourcesChains", sourcesChains, "targetsChains", targetsChains) err = e.SetChains(ctx, sources, sourcesChains, m, networkchaos) if err != nil { return err } err = e.SetChains(ctx, targets, targetsChains, m, networkchaos) if err != nil { return err } responses := m.Commit(ctx) networkchaos.Status.Experiment.PodRecords = make([]v1alpha1.PodStatus, 0, len(allPods)) for _, keyErrorTuple := range responses { key := keyErrorTuple.Key err := keyErrorTuple.Err if err != nil { if err != podnetworkchaosmanager.ErrPodNotFound && err != podnetworkchaosmanager.ErrPodNotRunning { e.Log.Error(err, "fail to commit") } else { e.Log.Info("pod is not found or not running", "key", key) } return err } pod := keyPodMap[keyErrorTuple.Key] ps := v1alpha1.PodStatus{ Namespace: pod.Pod.Namespace, Name: pod.Pod.Name, HostIP: pod.Pod.Status.HostIP, PodIP: pod.Pod.Status.PodIP, Action: string(networkchaos.Spec.Action), } if pod.Position == "source" { ps.Message = networkChaosSourceMsg } else { ps.Message = networkChaosTargetMsg } // TODO: add source, target and tc action message if networkchaos.Spec.Duration != nil { ps.Message += fmt.Sprintf(networkPartitionActionMsg, *networkchaos.Spec.Duration) } networkchaos.Status.Experiment.PodRecords = append(networkchaos.Status.Experiment.PodRecords, ps) } e.Event(networkchaos, v1.EventTypeNormal, events.ChaosInjected, "") return nil } // SetChains sets iptables chains for pods func (e *endpoint) SetChains(ctx context.Context, pods []v1.Pod, chains []v1alpha1.RawIptables, m *podnetworkchaosmanager.PodNetworkManager, networkchaos *v1alpha1.NetworkChaos) error { for index := range 
pods { pod := &pods[index] key, err := cache.MetaNamespaceKeyFunc(pod) if err != nil { return err } t := m.WithInit(types.NamespacedName{ Name: pod.Name, Namespace: pod.Namespace, }) for _, chain := range chains { t.Append(chain) } networkchaos.Finalizers = finalizer.InsertFinalizer(networkchaos.Finalizers, key) } return nil } // Recover means the reconciler recovers the chaos action func (e *endpoint) Recover(ctx context.Context, req ctrl.Request, chaos v1alpha1.InnerObject) error { networkchaos, ok := chaos.(*v1alpha1.NetworkChaos) if !ok { err := errors.New("chaos is not NetworkChaos") e.Log.Error(err, "chaos is not NetworkChaos", "chaos", chaos) return err } if err := e.cleanFinalizersAndRecover(ctx, networkchaos); err != nil { return err } e.Event(networkchaos, v1.EventTypeNormal, events.ChaosRecovered, "") return nil } func (e *endpoint) cleanFinalizersAndRecover(ctx context.Context, chaos *v1alpha1.NetworkChaos) error { var result error source := chaos.Namespace + "/" + chaos.Name m := podnetworkchaosmanager.New(source, e.Log, e.Client) for _, key := range chaos.Finalizers { ns, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { result = multierror.Append(result, err) continue } _ = m.WithInit(types.NamespacedName{ Namespace: ns, Name: name, }) } responses := m.Commit(ctx) for _, response := range responses { key := response.Key err := response.Err // if pod not found or not running, directly return and giveup recover. if err != nil { if err != podnetworkchaosmanager.ErrPodNotFound && err != podnetworkchaosmanager.ErrPodNotRunning { e.Log.Error(err, "fail to commit", "key", key) result = multierror.Append(result, err) continue } e.Log.Info("pod is not found or not running", "key", key) } chaos.Finalizers = finalizer.RemoveFromFinalizer(chaos.Finalizers, response.Key.String()) } e.Log.Info("After recovering", "finalizers", chaos.Finalizers) if chaos.Annotations[common.AnnotationCleanFinalizer] == common.AnnotationCleanFinalizerForced { e.Log.Info("Force cleanup all finalizers", "chaos", chaos) chaos.Finalizers = make([]string, 0) return nil } return result } func init() { router.Register("networkchaos", &v1alpha1.NetworkChaos{}, func(obj runtime.Object) bool { chaos, ok := obj.(*v1alpha1.NetworkChaos) if !ok { return false } return chaos.Spec.Action == v1alpha1.PartitionAction }, func(ctx ctx.Context) end.Endpoint { return &endpoint{ Context: ctx, } }) }
1
21,520
Please use `err` instead of `e`.
chaos-mesh-chaos-mesh
go
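The patch above adds the missing error checks on `t.Append`, but names the error `e`, which shadows the method receiver `e *endpoint` of `Apply`; the review asks for the conventional `err`. Here is a self-contained sketch of the requested shape; the `tracker` type is an illustrative stand-in for the transaction returned by `m.WithInit`, not chaos-mesh API.

```go
package main

import (
	"errors"
	"fmt"
)

// tracker is a stand-in for the transaction returned by m.WithInit in the
// chaos-mesh code above; the type and its behaviour here are illustrative.
type tracker struct{ failOnAppend bool }

func (t *tracker) Append(item string) error {
	if t.failOnAppend {
		return errors.New("append " + item + ": failed")
	}
	return nil
}

// apply shows the shape the review asks for: each Append result checked
// immediately, with the error named err rather than e (which would shadow
// the receiver name in the real endpoint.Apply method).
func apply(t *tracker) error {
	if err := t.Append("sourceSet"); err != nil {
		return err
	}
	if err := t.Append("targetSet"); err != nil {
		return err
	}
	return nil
}

func main() {
	fmt.Println(apply(&tracker{}))                   // <nil>
	fmt.Println(apply(&tracker{failOnAppend: true})) // append sourceSet: failed
}
```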
@@ -159,7 +159,9 @@ function attachGamepadScript(e) { } // No need to check for gamepads manually at load time, the eventhandler will be fired for that -window.addEventListener("gamepadconnected", attachGamepadScript); +if (navigator.getGamepads) { /* eslint-disable-line compat/compat */ + window.addEventListener("gamepadconnected", attachGamepadScript); +} export default { enable: enable,
1
/** * Module for performing keyboard navigation. * @module components/input/keyboardnavigation */ import inputManager from "inputManager"; import layoutManager from "layoutManager"; /** * Key name mapping. */ const KeyNames = { 13: "Enter", 19: "Pause", 27: "Escape", 32: "Space", 37: "ArrowLeft", 38: "ArrowUp", 39: "ArrowRight", 40: "ArrowDown", // MediaRewind (Tizen/WebOS) 412: "MediaRewind", // MediaStop (Tizen/WebOS) 413: "MediaStop", // MediaPlay (Tizen/WebOS) 415: "MediaPlay", // MediaFastForward (Tizen/WebOS) 417: "MediaFastForward", // Back (WebOS) 461: "Back", // Back (Tizen) 10009: "Back", // MediaTrackPrevious (Tizen) 10232: "MediaTrackPrevious", // MediaTrackNext (Tizen) 10233: "MediaTrackNext", // MediaPlayPause (Tizen) 10252: "MediaPlayPause" }; /** * Keys used for keyboard navigation. */ const NavigationKeys = ["ArrowLeft", "ArrowRight", "ArrowUp", "ArrowDown"]; let hasFieldKey = false; try { hasFieldKey = "key" in new KeyboardEvent("keydown"); } catch (e) { console.error("error checking 'key' field"); } if (!hasFieldKey) { // Add [a..z] for (let i = 65; i <= 90; i++) { KeyNames[i] = String.fromCharCode(i).toLowerCase(); } } /** * Returns key name from event. * * @param {KeyboardEvent} event - Keyboard event. * @return {string} Key name. */ export function getKeyName(event) { return KeyNames[event.keyCode] || event.key; } /** * Returns _true_ if key is used for navigation. * * @param {string} key - Key name. * @return {boolean} _true_ if key is used for navigation. */ export function isNavigationKey(key) { return NavigationKeys.indexOf(key) != -1; } export function enable() { document.addEventListener("keydown", function (e) { const key = getKeyName(e); // Ignore navigation keys for non-TV if (!layoutManager.tv && isNavigationKey(key)) { return; } let capture = true; switch (key) { case "ArrowLeft": inputManager.handle("left"); break; case "ArrowUp": inputManager.handle("up"); break; case "ArrowRight": inputManager.handle("right"); break; case "ArrowDown": inputManager.handle("down"); break; case "Back": inputManager.handle("back"); break; case "Escape": if (layoutManager.tv) { inputManager.handle("back"); } else { capture = false; } break; case "MediaPlay": inputManager.handle("play"); break; case "Pause": inputManager.handle("pause"); break; case "MediaPlayPause": inputManager.handle("playpause"); break; case "MediaRewind": inputManager.handle("rewind"); break; case "MediaFastForward": inputManager.handle("fastforward"); break; case "MediaStop": inputManager.handle("stop"); break; case "MediaTrackPrevious": inputManager.handle("previoustrack"); break; case "MediaTrackNext": inputManager.handle("nexttrack"); break; default: capture = false; } if (capture) { console.debug("disabling default event handling"); e.preventDefault(); } }); } // Gamepad initialisation. No script is required if no gamepads are present at init time, saving a bit of resources. // Whenever the gamepad is connected, we hand all the control of the gamepad to gamepadtokey.js by removing the event handler function attachGamepadScript(e) { console.log("Gamepad connected! Attaching gamepadtokey.js script"); window.removeEventListener("gamepadconnected", attachGamepadScript); require(["components/input/gamepadtokey"]); } // No need to check for gamepads manually at load time, the eventhandler will be fired for that window.addEventListener("gamepadconnected", attachGamepadScript); export default { enable: enable, getKeyName: getKeyName, isNavigationKey: isNavigationKey };
1
14,337
The `attachGamepadScript` function above can also be placed inside the `if` block.
jellyfin-jellyfin-web
js
@@ -108,7 +108,7 @@ func decodeBytecode() ([]byte, error) { return hex.DecodeString(util.TrimHexPrefix(bytecodeFlag.Value().(string))) } -func signer() (address string, err error) { +func Signer() (address string, err error) { return util.GetAddress(signerFlag.Value().(string)) }
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package action import ( "context" "encoding/hex" "fmt" "math/big" "strings" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" "github.com/spf13/cobra" "google.golang.org/grpc/status" "github.com/iotexproject/go-pkgs/crypto" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-proto/golang/iotexapi" "github.com/iotexproject/iotex-proto/golang/iotextypes" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/ioctl/cmd/account" "github.com/iotexproject/iotex-core/ioctl/config" "github.com/iotexproject/iotex-core/ioctl/flag" "github.com/iotexproject/iotex-core/ioctl/output" "github.com/iotexproject/iotex-core/ioctl/util" "github.com/iotexproject/iotex-core/pkg/unit" "github.com/iotexproject/iotex-core/pkg/util/byteutil" ) // Multi-language support var ( actionCmdShorts = map[config.Language]string{ config.English: "Manage actions of IoTeX blockchain", config.Chinese: "管理IoTex区块链的行为", // this translation } actionCmdUses = map[config.Language]string{ config.English: "action", config.Chinese: "action 行为", // this translation } flagActionEndPointUsages = map[config.Language]string{ config.English: "set endpoint for once", config.Chinese: "一次设置端点", // this translation } flagActionInsecureUsages = map[config.Language]string{ config.English: "insecure connection for once", config.Chinese: "一次不安全连接", // this translation } ) const defaultGasLimit = uint64(20000000) var defaultGasPrice = big.NewInt(unit.Qev) // Flags var ( gasLimitFlag = flag.NewUint64VarP("gas-limit", "l", 0, "set gas limit") gasPriceFlag = flag.NewStringVarP("gas-price", "p", "1", "set gas price (unit: 10^(-6)IOTX), use suggested gas price if input is \"0\"") nonceFlag = flag.NewUint64VarP("nonce", "n", 0, "set nonce (default using pending nonce)") signerFlag = flag.NewStringVarP("signer", "s", "", "choose a signing account") bytecodeFlag = flag.NewStringVarP("bytecode", "b", "", "set the byte code") yesFlag = flag.BoolVarP("assume-yes", "y", false, " answer yes for all confirmations") passwordFlag = flag.NewStringVarP("password", "P", "", "input password for account") ) // ActionCmd represents the action command var ActionCmd = &cobra.Command{ Use: config.TranslateInLang(actionCmdUses, config.UILanguage), Short: config.TranslateInLang(actionCmdShorts, config.UILanguage), } type sendMessage struct { Info string `json:"info"` TxHash string `json:"txHash"` URL string `json:"url"` } func (m *sendMessage) String() string { if output.Format == "" { return fmt.Sprintf("%s\nWait for several seconds and query this action by hash:%s", m.Info, m.URL) } return output.FormatString(output.Result, m) } func init() { ActionCmd.AddCommand(actionHashCmd) ActionCmd.AddCommand(actionTransferCmd) ActionCmd.AddCommand(actionDeployCmd) ActionCmd.AddCommand(actionInvokeCmd) ActionCmd.AddCommand(actionReadCmd) ActionCmd.AddCommand(actionClaimCmd) ActionCmd.AddCommand(actionDepositCmd) ActionCmd.AddCommand(actionSendRawCmd) ActionCmd.PersistentFlags().StringVar(&config.ReadConfig.Endpoint, 
"endpoint", config.ReadConfig.Endpoint, config.TranslateInLang(flagActionEndPointUsages, config.UILanguage)) ActionCmd.PersistentFlags().BoolVar(&config.Insecure, "insecure", config.Insecure, config.TranslateInLang(flagActionInsecureUsages, config.UILanguage)) } func decodeBytecode() ([]byte, error) { return hex.DecodeString(util.TrimHexPrefix(bytecodeFlag.Value().(string))) } func signer() (address string, err error) { return util.GetAddress(signerFlag.Value().(string)) } func nonce(executor string) (uint64, error) { nonce := nonceFlag.Value().(uint64) if nonce != 0 { return nonce, nil } accountMeta, err := account.GetAccountMeta(executor) if err != nil { return 0, output.NewError(0, "failed to get account meta", err) } return accountMeta.PendingNonce, nil } // RegisterWriteCommand registers action flags for command func RegisterWriteCommand(cmd *cobra.Command) { gasLimitFlag.RegisterCommand(cmd) gasPriceFlag.RegisterCommand(cmd) signerFlag.RegisterCommand(cmd) nonceFlag.RegisterCommand(cmd) yesFlag.RegisterCommand(cmd) passwordFlag.RegisterCommand(cmd) } // gasPriceInRau returns the suggest gas price func gasPriceInRau() (*big.Int, error) { if account.CryptoSm2 { return big.NewInt(0), nil } gasPrice := gasPriceFlag.Value().(string) if len(gasPrice) != 0 { return util.StringToRau(gasPrice, util.GasPriceDecimalNum) } conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure) if err != nil { return nil, output.NewError(output.NetworkError, "failed to connect to endpoint", err) } defer conn.Close() cli := iotexapi.NewAPIServiceClient(conn) ctx := context.Background() jwtMD, err := util.JwtAuth() if err == nil { ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx) } request := &iotexapi.SuggestGasPriceRequest{} response, err := cli.SuggestGasPrice(ctx, request) if err != nil { sta, ok := status.FromError(err) if ok { return nil, output.NewError(output.APIError, sta.Message(), nil) } return nil, output.NewError(output.NetworkError, "failed to invoke SuggestGasPrice api", err) } return new(big.Int).SetUint64(response.GasPrice), nil } func fixGasLimit(caller string, execution *action.Execution) (*action.Execution, error) { conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure) if err != nil { return nil, output.NewError(output.NetworkError, "failed to connect to endpoint", err) } defer conn.Close() cli := iotexapi.NewAPIServiceClient(conn) request := &iotexapi.EstimateActionGasConsumptionRequest{ Action: &iotexapi.EstimateActionGasConsumptionRequest_Execution{ Execution: execution.Proto(), }, CallerAddress: caller, } ctx := context.Background() jwtMD, err := util.JwtAuth() if err == nil { ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx) } res, err := cli.EstimateActionGasConsumption(ctx, request) if err != nil { sta, ok := status.FromError(err) if ok { return nil, output.NewError(output.APIError, sta.Message(), nil) } return nil, output.NewError(output.NetworkError, "failed to invoke EstimateActionGasConsumption api", err) } return action.NewExecution(execution.Contract(), execution.Nonce(), execution.Amount(), res.Gas, execution.GasPrice(), execution.Data()) } // SendRaw sends raw action to blockchain func SendRaw(selp *iotextypes.Action) error { conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure) if err != nil { return output.NewError(output.NetworkError, "failed to connect to endpoint", err) } defer conn.Close() cli := iotexapi.NewAPIServiceClient(conn) ctx := context.Background() jwtMD, err := 
util.JwtAuth() if err == nil { ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx) } request := &iotexapi.SendActionRequest{Action: selp} if _, err = cli.SendAction(ctx, request); err != nil { if sta, ok := status.FromError(err); ok { return output.NewError(output.APIError, sta.Message(), nil) } return output.NewError(output.NetworkError, "failed to invoke SendAction api", err) } shash := hash.Hash256b(byteutil.Must(proto.Marshal(selp))) txhash := hex.EncodeToString(shash[:]) message := sendMessage{Info: "Action has been sent to blockchain.", TxHash: txhash} switch config.ReadConfig.Explorer { case "iotexscan": message.URL = "iotexscan.io/action/" + txhash case "iotxplorer": message.URL = "iotxplorer.io/actions/" + txhash default: message.URL = config.ReadConfig.Explorer + txhash } fmt.Println(message.String()) return nil } // SendAction sends signed action to blockchain func SendAction(elp action.Envelope, signer string) error { var prvKey crypto.PrivateKey var err error if account.IsSignerExist(signer) { // Get signer's password password := passwordFlag.Value().(string) if password == "" { output.PrintQuery(fmt.Sprintf("Enter password #%s:\n", signer)) password, err = util.ReadSecretFromStdin() if err != nil { return output.NewError(output.InputError, "failed to get password", err) } } prvKey, err = account.LocalAccountToPrivateKey(signer, password) if err != nil { return output.NewError(output.KeystoreError, "failed to get private key from keystore", err) } } else { // Get private key output.PrintQuery(fmt.Sprintf("Enter private key #%s:", signer)) prvKeyString, err := util.ReadSecretFromStdin() if err != nil { return output.NewError(output.InputError, "failed to get private key", err) } prvKey, err = crypto.HexStringToPrivateKey(prvKeyString) if err != nil { return output.NewError(output.InputError, "failed to HexString private key", err) } } defer prvKey.Zero() sealed, err := action.Sign(elp, prvKey) prvKey.Zero() if err != nil { return output.NewError(output.CryptoError, "failed to sign action", err) } if err := isBalanceEnough(signer, sealed); err != nil { return output.NewError(0, "failed to pass balance check", err) // TODO: undefined error } selp := sealed.Proto() actionInfo, err := printActionProto(selp) if err != nil { return output.NewError(0, "failed to print action proto message", err) } if yesFlag.Value() == false { var confirm string info := fmt.Sprintln(actionInfo + "\nPlease confirm your action.\n") message := output.ConfirmationMessage{Info: info, Options: []string{"yes"}} fmt.Println(message.String()) fmt.Scanf("%s", &confirm) if !strings.EqualFold(confirm, "yes") { output.PrintResult("quit") return nil } } return SendRaw(selp) } // Execute sends signed execution transaction to blockchain func Execute(contract string, amount *big.Int, bytecode []byte) error { gasPriceRau, err := gasPriceInRau() if err != nil { return output.NewError(0, "failed to get gas price", err) } signer, err := signer() if err != nil { return output.NewError(output.AddressError, "failed to get signer address", err) } nonce, err := nonce(signer) if err != nil { return output.NewError(0, "failed to get nonce", err) } gasLimit := gasLimitFlag.Value().(uint64) tx, err := action.NewExecution(contract, nonce, amount, gasLimit, gasPriceRau, bytecode) if err != nil || tx == nil { return output.NewError(output.InstantiationError, "failed to make a Execution instance", err) } if gasLimit == 0 { tx, err = fixGasLimit(signer, tx) if err != nil || tx == nil { return output.NewError(0, "failed to fix Execution 
gaslimit", err) } gasLimit = tx.GasLimit() } return SendAction( (&action.EnvelopeBuilder{}). SetNonce(nonce). SetGasPrice(gasPriceRau). SetGasLimit(gasLimit). SetAction(tx).Build(), signer, ) } // Read reads smart contract on IoTeX blockchain func Read(contract address.Address, amount *big.Int, bytecode []byte) (string, error) { caller, err := signer() if err != nil { caller = address.ZeroAddress } exec, err := action.NewExecution(contract.String(), 0, amount, defaultGasLimit, defaultGasPrice, bytecode) if err != nil { return "", output.NewError(output.InstantiationError, "cannot make an Execution instance", err) } conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure) if err != nil { return "", output.NewError(output.NetworkError, "failed to connect to endpoint", err) } defer conn.Close() ctx := context.Background() jwtMD, err := util.JwtAuth() if err == nil { ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx) } res, err := iotexapi.NewAPIServiceClient(conn).ReadContract( ctx, &iotexapi.ReadContractRequest{ Execution: exec.Proto(), CallerAddress: caller, }, ) if err == nil { return res.Data, nil } if sta, ok := status.FromError(err); ok { return "", output.NewError(output.APIError, sta.Message(), nil) } return "", output.NewError(output.NetworkError, "failed to invoke ReadContract api", err) } func isBalanceEnough(address string, act action.SealedEnvelope) error { accountMeta, err := account.GetAccountMeta(address) if err != nil { return output.NewError(0, "failed to get account meta", err) } balance, ok := big.NewInt(0).SetString(accountMeta.Balance, 10) if !ok { return output.NewError(output.ConvertError, "failed to convert balance into big int", nil) } cost, err := act.Cost() if err != nil { return output.NewError(output.RuntimeError, "failed to check cost of an action", nil) } if balance.Cmp(cost) < 0 { return output.NewError(output.ValidationError, "balance is not enough", nil) } return nil }
1
21,902
When we use RegisterWriteCommand, this func should be exported so that we can use it to get the signer from outside the action package.
iotexproject-iotex-core
go
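The rename in this record's patch relies on Go's visibility rule: an identifier is exported from its package only if it starts with an upper-case letter, so `signer` is callable only inside `package action` while `Signer` is callable from anywhere. Below is a two-file sketch of the resulting usage; the first file reproduces the patched helper (leaning on the package's existing `util.GetAddress` and `signerFlag`), and the module path `example.com/cli` in the caller is hypothetical.

```go
// action/signer.go - the patched helper; exported because it starts with an
// upper-case letter. util.GetAddress and signerFlag are the identifiers
// already present in the file above.
package action

func Signer() (address string, err error) {
	return util.GetAddress(signerFlag.Value().(string))
}
```

```go
// main.go - a caller in another package, which the rename makes possible.
package main

import (
	"fmt"

	"example.com/cli/action" // hypothetical module path
)

func main() {
	addr, err := action.Signer()
	if err != nil {
		fmt.Println("failed to get signer:", err)
		return
	}
	fmt.Println("signer:", addr)
}
```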
@@ -123,7 +123,7 @@ func (management *Management) serveNewConnection(connection net.Conn) { func (management *Management) deliverLines() { for { line := <-management.lineReceiver - log.Debug(management.logPrefix, "Line delivering: ", line) + // log.Debug(management.logPrefix, "Line delivering: ", line) lineConsumed := false for _, middleware := range management.middlewares {
1
package openvpn import ( "bufio" "net" "net/textproto" "sync" "time" log "github.com/cihub/seelog" ) // https://openvpn.net/index.php/open-source/documentation/miscellaneous/79-management-interface.html type Management struct { socketAddress string logPrefix string lineReceiver chan string middlewares []ManagementMiddleware listenerShutdownStarted chan bool listenerShutdownWaiter sync.WaitGroup } type ManagementMiddleware interface { Start(connection net.Conn) error Stop() error ConsumeLine(line string) (consumed bool, err error) } func NewManagement(socketAddress, logPrefix string, middlewares ...ManagementMiddleware) *Management { return &Management{ socketAddress: socketAddress, logPrefix: logPrefix, lineReceiver: make(chan string), middlewares: middlewares, listenerShutdownStarted: make(chan bool), listenerShutdownWaiter: sync.WaitGroup{}, } } func (management *Management) Start() error { log.Info(management.logPrefix, "Connecting to socket:", management.socketAddress) listener, err := net.Listen("unix", management.socketAddress) if err != nil { log.Error(management.logPrefix, err) return err } go management.waitForShutdown(listener) go management.deliverLines() go management.waitForConnections(listener) return nil } func (management *Management) Stop() { log.Info(management.logPrefix, "Shutdown") close(management.listenerShutdownStarted) management.listenerShutdownWaiter.Wait() log.Info(management.logPrefix, "Shutdown finished") } func (management *Management) waitForShutdown(listener net.Listener) { <-management.listenerShutdownStarted for _, middleware := range management.middlewares { middleware.Stop() } listener.Close() } func (management *Management) waitForConnections(listener net.Listener) { management.listenerShutdownWaiter.Add(1) defer management.listenerShutdownWaiter.Done() for { connection, err := listener.Accept() if err != nil { select { case <-management.listenerShutdownStarted: log.Info(management.logPrefix, "Connection closed") default: log.Critical(management.logPrefix, "Connection accept error: ", err) } return } go management.serveNewConnection(connection) } } func (management *Management) serveNewConnection(connection net.Conn) { log.Info(management.logPrefix, "New connection started") for _, middleware := range management.middlewares { middleware.Start(connection) } reader := textproto.NewReader(bufio.NewReader(connection)) for { line, err := reader.ReadLine() if err != nil { log.Warn(management.logPrefix, "Connection failed to read. ", err) return } log.Debug(management.logPrefix, "Line received: ", line) // Try to deliver the message select { case management.lineReceiver <- line: case <-time.After(time.Second): log.Error(management.logPrefix, "Failed to transport line: ", line) } } } func (management *Management) deliverLines() { for { line := <-management.lineReceiver log.Debug(management.logPrefix, "Line delivering: ", line) lineConsumed := false for _, middleware := range management.middlewares { consumed, err := middleware.ConsumeLine(line) if err != nil { log.Error(management.logPrefix, "Failed to deliver line: ", line, ". ", err) } lineConsumed = lineConsumed || consumed } if !lineConsumed { log.Warn(management.logPrefix, "Line not delivered: ", line) } } }
1
10,385
Dead code - we should either remove it or use `log.Trace` for a lower logging level. The same applies to the change at line 138.
mysteriumnetwork-node
go
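Instead of leaving the statement commented out (dead code), the review above suggests demoting it to `log.Trace`, seelog's lowest severity, so the per-line chatter stays in the code but can be filtered out by the logger configuration. A minimal sketch using seelog's package-level logger, matching the import alias in this record:

```go
package main

import (
	log "github.com/cihub/seelog"
)

func main() {
	defer log.Flush()

	logPrefix := "[management] "
	line := "example management line"

	// Demoted to Trace: still in the code, but filtered out whenever the
	// seelog configuration's minimum level is debug or higher.
	log.Trace(logPrefix, "Line delivering: ", line)
	log.Debug(logPrefix, "Line received: ", line)
}
```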
@@ -55,6 +55,11 @@ boost::optional<ParameterT> parseParameters(std::string::iterator &iter, // iterator to the failing position. Extract the position from the exception ourselves. iter = failure.first; } + catch (const boost::numeric::bad_numeric_cast &e) + { + // this can happen if we get bad numeric values in the request, just handle + // as normal parser error + } return boost::none; }
1
#include "server/api/parameters_parser.hpp" #include "server/api/match_parameter_grammar.hpp" #include "server/api/nearest_parameter_grammar.hpp" #include "server/api/route_parameters_grammar.hpp" #include "server/api/table_parameter_grammar.hpp" #include "server/api/tile_parameter_grammar.hpp" #include "server/api/trip_parameter_grammar.hpp" #include <type_traits> namespace osrm { namespace server { namespace api { namespace detail { template <typename T> using is_grammar_t = std::integral_constant<bool, std::is_same<RouteParametersGrammar<>, T>::value || std::is_same<TableParametersGrammar<>, T>::value || std::is_same<NearestParametersGrammar<>, T>::value || std::is_same<TripParametersGrammar<>, T>::value || std::is_same<MatchParametersGrammar<>, T>::value || std::is_same<TileParametersGrammar<>, T>::value>; template <typename ParameterT, typename GrammarT, typename std::enable_if<detail::is_parameter_t<ParameterT>::value, int>::type = 0, typename std::enable_if<detail::is_grammar_t<GrammarT>::value, int>::type = 0> boost::optional<ParameterT> parseParameters(std::string::iterator &iter, const std::string::iterator end) { using It = std::decay<decltype(iter)>::type; static const GrammarT grammar; try { ParameterT parameters; const auto ok = boost::spirit::qi::parse(iter, end, grammar(boost::phoenix::ref(parameters))); // return move(a.b) is needed to move b out of a and then return the rvalue by implicit move if (ok && iter == end) return std::move(parameters); } catch (const qi::expectation_failure<It> &failure) { // The grammar above using expectation parsers ">" does not automatically increment the // iterator to the failing position. Extract the position from the exception ourselves. iter = failure.first; } return boost::none; } } // ns detail template <> boost::optional<engine::api::RouteParameters> parseParameters(std::string::iterator &iter, const std::string::iterator end) { return detail::parseParameters<engine::api::RouteParameters, RouteParametersGrammar<>>(iter, end); } template <> boost::optional<engine::api::TableParameters> parseParameters(std::string::iterator &iter, const std::string::iterator end) { return detail::parseParameters<engine::api::TableParameters, TableParametersGrammar<>>(iter, end); } template <> boost::optional<engine::api::NearestParameters> parseParameters(std::string::iterator &iter, const std::string::iterator end) { return detail::parseParameters<engine::api::NearestParameters, NearestParametersGrammar<>>(iter, end); } template <> boost::optional<engine::api::TripParameters> parseParameters(std::string::iterator &iter, const std::string::iterator end) { return detail::parseParameters<engine::api::TripParameters, TripParametersGrammar<>>(iter, end); } template <> boost::optional<engine::api::MatchParameters> parseParameters(std::string::iterator &iter, const std::string::iterator end) { return detail::parseParameters<engine::api::MatchParameters, MatchParametersGrammar<>>(iter, end); } template <> boost::optional<engine::api::TileParameters> parseParameters(std::string::iterator &iter, const std::string::iterator end) { return detail::parseParameters<engine::api::TileParameters, TileParametersGrammar<>>(iter, end); } } // ns api } // ns server } // ns osrm
1
17,982
How does this make sure that invalid coordinates make the parser fail? By relying on the Coordinate type's boost numeric_cast calls in the ctor? Shouldn't we rely on a coordinate's `.IsValid()` member function instead?
Project-OSRM-osrm-backend
cpp
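The review above prefers validating a coordinate explicitly via its `IsValid()` member over relying on the constructor's `numeric_cast` to throw `bad_numeric_cast`. Here is a sketch of that validate-before-use idea, written in Go for consistency with the other examples here; the `Coordinate` type and WGS84 bounds are illustrative assumptions, not OSRM code.

```go
package main

import (
	"errors"
	"fmt"
)

type Coordinate struct {
	Lon, Lat float64
}

// IsValid mirrors the reviewer's suggestion: an explicit range check,
// instead of trusting a narrowing cast to blow up on bad input.
func (c Coordinate) IsValid() bool {
	return c.Lon >= -180 && c.Lon <= 180 && c.Lat >= -90 && c.Lat <= 90
}

// parseCoordinate rejects out-of-range values up front, so the caller
// handles one error path rather than catching an exception after the fact.
func parseCoordinate(lon, lat float64) (Coordinate, error) {
	c := Coordinate{Lon: lon, Lat: lat}
	if !c.IsValid() {
		return Coordinate{}, errors.New("invalid coordinate in request")
	}
	return c, nil
}

func main() {
	fmt.Println(parseCoordinate(13.4, 52.5)) // ok
	fmt.Println(parseCoordinate(13.4, 952))  // rejected up front
}
```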
@@ -1,8 +1,11 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX - License - Identifier: Apache - 2.0 +# Purpose # This code example demonstrates how to upload an item -# (file) to a folder within a bucket in Amazon S3. +# (file) to a folder within a bucket in Amazon Simple Storage Solution (Amazon S3). + +# snippet-start:[s3.ruby.s3-ruby-example-upload-item-to-folder] # Prerequisites: # - An existing Amazon S3 bucket.
1
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX - License - Identifier: Apache - 2.0 # This code example demonstrates how to upload an item # (file) to a folder within a bucket in Amazon S3. # Prerequisites: # - An existing Amazon S3 bucket. # - An existing folder within the bucket. # - An existing file to upload to the folder. require 'aws-sdk-s3' # Checks whether a file exists and is indeed a file. # # @param file_name [String] The name of the file. # @return [Boolean] true if the file exists and is indeed a file; # otherwise, false. # @example # exit 1 unless file_exists_and_file?('my-file.txt') def file_exists_and_file?(file_name) return true if File.exist?(file_name) && File.file?(file_name) end # Checks whether a bucket exists in Amazon S3. # # @param s3_client [Aws::S3::Client] An initialized Amazon S3 client. # @param bucket_name [String] The name of the bucket. # @return [Boolean] true if the bucket exists; otherwise, false. # @example # s3_client = Aws::S3::Client.new(region: 'us-east-1') # exit 1 unless bucket_exists?(s3_client, 'doc-example-bucket') def bucket_exists?(s3_client, bucket_name) response = s3_client.list_buckets response.buckets.each do |bucket| return true if bucket.name == bucket_name end rescue StandardError => e puts "Error while checking whether the bucket '#{bucket_name}' " \ "exists: #{e.message}" end # Checks whether a folder exists in a bucket in Amazon S3. # # @param s3_client [Aws::S3::Client] An initialized Amazon S3 client. # @param bucket_name [String] The name of the bucket. # @param folder_name [String] The name of the folder. # @return [Boolean] true if the folder exists; otherwise, false. # @example # s3_client = Aws::S3::Client.new(region: 'us-east-1') # exit 1 unless folder_exists?(s3_client, 'doc-example-bucket', 'my-folder/') def folder_exists?(s3_client, bucket_name, folder_name) response = s3_client.list_objects_v2(bucket: bucket_name) if response.count.positive? response.contents.each do |object| return true if object.key == folder_name end end rescue StandardError => e puts "Error while checking whether the folder '#{folder_name}' " \ "exists: #{e.message}" end # Uploads a file to a folder within a bucket in Amazon S3. # # @param s3_client [Aws::S3::Client] An initialized Amazon S3 client. # @param bucket_name [String] The name of the bucket. # @param folder_name [String] The name of the folder. # @param file_name [String] The name of the file. # @return [Boolean] true if the file was uploaded; otherwise, false. # @example # s3_client = Aws::S3::Client.new(region: 'us-east-1') # exit 1 unless upload_file_to_folder?( # s3_client, # 'doc-example-bucket', # 'my-folder/', # 'my-file.txt') def upload_file_to_folder?(s3_client, bucket_name, folder_name, file_name) s3_client.put_object( body: file_name, bucket: bucket_name, key: folder_name + file_name ) return true rescue StandardError => e puts "Error while uploading the file '#{file_name}' to the " \ "folder '#{folder_name}' in the bucket '#{bucket_name}': #{e.message}" end # Full example call: def run_me file_name = 'my-file-1.txt' bucket_name = 'doc-example-bucket' folder_name = 'my-folder/' region = 'us-east-1' s3_client = Aws::S3::Client.new(region: region) puts 'Checking whether the specified file exists and is indeed a file...' if file_exists_and_file?(file_name) puts "The file '#{file_name}' exists and is a file." else puts "The file '#{file_name}' does not exist or is not a file and will " \ 'not be uploaded. Stopping program.' 
exit 1 end puts "\nChecking whether the specified bucket exists..." if bucket_exists?(s3_client, bucket_name) puts "The bucket '#{bucket_name}' exists." else puts "The bucket '#{bucket_name}' does not exist. Stopping program." exit 1 end puts "\nChecking whether the specified folder exists..." if folder_exists?(s3_client, bucket_name, folder_name) puts "The folder '#{folder_name}' exists." else puts "The folder '#{folder_name}' does not exist in the bucket " \ "'#{bucket_name}' or access to the bucket is denied. Stopping program." exit 1 end puts "\nUploading file..." if upload_file_to_folder?(s3_client, bucket_name, folder_name, file_name) puts "The file '#{file_name}' was uploaded." else puts "The file '#{file_name}' could not be uploaded. Stopping program." exit 1 end end run_me if $PROGRAM_NAME == __FILE__
idx: 1
id: 20,527
msg: Simple Storage **Service**
proj: awsdocs-aws-doc-sdk-examples
lang: rb
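One behavior of the sample above worth noting: `folder_exists?` makes a single `list_objects_v2` call, which returns at most 1,000 keys, so the check can silently miss a folder in a larger bucket. A cheaper, paging-free probe is to `head_object` the folder key directly; a minimal sketch, assuming the folder exists as the zero-byte placeholder object (key ending in `/`) that the S3 console creates:

```ruby
require 'aws-sdk-s3'

# Probes for the folder's zero-byte placeholder object instead of
# listing the bucket. Returns true only if that exact key is present.
def folder_exists_via_head?(s3_client, bucket_name, folder_name)
  s3_client.head_object(bucket: bucket_name, key: folder_name)
  true
rescue Aws::S3::Errors::NotFound
  false
end

# Example call (same placeholder names as the sample above):
# s3_client = Aws::S3::Client.new(region: 'us-east-1')
# puts folder_exists_via_head?(s3_client, 'doc-example-bucket', 'my-folder/')
```

This only works when the folder was created explicitly; objects that merely share the `my-folder/` prefix do not create a placeholder key.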
patch:
@@ -54,6 +54,8 @@ if (!is_dir($cacheDir)) {
 // Enable caching unless in dev mode or running tests:
 $useCache = APPLICATION_ENV != 'development' && !defined('VUFIND_PHPUNIT_RUNNING');
 
+defined('CACHE_ENABLED') || define('CACHE_ENABLED', $useCache);
+
 // Build configuration:
 return [
     'modules' => array_unique($modules),
y: 1
oldf:
<?php

// Set up modules:
$modules = [
    'Zend\Router', 'ZfcRbac', 'VuFindTheme', 'VuFindSearch', 'VuFind',
    'VuFindAdmin', 'VuFindApi'
];
if (PHP_SAPI == 'cli' && !defined('VUFIND_PHPUNIT_RUNNING')) {
    $modules[] = 'Zend\Mvc\Console';
    $modules[] = 'VuFindConsole';
}
if (APPLICATION_ENV == 'development') {
    array_push($modules, 'Zf2Whoops');
    $modules[] = 'VuFindDevTools';
}
if ($localModules = getenv('VUFIND_LOCAL_MODULES')) {
    $localModules = array_map('trim', explode(',', $localModules));
    foreach ($localModules as $current) {
        if (!empty($current)) {
            $modules[] = $current;
        }
    }
}

// Set up cache directory (be sure to keep separate cache for CLI vs. web and
// to account for potentially variant environment settings):
$baseDir = ($local = getenv('VUFIND_LOCAL_DIR')) ? $local : 'data';
$cacheDir = ($cache = getenv('VUFIND_CACHE_DIR')) ? $cache : $baseDir . '/cache';
if (!is_dir($cacheDir)) {
    mkdir($cacheDir);
}
if (PHP_SAPI == 'cli') {
    $cacheDir .= '/cli';
    if (!is_dir($cacheDir)) {
        mkdir($cacheDir);
    }
    $cacheDir .= '/configs';
} else {
    $cacheDir .= '/configs';
}
if (!is_dir($cacheDir)) {
    mkdir($cacheDir);
}
$cacheHash = md5(
    APPLICATION_ENV . (defined('VUFIND_LOCAL_DIR') ? VUFIND_LOCAL_DIR : '')
    . implode(',', $modules)
);
$cacheDir .= '/' . $cacheHash;
if (!is_dir($cacheDir)) {
    mkdir($cacheDir);
}

// Enable caching unless in dev mode or running tests:
$useCache = APPLICATION_ENV != 'development' && !defined('VUFIND_PHPUNIT_RUNNING');

// Build configuration:
return [
    'modules' => array_unique($modules),
    'module_listener_options' => [
        'config_glob_paths' => [
            'config/autoload/{,*.}{global,local}.php',
        ],
        'config_cache_enabled' => $useCache,
        'module_map_cache_enabled' => $useCache,
        'check_dependencies' => (APPLICATION_ENV == 'development'),
        'cache_dir' => $cacheDir,
        'module_paths' => [
            './module',
            './vendor',
        ],
    ],
    'service_manager' => [
        'use_defaults' => true,
        'factories' => [
        ],
    ],
];
idx: 1
id: 26,267
msg: Does this line (and the corresponding one in the test bootstrap) actually do anything? I don't see where CACHE_ENABLED is used as a global constant.
proj: vufind-org-vufind
lang: php
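The reviewer's point is that `define('CACHE_ENABLED', $useCache)` only matters if some later code reads the constant, and no reader appears in the file shown. A minimal, hypothetical consumer (the names below are illustrative, not from VuFind's codebase) would look like this:

```php
<?php

// Hypothetical reader of the CACHE_ENABLED constant from the patch
// above. The define() only has an effect if code like this exists
// somewhere downstream:
if (defined('CACHE_ENABLED') && CACHE_ENABLED) {
    // e.g. switch a long-lived cache on for the rest of the request
    $cacheActive = true;
} else {
    // dev mode or a PHPUnit run: skip caching entirely
    $cacheActive = false;
}
echo 'Caching active: ' . ($cacheActive ? 'yes' : 'no') . PHP_EOL;
```

Absent such a reader in the application or its test bootstrap, the added `define()` is dead code, which is what the comment is probing.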
patch:
@@ -445,13 +445,14 @@ func assertResolved(f func(target *api.Container, dep *api.Container) bool, targ
 	}
 }
 
-func TestTransitionDependenciesResolved(t *testing.T) {
+func TestVerifyTransitionDependenciesResolved(t *testing.T) {
 	testcases := []struct {
 		Name             string
 		TargetKnown      api.ContainerStatus
 		TargetDesired    api.ContainerStatus
+		TargetNext       api.ContainerStatus
+		DependencyName   string
 		DependencyKnown  api.ContainerStatus
-		DependentStatus  api.ContainerStatus
 		SatisfiedStatus  api.ContainerStatus
 		ExpectedResolved bool
 	}{
y: 1
oldf:
// +build !integration // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package dependencygraph import ( "testing" "fmt" "github.com/aws/amazon-ecs-agent/agent/api" "github.com/stretchr/testify/assert" ) func volumeStrToVol(vols []string) []api.VolumeFrom { ret := make([]api.VolumeFrom, len(vols)) for i, v := range vols { ret[i] = api.VolumeFrom{SourceContainer: v, ReadOnly: false} } return ret } func steadyStateContainer(name string, links, volumes []string, desiredState api.ContainerStatus, steadyState api.ContainerStatus) *api.Container { container := api.NewContainerWithSteadyState(steadyState) container.Name = name container.Links = links container.VolumesFrom = volumeStrToVol(volumes) container.DesiredStatusUnsafe = desiredState return container } func createdContainer(name string, links, volumes []string, steadyState api.ContainerStatus) *api.Container { container := api.NewContainerWithSteadyState(steadyState) container.Name = name container.Links = links container.VolumesFrom = volumeStrToVol(volumes) container.DesiredStatusUnsafe = api.ContainerCreated return container } func TestValidDependencies(t *testing.T) { // Empty task task := &api.Task{} resolveable := ValidDependencies(task) assert.True(t, resolveable, "The zero dependency graph should resolve") task = &api.Task{ Containers: []*api.Container{ { Name: "redis", DesiredStatusUnsafe: api.ContainerRunning, }, }, } resolveable = ValidDependencies(task) assert.True(t, resolveable, "One container should resolve trivially") // Webserver stack php := steadyStateContainer("php", []string{"db"}, []string{}, api.ContainerRunning, api.ContainerRunning) db := steadyStateContainer("db", []string{}, []string{"dbdatavolume"}, api.ContainerRunning, api.ContainerRunning) dbdata := createdContainer("dbdatavolume", []string{}, []string{}, api.ContainerRunning) webserver := steadyStateContainer("webserver", []string{"php"}, []string{"htmldata"}, api.ContainerRunning, api.ContainerRunning) htmldata := steadyStateContainer("htmldata", []string{}, []string{"sharedcssfiles"}, api.ContainerRunning, api.ContainerRunning) sharedcssfiles := createdContainer("sharedcssfiles", []string{}, []string{}, api.ContainerRunning) task = &api.Task{ Containers: []*api.Container{ php, db, dbdata, webserver, htmldata, sharedcssfiles, }, } resolveable = ValidDependencies(task) assert.True(t, resolveable, "The webserver group should resolve just fine") } func TestValidDependenciesWithCycles(t *testing.T) { // Unresolveable: cycle task := &api.Task{ Containers: []*api.Container{ steadyStateContainer("a", []string{"b"}, []string{}, api.ContainerRunning, api.ContainerRunning), steadyStateContainer("b", []string{"a"}, []string{}, api.ContainerRunning, api.ContainerRunning), }, } resolveable := ValidDependencies(task) assert.False(t, resolveable, "Cycle should not be resolveable") } func TestValidDependenciesWithUnresolvedReference(t *testing.T) { // Unresolveable, reference doesn't exist task := &api.Task{ Containers: []*api.Container{ 
steadyStateContainer("php", []string{"db"}, []string{}, api.ContainerRunning, api.ContainerRunning), }, } resolveable := ValidDependencies(task) assert.False(t, resolveable, "Nonexistent reference shouldn't resolve") } func TestDependenciesAreResolvedWhenSteadyStateIsRunning(t *testing.T) { task := &api.Task{ Containers: []*api.Container{ { Name: "redis", DesiredStatusUnsafe: api.ContainerRunning, }, }, } err := DependenciesAreResolved(task.Containers[0], task.Containers, "", nil) assert.NoError(t, err, "One container should resolve trivially") // Webserver stack php := steadyStateContainer("php", []string{"db"}, []string{}, api.ContainerRunning, api.ContainerRunning) db := steadyStateContainer("db", []string{}, []string{"dbdatavolume"}, api.ContainerRunning, api.ContainerRunning) dbdata := createdContainer("dbdatavolume", []string{}, []string{}, api.ContainerRunning) webserver := steadyStateContainer("webserver", []string{"php"}, []string{"htmldata"}, api.ContainerRunning, api.ContainerRunning) htmldata := steadyStateContainer("htmldata", []string{}, []string{"sharedcssfiles"}, api.ContainerRunning, api.ContainerRunning) sharedcssfiles := createdContainer("sharedcssfiles", []string{}, []string{}, api.ContainerRunning) task = &api.Task{ Containers: []*api.Container{ php, db, dbdata, webserver, htmldata, sharedcssfiles, }, } err = DependenciesAreResolved(php, task.Containers, "", nil) assert.Error(t, err, "Shouldn't be resolved; db isn't running") err = DependenciesAreResolved(db, task.Containers, "", nil) assert.Error(t, err, "Shouldn't be resolved; dbdatavolume isn't created") err = DependenciesAreResolved(dbdata, task.Containers, "", nil) assert.NoError(t, err, "data volume with no deps should resolve") dbdata.KnownStatusUnsafe = api.ContainerCreated err = DependenciesAreResolved(php, task.Containers, "", nil) assert.Error(t, err, "Php shouldn't run, db is not created") db.KnownStatusUnsafe = api.ContainerCreated err = DependenciesAreResolved(php, task.Containers, "", nil) assert.Error(t, err, "Php shouldn't run, db is not running") err = DependenciesAreResolved(db, task.Containers, "", nil) assert.NoError(t, err, "db should be resolved, dbdata volume is Created") db.KnownStatusUnsafe = api.ContainerRunning err = DependenciesAreResolved(php, task.Containers, "", nil) assert.NoError(t, err, "Php should resolve") } func TestRunDependencies(t *testing.T) { c1 := &api.Container{ Name: "a", KnownStatusUnsafe: api.ContainerStatusNone, } c2 := &api.Container{ Name: "b", KnownStatusUnsafe: api.ContainerStatusNone, DesiredStatusUnsafe: api.ContainerCreated, SteadyStateDependencies: []string{"a"}, } task := &api.Task{Containers: []*api.Container{c1, c2}} assert.Error(t, DependenciesAreResolved(c2, task.Containers, "", nil), "Dependencies should not be resolved") task.Containers[1].SetDesiredStatus(api.ContainerRunning) assert.Error(t, DependenciesAreResolved(c2, task.Containers, "", nil), "Dependencies should not be resolved") task.Containers[0].KnownStatusUnsafe = api.ContainerRunning assert.NoError(t, DependenciesAreResolved(c2, task.Containers, "", nil), "Dependencies should be resolved") task.Containers[1].SetDesiredStatus(api.ContainerCreated) assert.NoError(t, DependenciesAreResolved(c1, task.Containers, "", nil), "Dependencies should be resolved") } func TestRunDependenciesWhenSteadyStateIsResourcesProvisionedForOneContainer(t *testing.T) { // Webserver stack php := steadyStateContainer("php", []string{"db"}, []string{}, api.ContainerRunning, api.ContainerRunning) db := 
steadyStateContainer("db", []string{}, []string{"dbdatavolume"}, api.ContainerRunning, api.ContainerRunning) dbdata := createdContainer("dbdatavolume", []string{}, []string{}, api.ContainerRunning) webserver := steadyStateContainer("webserver", []string{"php"}, []string{"htmldata"}, api.ContainerRunning, api.ContainerRunning) htmldata := steadyStateContainer("htmldata", []string{}, []string{"sharedcssfiles"}, api.ContainerRunning, api.ContainerRunning) sharedcssfiles := createdContainer("sharedcssfiles", []string{}, []string{}, api.ContainerRunning) // The Pause container, being added to the webserver stack pause := steadyStateContainer("pause", []string{}, []string{}, api.ContainerResourcesProvisioned, api.ContainerResourcesProvisioned) task := &api.Task{ Containers: []*api.Container{ php, db, dbdata, webserver, htmldata, sharedcssfiles, pause, }, } // Add a dependency on the pause container for all containers in the webserver stack for _, container := range task.Containers { if container.Name == "pause" { continue } container.SteadyStateDependencies = []string{"pause"} err := DependenciesAreResolved(container, task.Containers, "", nil) assert.Error(t, err, "Shouldn't be resolved; pause isn't running") } err := DependenciesAreResolved(pause, task.Containers, "", nil) assert.NoError(t, err, "Pause container's dependencies should be resolved") // Transition pause container to RUNNING pause.KnownStatusUnsafe = api.ContainerRunning // Transition dependencies in webserver stack to CREATED/RUNNING state dbdata.KnownStatusUnsafe = api.ContainerCreated db.KnownStatusUnsafe = api.ContainerRunning for _, container := range task.Containers { if container.Name == "pause" { continue } // Assert that dependencies remain unresolved until the pause container reaches // RESOURCES_PROVISIONED err = DependenciesAreResolved(container, task.Containers, "", nil) assert.Error(t, err, "Shouldn't be resolved; pause isn't running") } pause.KnownStatusUnsafe = api.ContainerResourcesProvisioned // Dependecies should be resolved now that the 'pause' container has // transitioned into RESOURCES_PROVISIONED err = DependenciesAreResolved(php, task.Containers, "", nil) assert.NoError(t, err, "Php should resolve") } func TestVolumeCanResolve(t *testing.T) { testcases := []struct { TargetDesired api.ContainerStatus VolumeDesired api.ContainerStatus Resolvable bool }{ { TargetDesired: api.ContainerCreated, VolumeDesired: api.ContainerStatusNone, Resolvable: false, }, { TargetDesired: api.ContainerCreated, VolumeDesired: api.ContainerCreated, Resolvable: true, }, { TargetDesired: api.ContainerCreated, VolumeDesired: api.ContainerRunning, Resolvable: true, }, { TargetDesired: api.ContainerCreated, VolumeDesired: api.ContainerStopped, Resolvable: true, }, { TargetDesired: api.ContainerCreated, VolumeDesired: api.ContainerZombie, Resolvable: false, }, { TargetDesired: api.ContainerRunning, VolumeDesired: api.ContainerStatusNone, Resolvable: false, }, { TargetDesired: api.ContainerRunning, VolumeDesired: api.ContainerCreated, Resolvable: true, }, { TargetDesired: api.ContainerRunning, VolumeDesired: api.ContainerRunning, Resolvable: true, }, { TargetDesired: api.ContainerRunning, VolumeDesired: api.ContainerStopped, Resolvable: true, }, { TargetDesired: api.ContainerRunning, VolumeDesired: api.ContainerZombie, Resolvable: false, }, { TargetDesired: api.ContainerStatusNone, Resolvable: false, }, { TargetDesired: api.ContainerStopped, Resolvable: false, }, { TargetDesired: api.ContainerZombie, Resolvable: false, }, } for _, tc := 
range testcases { t.Run(fmt.Sprintf("T:%s+V:%s", tc.TargetDesired.String(), tc.VolumeDesired.String()), assertCanResolve(volumeCanResolve, tc.TargetDesired, tc.VolumeDesired, tc.Resolvable)) } } func TestVolumeIsResolved(t *testing.T) { testcases := []struct { TargetDesired api.ContainerStatus VolumeKnown api.ContainerStatus Resolved bool }{ { TargetDesired: api.ContainerCreated, VolumeKnown: api.ContainerStatusNone, Resolved: false, }, { TargetDesired: api.ContainerCreated, VolumeKnown: api.ContainerCreated, Resolved: true, }, { TargetDesired: api.ContainerCreated, VolumeKnown: api.ContainerRunning, Resolved: true, }, { TargetDesired: api.ContainerCreated, VolumeKnown: api.ContainerStopped, Resolved: true, }, { TargetDesired: api.ContainerCreated, VolumeKnown: api.ContainerZombie, Resolved: false, }, { TargetDesired: api.ContainerRunning, VolumeKnown: api.ContainerStatusNone, Resolved: false, }, { TargetDesired: api.ContainerRunning, VolumeKnown: api.ContainerCreated, Resolved: true, }, { TargetDesired: api.ContainerRunning, VolumeKnown: api.ContainerRunning, Resolved: true, }, { TargetDesired: api.ContainerRunning, VolumeKnown: api.ContainerStopped, Resolved: true, }, { TargetDesired: api.ContainerRunning, VolumeKnown: api.ContainerZombie, Resolved: false, }, { TargetDesired: api.ContainerStatusNone, Resolved: false, }, { TargetDesired: api.ContainerStopped, Resolved: false, }, { TargetDesired: api.ContainerZombie, Resolved: false, }, } for _, tc := range testcases { t.Run(fmt.Sprintf("T:%s+V:%s", tc.TargetDesired.String(), tc.VolumeKnown.String()), assertResolved(volumeIsResolved, tc.TargetDesired, tc.VolumeKnown, tc.Resolved)) } } func TestOnSteadyStateIsResolved(t *testing.T) { testcases := []struct { TargetDesired api.ContainerStatus RunKnown api.ContainerStatus Resolved bool }{ { TargetDesired: api.ContainerStatusNone, Resolved: false, }, { TargetDesired: api.ContainerPulled, Resolved: false, }, { TargetDesired: api.ContainerCreated, RunKnown: api.ContainerCreated, Resolved: false, }, { TargetDesired: api.ContainerCreated, RunKnown: api.ContainerRunning, Resolved: true, }, { TargetDesired: api.ContainerCreated, RunKnown: api.ContainerStopped, Resolved: true, }, } for _, tc := range testcases { t.Run(fmt.Sprintf("T:%s+R:%s", tc.TargetDesired.String(), tc.RunKnown.String()), assertResolved(onSteadyStateIsResolved, tc.TargetDesired, tc.RunKnown, tc.Resolved)) } } func assertCanResolve(f func(target *api.Container, dep *api.Container) bool, targetDesired, depKnown api.ContainerStatus, expectedResolvable bool) func(t *testing.T) { return func(t *testing.T) { target := &api.Container{ DesiredStatusUnsafe: targetDesired, } dep := &api.Container{ DesiredStatusUnsafe: depKnown, } resolvable := f(target, dep) assert.Equal(t, expectedResolvable, resolvable) } } func assertResolved(f func(target *api.Container, dep *api.Container) bool, targetDesired, depKnown api.ContainerStatus, expectedResolved bool) func(t *testing.T) { return func(t *testing.T) { target := &api.Container{ DesiredStatusUnsafe: targetDesired, } dep := &api.Container{ KnownStatusUnsafe: depKnown, } resolved := f(target, dep) assert.Equal(t, expectedResolved, resolved) } } func TestTransitionDependenciesResolved(t *testing.T) { testcases := []struct { Name string TargetKnown api.ContainerStatus TargetDesired api.ContainerStatus DependencyKnown api.ContainerStatus DependentStatus api.ContainerStatus SatisfiedStatus api.ContainerStatus ExpectedResolved bool }{ { Name: "Nothing running, pull depends on running", TargetKnown: 
api.ContainerStatusNone, TargetDesired: api.ContainerRunning, DependencyKnown: api.ContainerStatusNone, DependentStatus: api.ContainerPulled, SatisfiedStatus: api.ContainerRunning, ExpectedResolved: false, }, { Name: "Nothing running, pull depends on resources provisioned", TargetKnown: api.ContainerStatusNone, TargetDesired: api.ContainerRunning, DependencyKnown: api.ContainerStatusNone, DependentStatus: api.ContainerPulled, SatisfiedStatus: api.ContainerResourcesProvisioned, ExpectedResolved: false, }, { Name: "Nothing running, create depends on running", TargetKnown: api.ContainerStatusNone, TargetDesired: api.ContainerRunning, DependencyKnown: api.ContainerStatusNone, DependentStatus: api.ContainerCreated, SatisfiedStatus: api.ContainerRunning, ExpectedResolved: true, }, { Name: "Dependency created, pull depends on running", TargetKnown: api.ContainerStatusNone, TargetDesired: api.ContainerRunning, DependencyKnown: api.ContainerCreated, DependentStatus: api.ContainerPulled, SatisfiedStatus: api.ContainerRunning, ExpectedResolved: false, }, { Name: "Dependency created, pull depends on resources provisioned", TargetKnown: api.ContainerStatusNone, TargetDesired: api.ContainerRunning, DependencyKnown: api.ContainerCreated, DependentStatus: api.ContainerPulled, SatisfiedStatus: api.ContainerResourcesProvisioned, ExpectedResolved: false, }, { Name: "Dependency running, pull depends on running", TargetKnown: api.ContainerStatusNone, TargetDesired: api.ContainerRunning, DependencyKnown: api.ContainerRunning, DependentStatus: api.ContainerPulled, SatisfiedStatus: api.ContainerRunning, ExpectedResolved: true, }, { Name: "Dependency running, pull depends on resources provisioned", TargetKnown: api.ContainerStatusNone, TargetDesired: api.ContainerRunning, DependencyKnown: api.ContainerRunning, DependentStatus: api.ContainerPulled, SatisfiedStatus: api.ContainerResourcesProvisioned, ExpectedResolved: false, }, { Name: "Dependency resources provisioned, pull depends on resources provisioned", TargetKnown: api.ContainerStatusNone, TargetDesired: api.ContainerRunning, DependencyKnown: api.ContainerResourcesProvisioned, DependentStatus: api.ContainerPulled, SatisfiedStatus: api.ContainerResourcesProvisioned, ExpectedResolved: true, }, { Name: "Dependency running, create depends on created", TargetKnown: api.ContainerPulled, TargetDesired: api.ContainerRunning, DependencyKnown: api.ContainerRunning, DependentStatus: api.ContainerCreated, SatisfiedStatus: api.ContainerCreated, ExpectedResolved: true, }, { Name: "Target running, create depends on running", TargetKnown: api.ContainerRunning, TargetDesired: api.ContainerRunning, DependencyKnown: api.ContainerRunning, DependentStatus: api.ContainerRunning, SatisfiedStatus: api.ContainerCreated, ExpectedResolved: true, }, // Note: Not all possible situations are tested here. The only situations tested here are ones that are // expected to reasonably happen at the time this code was written. Other behavior is not expected to occur, // so it is not tested. } for _, tc := range testcases { t.Run(tc.Name, func(t *testing.T) { containerDependency := api.ContainerDependency{ DependentStatus: tc.DependentStatus, SatisfiedStatus: tc.SatisfiedStatus, } target := &api.Container{ KnownStatusUnsafe: tc.TargetKnown, DesiredStatusUnsafe: tc.TargetDesired, } dep := &api.Container{ KnownStatusUnsafe: tc.DependencyKnown, } resolved := resolvesContainerTransitionDependency(target, dep, containerDependency) assert.Equal(t, tc.ExpectedResolved, resolved) }) } }
idx: 1
id: 19,138
msg: has this changed? are we testing "all possible situations" now?
proj: aws-amazon-ecs-agent
lang: go
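The comment refers to the testcase table's trailing caveat ("Not all possible situations are tested here"). On the evidence in this record, the patch only renames the test and reshapes the case struct (dropping `DependentStatus`, adding `TargetNext` and `DependencyName`); nothing shown expands coverage to all situations. As a self-contained sketch of the post-patch case shape, using a stand-in status type and illustrative values since the real test uses the agent's `api` package:

```go
package main

import "fmt"

// ContainerStatus stands in for the agent's api.ContainerStatus so this
// sketch compiles on its own; the real test imports the api package.
type ContainerStatus int

const (
	StatusNone ContainerStatus = iota
	Pulled
	Created
	Running
	ResourcesProvisioned
)

func main() {
	// Shape of one testcase after the patch: DependentStatus is gone,
	// TargetNext and DependencyName are new. Field values here are
	// illustrative, not taken from the updated test.
	testcases := []struct {
		Name             string
		TargetKnown      ContainerStatus
		TargetDesired    ContainerStatus
		TargetNext       ContainerStatus
		DependencyName   string
		DependencyKnown  ContainerStatus
		SatisfiedStatus  ContainerStatus
		ExpectedResolved bool
	}{
		{
			Name:             "Nothing running, pull depends on running",
			TargetKnown:      StatusNone,
			TargetDesired:    Running,
			TargetNext:       Pulled,
			DependencyName:   "dependency",
			DependencyKnown:  StatusNone,
			SatisfiedStatus:  Running,
			ExpectedResolved: false,
		},
	}

	for _, tc := range testcases {
		fmt.Printf("%s => expect resolved=%v\n", tc.Name, tc.ExpectedResolved)
	}
}
```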
patch:
@@ -236,13 +236,13 @@ public class SolrCLI implements CLIO {
         .argName("HOST")
         .hasArg()
         .required(false)
-        .desc("Address of the Zookeeper ensemble; defaults to: "+ZK_HOST)
+        .desc("Address of the Zookeeper ensemble; defaults to: "+ ZK_HOST + '.')
         .build(),
     Option.builder("c")
         .argName("COLLECTION")
         .hasArg()
         .required(false)
-        .desc("Name of collection; no default")
+        .desc("Name of collection; no default.")
         .longOpt("collection")
         .build(),
     Option.builder("verbose")
y: 1
oldf:
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.util; import javax.net.ssl.SSLPeerUnverifiedException; import java.io.Console; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.PrintStream; import java.lang.invoke.MethodHandles; import java.net.ConnectException; import java.net.Socket; import java.net.SocketException; import java.net.URL; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardCopyOption; import java.nio.file.attribute.FileOwnerAttributeView; import java.time.Instant; import java.time.Period; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Scanner; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.exec.DefaultExecuteResultHandler; import org.apache.commons.exec.DefaultExecutor; import org.apache.commons.exec.ExecuteException; import org.apache.commons.exec.Executor; import org.apache.commons.exec.OS; import org.apache.commons.exec.environment.EnvironmentUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.SystemUtils; import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.NoHttpResponseException; import org.apache.http.StatusLine; import org.apache.http.client.ClientProtocolException; import org.apache.http.client.HttpClient; import org.apache.http.client.HttpResponseException; import org.apache.http.client.ResponseHandler; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpHead; import org.apache.http.client.utils.URIBuilder; import org.apache.http.conn.ConnectTimeoutException; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.util.EntityUtils; import org.apache.lucene.util.Version; import org.apache.solr.client.solrj.SolrClient; import 
org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.SolrRequest; import org.apache.solr.client.solrj.SolrServerException; import org.apache.solr.client.solrj.impl.CloudSolrClient; import org.apache.solr.client.solrj.impl.HttpClientUtil; import org.apache.solr.client.solrj.impl.HttpSolrClient; import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider; import org.apache.solr.client.solrj.request.CollectionAdminRequest; import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest; import org.apache.solr.client.solrj.response.CollectionAdminResponse; import org.apache.solr.client.solrj.response.QueryResponse; import org.apache.solr.common.SolrException; import org.apache.solr.common.cloud.ClusterState; import org.apache.solr.common.cloud.DocCollection; import org.apache.solr.common.cloud.UrlScheme; import org.apache.solr.common.cloud.Replica; import org.apache.solr.common.cloud.Slice; import org.apache.solr.common.cloud.SolrZkClient; import org.apache.solr.common.cloud.ZkConfigManager; import org.apache.solr.common.cloud.ZkCoreNodeProps; import org.apache.solr.common.cloud.ZkStateReader; import org.apache.solr.common.params.CollectionAdminParams; import org.apache.solr.common.params.CommonParams; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.util.ContentStreamBase; import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.StrUtils; import org.apache.solr.security.Sha256AuthenticationProvider; import org.apache.solr.util.configuration.SSLConfigurationsFactory; import org.noggit.CharArr; import org.noggit.JSONParser; import org.noggit.JSONWriter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.solr.common.SolrException.ErrorCode.FORBIDDEN; import static org.apache.solr.common.SolrException.ErrorCode.UNAUTHORIZED; import static org.apache.solr.common.params.CommonParams.DISTRIB; import static org.apache.solr.common.params.CommonParams.NAME; import static org.apache.solr.common.util.Utils.fromJSONString; /** * Command-line utility for working with Solr. */ public class SolrCLI implements CLIO { private static final long MAX_WAIT_FOR_CORE_LOAD_NANOS = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MINUTES); /** * Defines the interface to a Solr tool that can be run from this command-line app. */ public interface Tool { String getName(); Option[] getOptions(); int runTool(CommandLine cli) throws Exception; } public static abstract class ToolBase implements Tool { protected PrintStream stdout; protected boolean verbose = false; protected ToolBase() { this(CLIO.getOutStream()); } protected ToolBase(PrintStream stdout) { this.stdout = stdout; } protected void echoIfVerbose(final String msg, CommandLine cli) { if (cli.hasOption("verbose")) { echo(msg); } } protected void echo(final String msg) { stdout.println(msg); } public int runTool(CommandLine cli) throws Exception { verbose = cli.hasOption("verbose"); int toolExitStatus = 0; try { runImpl(cli); } catch (Exception exc) { // since this is a CLI, spare the user the stacktrace String excMsg = exc.getMessage(); if (excMsg != null) { CLIO.err("\nERROR: " + excMsg + "\n"); if (verbose) { exc.printStackTrace(CLIO.getErrStream()); } toolExitStatus = 1; } else { throw exc; } } return toolExitStatus; } protected abstract void runImpl(CommandLine cli) throws Exception; } /** * Helps build SolrCloud aware tools by initializing a CloudSolrClient * instance before running the tool. 
*/ public static abstract class SolrCloudTool extends ToolBase { protected SolrCloudTool(PrintStream stdout) { super(stdout); } public Option[] getOptions() { return cloudOptions; } protected void runImpl(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String zkHost = cli.getOptionValue("zkHost", ZK_HOST); log.debug("Connecting to Solr cluster: {}", zkHost); try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty()).build()) { String collection = cli.getOptionValue("collection"); if (collection != null) cloudSolrClient.setDefaultCollection(collection); cloudSolrClient.connect(); runCloudTool(cloudSolrClient, cli); } } /** * Runs a SolrCloud tool with CloudSolrClient initialized */ protected abstract void runCloudTool(CloudSolrClient cloudSolrClient, CommandLine cli) throws Exception; } private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); public static final String DEFAULT_SOLR_URL = "http://localhost:8983/solr"; public static final String ZK_HOST = "localhost:9983"; public static Option[] cloudOptions = new Option[] { Option.builder("zkHost") .argName("HOST") .hasArg() .required(false) .desc("Address of the Zookeeper ensemble; defaults to: "+ZK_HOST) .build(), Option.builder("c") .argName("COLLECTION") .hasArg() .required(false) .desc("Name of collection; no default") .longOpt("collection") .build(), Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; private static void exit(int exitStatus) { try { System.exit(exitStatus); } catch (java.lang.SecurityException secExc) { if (exitStatus != 0) throw new RuntimeException("SolrCLI failed to exit with status "+exitStatus); } } /** * Runs a tool. */ public static void main(String[] args) throws Exception { if (args == null || args.length == 0 || args[0] == null || args[0].trim().length() == 0) { CLIO.err("Invalid command-line args! 
Must pass the name of a tool to run.\n" + "Supported tools:\n"); displayToolOptions(); exit(1); } if (args.length == 1 && Arrays.asList("-v","-version","version").contains(args[0])) { // Simple version tool, no need for its own class CLIO.out(Version.LATEST.toString()); exit(0); } SSLConfigurationsFactory.current().init(); Tool tool = findTool(args); CommandLine cli = parseCmdLine(args, tool.getOptions()); System.exit(tool.runTool(cli)); } public static Tool findTool(String[] args) throws Exception { String toolType = args[0].trim().toLowerCase(Locale.ROOT); return newTool(toolType); } public static CommandLine parseCmdLine(String[] args, Option[] toolOptions) throws Exception { // the parser doesn't like -D props List<String> toolArgList = new ArrayList<String>(); List<String> dashDList = new ArrayList<String>(); for (int a=1; a < args.length; a++) { String arg = args[a]; if (arg.startsWith("-D")) { dashDList.add(arg); } else { toolArgList.add(arg); } } String[] toolArgs = toolArgList.toArray(new String[0]); // process command-line args to configure this application CommandLine cli = processCommandLineArgs(joinCommonAndToolOptions(toolOptions), toolArgs); List<String> argList = cli.getArgList(); argList.addAll(dashDList); // for SSL support, try to accommodate relative paths set for SSL store props String solrInstallDir = System.getProperty("solr.install.dir"); if (solrInstallDir != null) { checkSslStoreSysProp(solrInstallDir, "keyStore"); checkSslStoreSysProp(solrInstallDir, "trustStore"); } return cli; } protected static void checkSslStoreSysProp(String solrInstallDir, String key) { String sysProp = "javax.net.ssl."+key; String keyStore = System.getProperty(sysProp); if (keyStore == null) return; File keyStoreFile = new File(keyStore); if (keyStoreFile.isFile()) return; // configured setting is OK keyStoreFile = new File(solrInstallDir, "server/"+keyStore); if (keyStoreFile.isFile()) { System.setProperty(sysProp, keyStoreFile.getAbsolutePath()); } else { CLIO.err("WARNING: "+sysProp+" file "+keyStore+ " not found! https requests to Solr will likely fail; please update your "+ sysProp+" setting to use an absolute path."); } } private static void raiseLogLevelUnlessVerbose(CommandLine cli) { if (! cli.hasOption("verbose")) { StartupLoggingUtils.changeLogLevel("WARN"); } } /** * Support options common to all tools. 
*/ public static Option[] getCommonToolOptions() { return new Option[0]; } // Creates an instance of the requested tool, using classpath scanning if necessary private static Tool newTool(String toolType) throws Exception { if ("healthcheck".equals(toolType)) return new HealthcheckTool(); else if ("status".equals(toolType)) return new StatusTool(); else if ("api".equals(toolType)) return new ApiTool(); else if ("create_collection".equals(toolType)) return new CreateCollectionTool(); else if ("create_core".equals(toolType)) return new CreateCoreTool(); else if ("create".equals(toolType)) return new CreateTool(); else if ("delete".equals(toolType)) return new DeleteTool(); else if ("config".equals(toolType)) return new ConfigTool(); else if ("run_example".equals(toolType)) return new RunExampleTool(); else if ("upconfig".equals(toolType)) return new ConfigSetUploadTool(); else if ("downconfig".equals(toolType)) return new ConfigSetDownloadTool(); else if ("rm".equals(toolType)) return new ZkRmTool(); else if ("mv".equals(toolType)) return new ZkMvTool(); else if ("cp".equals(toolType)) return new ZkCpTool(); else if ("ls".equals(toolType)) return new ZkLsTool(); else if ("mkroot".equals(toolType)) return new ZkMkrootTool(); else if ("assert".equals(toolType)) return new AssertTool(); else if ("utils".equals(toolType)) return new UtilsTool(); else if ("auth".equals(toolType)) return new AuthTool(); else if ("export".equals(toolType)) return new ExportTool(); else if ("package".equals(toolType)) return new PackageTool(); // If you add a built-in tool to this class, add it here to avoid // classpath scanning for (Class<Tool> next : findToolClassesInPackage("org.apache.solr.util")) { Tool tool = next.getConstructor().newInstance(); if (toolType.equals(tool.getName())) return tool; } throw new IllegalArgumentException(toolType + " not supported!"); } private static void displayToolOptions() throws Exception { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("healthcheck", getToolOptions(new HealthcheckTool())); formatter.printHelp("status", getToolOptions(new StatusTool())); formatter.printHelp("api", getToolOptions(new ApiTool())); formatter.printHelp("create_collection", getToolOptions(new CreateCollectionTool())); formatter.printHelp("create_core", getToolOptions(new CreateCoreTool())); formatter.printHelp("create", getToolOptions(new CreateTool())); formatter.printHelp("delete", getToolOptions(new DeleteTool())); formatter.printHelp("config", getToolOptions(new ConfigTool())); formatter.printHelp("run_example", getToolOptions(new RunExampleTool())); formatter.printHelp("upconfig", getToolOptions(new ConfigSetUploadTool())); formatter.printHelp("downconfig", getToolOptions(new ConfigSetDownloadTool())); formatter.printHelp("rm", getToolOptions(new ZkRmTool())); formatter.printHelp("cp", getToolOptions(new ZkCpTool())); formatter.printHelp("mv", getToolOptions(new ZkMvTool())); formatter.printHelp("ls", getToolOptions(new ZkLsTool())); formatter.printHelp("export", getToolOptions(new ExportTool())); formatter.printHelp("package", getToolOptions(new PackageTool())); List<Class<Tool>> toolClasses = findToolClassesInPackage("org.apache.solr.util"); for (Class<Tool> next : toolClasses) { Tool tool = next.getConstructor().newInstance(); formatter.printHelp(tool.getName(), getToolOptions(tool)); } } private static Options getToolOptions(Tool tool) { Options options = new Options(); options.addOption("help", false, "Print this message"); options.addOption("verbose", false, "Generate 
verbose log messages"); Option[] toolOpts = joinCommonAndToolOptions(tool.getOptions()); for (int i = 0; i < toolOpts.length; i++) options.addOption(toolOpts[i]); return options; } public static Option[] joinCommonAndToolOptions(Option[] toolOpts) { return joinOptions(getCommonToolOptions(), toolOpts); } public static Option[] joinOptions(Option[] lhs, Option[] rhs) { List<Option> options = new ArrayList<Option>(); if (lhs != null && lhs.length > 0) { for (Option opt : lhs) options.add(opt); } if (rhs != null) { for (Option opt : rhs) options.add(opt); } return options.toArray(new Option[0]); } /** * Parses the command-line arguments passed by the user. */ public static CommandLine processCommandLineArgs(Option[] customOptions, String[] args) { Options options = new Options(); options.addOption("help", false, "Print this message"); options.addOption("verbose", false, "Generate verbose log messages"); if (customOptions != null) { for (int i = 0; i < customOptions.length; i++) options.addOption(customOptions[i]); } CommandLine cli = null; try { cli = (new GnuParser()).parse(options, args); } catch (ParseException exp) { boolean hasHelpArg = false; if (args != null && args.length > 0) { for (int z = 0; z < args.length; z++) { if ("--help".equals(args[z]) || "-help".equals(args[z])) { hasHelpArg = true; break; } } } if (!hasHelpArg) { CLIO.err("Failed to parse command-line arguments due to: " + exp.getMessage()); } HelpFormatter formatter = new HelpFormatter(); formatter.printHelp(SolrCLI.class.getName(), options); exit(1); } if (cli.hasOption("help")) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp(SolrCLI.class.getName(), options); exit(0); } return cli; } /** * Scans Jar files on the classpath for Tool implementations to activate. */ @SuppressWarnings("unchecked") private static List<Class<Tool>> findToolClassesInPackage(String packageName) { List<Class<Tool>> toolClasses = new ArrayList<Class<Tool>>(); try { ClassLoader classLoader = SolrCLI.class.getClassLoader(); String path = packageName.replace('.', '/'); Enumeration<URL> resources = classLoader.getResources(path); Set<String> classes = new TreeSet<String>(); while (resources.hasMoreElements()) { URL resource = resources.nextElement(); classes.addAll(findClasses(resource.getFile(), packageName)); } for (String classInPackage : classes) { Class<?> theClass = Class.forName(classInPackage); if (Tool.class.isAssignableFrom(theClass)) toolClasses.add((Class<Tool>) theClass); } } catch (Exception e) { // safe to squelch this as it's just looking for tools to run log.debug("Failed to find Tool impl classes in {}, due to: ", packageName, e); } return toolClasses; } private static Set<String> findClasses(String path, String packageName) throws Exception { Set<String> classes = new TreeSet<String>(); if (path.startsWith("file:") && path.contains("!")) { String[] split = path.split("!"); URL jar = new URL(split[0]); try (ZipInputStream zip = new ZipInputStream(jar.openStream())) { ZipEntry entry; while ((entry = zip.getNextEntry()) != null) { if (entry.getName().endsWith(".class")) { String className = entry.getName().replaceAll("[$].*", "") .replaceAll("[.]class", "").replace('/', '.'); if (className.startsWith(packageName)) classes.add(className); } } } } return classes; } /** * Determine if a request to Solr failed due to a communication error, * which is generally retry-able. 
*/ public static boolean checkCommunicationError(Exception exc) { Throwable rootCause = SolrException.getRootCause(exc); boolean wasCommError = (rootCause instanceof ConnectException || rootCause instanceof ConnectTimeoutException || rootCause instanceof NoHttpResponseException || rootCause instanceof SocketException); return wasCommError; } /** * Tries a simple HEAD request and throws SolrException in case of Authorization error * @param url the url to do a HEAD request to * @param httpClient the http client to use (make sure it has authentication optinos set) * @return the HTTP response code * @throws SolrException if auth/autz problems * @throws IOException if connection failure */ private static int attemptHttpHead(String url, HttpClient httpClient) throws SolrException, IOException { HttpResponse response = httpClient.execute(new HttpHead(url), HttpClientUtil.createNewHttpClientRequestContext()); int code = response.getStatusLine().getStatusCode(); if (code == UNAUTHORIZED.code || code == FORBIDDEN.code) { throw new SolrException(SolrException.ErrorCode.getErrorCode(code), "Solr requires authentication for " + url + ". Please supply valid credentials. HTTP code=" + code); } return code; } private static boolean exceptionIsAuthRelated(Exception exc) { return (exc instanceof SolrException && Arrays.asList(UNAUTHORIZED.code, FORBIDDEN.code).contains(((SolrException) exc).code())); } public static CloseableHttpClient getHttpClient() { ModifiableSolrParams params = new ModifiableSolrParams(); params.set(HttpClientUtil.PROP_MAX_CONNECTIONS, 128); params.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, 32); params.set(HttpClientUtil.PROP_FOLLOW_REDIRECTS, false); return HttpClientUtil.createClient(params); } @SuppressWarnings("deprecation") public static void closeHttpClient(CloseableHttpClient httpClient) { if (httpClient != null) { try { HttpClientUtil.close(httpClient); } catch (Exception exc) { // safe to ignore, we're just shutting things down } } } public static final String JSON_CONTENT_TYPE = "application/json"; public static NamedList<Object> postJsonToSolr(SolrClient solrClient, String updatePath, String jsonBody) throws Exception { ContentStreamBase.StringStream contentStream = new ContentStreamBase.StringStream(jsonBody); contentStream.setContentType(JSON_CONTENT_TYPE); ContentStreamUpdateRequest req = new ContentStreamUpdateRequest(updatePath); req.addContentStream(contentStream); return solrClient.request(req); } /** * Useful when a tool just needs to send one request to Solr. */ public static Map<String,Object> getJson(String getUrl) throws Exception { Map<String,Object> json = null; CloseableHttpClient httpClient = getHttpClient(); try { json = getJson(httpClient, getUrl, 2, true); } finally { closeHttpClient(httpClient); } return json; } /** * Utility function for sending HTTP GET request to Solr with built-in retry support. 
*/ public static Map<String,Object> getJson(HttpClient httpClient, String getUrl, int attempts, boolean isFirstAttempt) throws Exception { Map<String,Object> json = null; if (attempts >= 1) { try { json = getJson(httpClient, getUrl); } catch (Exception exc) { if (exceptionIsAuthRelated(exc)) { throw exc; } if (--attempts > 0 && checkCommunicationError(exc)) { if (!isFirstAttempt) // only show the log warning after the second attempt fails log.warn("Request to {} failed, sleeping for 5 seconds before re-trying the request ...", getUrl, exc); try { Thread.sleep(5000); } catch (InterruptedException ie) { Thread.interrupted(); } // retry using recursion with one-less attempt available json = getJson(httpClient, getUrl, attempts, false); } else { // no more attempts or error is not retry-able throw exc; } } } return json; } @SuppressWarnings("unchecked") private static class SolrResponseHandler implements ResponseHandler<Map<String,Object>> { public Map<String,Object> handleResponse(HttpResponse response) throws ClientProtocolException, IOException { HttpEntity entity = response.getEntity(); if (entity != null) { String respBody = EntityUtils.toString(entity); Object resp = null; try { resp = fromJSONString(respBody); } catch (JSONParser.ParseException pe) { throw new ClientProtocolException("Expected JSON response from server but received: "+respBody+ "\nTypically, this indicates a problem with the Solr server; check the Solr server logs for more information."); } if (resp != null && resp instanceof Map) { return (Map<String,Object>)resp; } else { throw new ClientProtocolException("Expected JSON object in response but received "+ resp); } } else { StatusLine statusLine = response.getStatusLine(); throw new HttpResponseException(statusLine.getStatusCode(), statusLine.getReasonPhrase()); } } } /** * Utility function for sending HTTP GET request to Solr and then doing some * validation of the response. */ @SuppressWarnings({"unchecked"}) public static Map<String,Object> getJson(HttpClient httpClient, String getUrl) throws Exception { try { // ensure we're requesting JSON back from Solr HttpGet httpGet = new HttpGet(new URIBuilder(getUrl).setParameter(CommonParams.WT, CommonParams.JSON).build()); // make the request and get back a parsed JSON object Map<String, Object> json = httpClient.execute(httpGet, new SolrResponseHandler(), HttpClientUtil.createNewHttpClientRequestContext()); // check the response JSON from Solr to see if it is an error Long statusCode = asLong("/responseHeader/status", json); if (statusCode == -1) { throw new SolrServerException("Unable to determine outcome of GET request to: "+ getUrl+"! Response: "+json); } else if (statusCode != 0) { String errMsg = asString("/error/msg", json); if (errMsg == null) errMsg = String.valueOf(json); throw new SolrServerException(errMsg); } else { // make sure no "failure" object in there either Object failureObj = json.get("failure"); if (failureObj != null) { if (failureObj instanceof Map) { Object err = ((Map)failureObj).get(""); if (err != null) throw new SolrServerException(err.toString()); } throw new SolrServerException(failureObj.toString()); } } return json; } catch (ClientProtocolException cpe) { // Currently detecting authentication by string-matching the HTTP response // Perhaps SolrClient should have thrown an exception itself?? if (cpe.getMessage().contains("HTTP ERROR 401") || cpe.getMessage().contentEquals("HTTP ERROR 403")) { int code = cpe.getMessage().contains("HTTP ERROR 401") ? 
401 : 403; throw new SolrException(SolrException.ErrorCode.getErrorCode(code), "Solr requires authentication for " + getUrl + ". Please supply valid credentials. HTTP code=" + code); } else { throw cpe; } } } /** * Helper function for reading a String value from a JSON Object tree. */ public static String asString(String jsonPath, Map<String,Object> json) { return pathAs(String.class, jsonPath, json); } /** * Helper function for reading a Long value from a JSON Object tree. */ public static Long asLong(String jsonPath, Map<String,Object> json) { return pathAs(Long.class, jsonPath, json); } /** * Helper function for reading a List of Strings from a JSON Object tree. */ @SuppressWarnings("unchecked") public static List<String> asList(String jsonPath, Map<String,Object> json) { return pathAs(List.class, jsonPath, json); } /** * Helper function for reading a Map from a JSON Object tree. */ @SuppressWarnings("unchecked") public static Map<String,Object> asMap(String jsonPath, Map<String,Object> json) { return pathAs(Map.class, jsonPath, json); } @SuppressWarnings("unchecked") public static <T> T pathAs(Class<T> clazz, String jsonPath, Map<String,Object> json) { T val = null; Object obj = atPath(jsonPath, json); if (obj != null) { if (clazz.isAssignableFrom(obj.getClass())) { val = (T) obj; } else { // no ok if it's not null and of a different type throw new IllegalStateException("Expected a " + clazz.getName() + " at path "+ jsonPath+" but found "+obj+" instead! "+json); } } // it's ok if it is null return val; } /** * Helper function for reading an Object of unknown type from a JSON Object tree. * * To find a path to a child that starts with a slash (e.g. queryHandler named /query) * you must escape the slash. For instance /config/requestHandler/\/query/defaults/echoParams * would get the echoParams value for the "/query" request handler. */ @SuppressWarnings({"rawtypes", "unchecked"}) public static Object atPath(String jsonPath, Map<String,Object> json) { if ("/".equals(jsonPath)) return json; if (!jsonPath.startsWith("/")) throw new IllegalArgumentException("Invalid JSON path: "+ jsonPath+"! Must start with a /"); Map<String,Object> parent = json; Object result = null; String[] path = jsonPath.split("(?<![\\\\])/"); // Break on all slashes _not_ preceeded by a backslash for (int p=1; p < path.length; p++) { String part = path[p]; if (part.startsWith("\\")) { part = part.substring(1); } Object child = parent.get(part); if (child == null) break; if (p == path.length-1) { // success - found the node at the desired path result = child; } else { if (child instanceof Map) { // keep walking the path down to the desired node parent = (Map)child; } else { // early termination - hit a leaf before the requested node break; } } } return result; } /** * Get the status of a Solr server. 
*/ public static class StatusTool extends ToolBase { public StatusTool() { this(CLIO.getOutStream()); } public StatusTool(PrintStream stdout) { super(stdout); } public String getName() { return "status"; } public Option[] getOptions() { return new Option[] { Option.builder("solr") .argName("URL") .hasArg() .required(false) .desc("Address of the Solr Web application, defaults to: "+DEFAULT_SOLR_URL) .build(), Option.builder("maxWaitSecs") .argName("SECS") .hasArg() .required(false) .desc("Wait up to the specified number of seconds to see Solr running.") .build() }; } protected void runImpl(CommandLine cli) throws Exception { int maxWaitSecs = Integer.parseInt(cli.getOptionValue("maxWaitSecs", "0")); String solrUrl = cli.getOptionValue("solr", DEFAULT_SOLR_URL); if (maxWaitSecs > 0) { int solrPort = (new URL(solrUrl)).getPort(); echo("Waiting up to "+maxWaitSecs+" to see Solr running on port "+solrPort); try { waitToSeeSolrUp(solrUrl, maxWaitSecs); echo("Started Solr server on port "+solrPort+". Happy searching!"); } catch (TimeoutException timeout) { throw new Exception("Solr at "+solrUrl+" did not come online within "+maxWaitSecs+" seconds!"); } } else { try { CharArr arr = new CharArr(); new JSONWriter(arr, 2).write(getStatus(solrUrl)); echo(arr.toString()); } catch (Exception exc) { if (exceptionIsAuthRelated(exc)) { throw exc; } if (checkCommunicationError(exc)) { // this is not actually an error from the tool as it's ok if Solr is not online. CLIO.err("Solr at "+solrUrl+" not online."); } else { throw new Exception("Failed to get system information from " + solrUrl + " due to: "+exc); } } } } public Map<String,Object> waitToSeeSolrUp(String solrUrl, int maxWaitSecs) throws Exception { long timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(maxWaitSecs, TimeUnit.SECONDS); while (System.nanoTime() < timeout) { try { return getStatus(solrUrl); } catch (SSLPeerUnverifiedException exc) { throw exc; } catch (Exception exc) { if (exceptionIsAuthRelated(exc)) { throw exc; } try { Thread.sleep(2000L); } catch (InterruptedException interrupted) { timeout = 0; // stop looping } } } throw new TimeoutException("Did not see Solr at "+solrUrl+" come online within "+maxWaitSecs+" seconds!"); } public Map<String,Object> getStatus(String solrUrl) throws Exception { Map<String,Object> status = null; if (!solrUrl.endsWith("/")) solrUrl += "/"; String systemInfoUrl = solrUrl+"admin/info/system"; CloseableHttpClient httpClient = getHttpClient(); try { // hit Solr to get system info Map<String,Object> systemInfo = getJson(httpClient, systemInfoUrl, 2, true); // convert raw JSON into user-friendly output status = reportStatus(solrUrl, systemInfo, httpClient); } finally { closeHttpClient(httpClient); } return status; } public Map<String,Object> reportStatus(String solrUrl, Map<String,Object> info, HttpClient httpClient) throws Exception { Map<String,Object> status = new LinkedHashMap<String,Object>(); String solrHome = (String)info.get("solr_home"); status.put("solr_home", solrHome != null ? 
solrHome : "?"); status.put("version", asString("/lucene/solr-impl-version", info)); status.put("startTime", asString("/jvm/jmx/startTime", info)); status.put("uptime", uptime(asLong("/jvm/jmx/upTimeMS", info))); String usedMemory = asString("/jvm/memory/used", info); String totalMemory = asString("/jvm/memory/total", info); status.put("memory", usedMemory+" of "+totalMemory); // if this is a Solr in solrcloud mode, gather some basic cluster info if ("solrcloud".equals(info.get("mode"))) { String zkHost = (String)info.get("zkHost"); status.put("cloud", getCloudStatus(httpClient, solrUrl, zkHost)); } return status; } /** * Calls the CLUSTERSTATUS endpoint in Solr to get basic status information about * the SolrCloud cluster. */ protected Map<String,String> getCloudStatus(HttpClient httpClient, String solrUrl, String zkHost) throws Exception { Map<String,String> cloudStatus = new LinkedHashMap<String,String>(); cloudStatus.put("ZooKeeper", (zkHost != null) ? zkHost : "?"); String clusterStatusUrl = solrUrl+"admin/collections?action=CLUSTERSTATUS"; Map<String,Object> json = getJson(httpClient, clusterStatusUrl, 2, true); List<String> liveNodes = asList("/cluster/live_nodes", json); cloudStatus.put("liveNodes", String.valueOf(liveNodes.size())); Map<String,Object> collections = asMap("/cluster/collections", json); cloudStatus.put("collections", String.valueOf(collections.size())); return cloudStatus; } } // end StatusTool class /** * Used to send an arbitrary HTTP request to a Solr API endpoint. */ public static class ApiTool extends ToolBase { public ApiTool() { this(CLIO.getOutStream()); } public ApiTool(PrintStream stdout) { super(stdout); } public String getName() { return "api"; } public Option[] getOptions() { return new Option[] { Option.builder("get") .argName("URL") .hasArg() .required(false) .desc("Send a GET request to a Solr API endpoint") .build() }; } protected void runImpl(CommandLine cli) throws Exception { String getUrl = cli.getOptionValue("get"); if (getUrl != null) { Map<String,Object> json = getJson(getUrl); // pretty-print the response to stdout CharArr arr = new CharArr(); new JSONWriter(arr, 2).write(json); echo(arr.toString()); } } } // end ApiTool class private static final String DEFAULT_CONFIG_SET = "_default"; private static final long MS_IN_MIN = 60 * 1000L; private static final long MS_IN_HOUR = MS_IN_MIN * 60L; private static final long MS_IN_DAY = MS_IN_HOUR * 24L; @VisibleForTesting static final String uptime(long uptimeMs) { if (uptimeMs <= 0L) return "?"; long numDays = (uptimeMs >= MS_IN_DAY) ? (uptimeMs / MS_IN_DAY) : 0L; long rem = uptimeMs - (numDays * MS_IN_DAY); long numHours = (rem >= MS_IN_HOUR) ? (rem / MS_IN_HOUR) : 0L; rem = rem - (numHours * MS_IN_HOUR); long numMinutes = (rem >= MS_IN_MIN) ? 
(rem / MS_IN_MIN) : 0L; rem = rem - (numMinutes * MS_IN_MIN); long numSeconds = Math.round(rem / 1000.0); return String.format(Locale.ROOT, "%d days, %d hours, %d minutes, %d seconds", numDays, numHours, numMinutes, numSeconds); } static class ReplicaHealth implements Comparable<ReplicaHealth> { String shard; String name; String url; String status; long numDocs; boolean isLeader; String uptime; String memory; ReplicaHealth(String shard, String name, String url, String status, long numDocs, boolean isLeader, String uptime, String memory) { this.shard = shard; this.name = name; this.url = url; this.numDocs = numDocs; this.status = status; this.isLeader = isLeader; this.uptime = uptime; this.memory = memory; } public Map<String,Object> asMap() { Map<String,Object> map = new LinkedHashMap<String,Object>(); map.put(NAME, name); map.put("url", url); map.put("numDocs", numDocs); map.put("status", status); if (uptime != null) map.put("uptime", uptime); if (memory != null) map.put("memory", memory); if (isLeader) map.put("leader", true); return map; } public String toString() { CharArr arr = new CharArr(); new JSONWriter(arr, 2).write(asMap()); return arr.toString(); } public int hashCode() { return this.shard.hashCode() + (isLeader ? 1 : 0); } public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (!(obj instanceof ReplicaHealth)) return true; ReplicaHealth that = (ReplicaHealth) obj; return this.shard.equals(that.shard) && this.isLeader == that.isLeader; } public int compareTo(ReplicaHealth other) { if (this == other) return 0; if (other == null) return 1; int myShardIndex = Integer.parseInt(this.shard.substring("shard".length())); int otherShardIndex = Integer.parseInt(other.shard.substring("shard".length())); if (myShardIndex == otherShardIndex) { // same shard index, list leaders first return this.isLeader ? -1 : 1; } return myShardIndex - otherShardIndex; } } static enum ShardState { healthy, degraded, down, no_leader } static class ShardHealth { String shard; List<ReplicaHealth> replicas; ShardHealth(String shard, List<ReplicaHealth> replicas) { this.shard = shard; this.replicas = replicas; } public ShardState getShardState() { boolean healthy = true; boolean hasLeader = false; boolean atLeastOneActive = false; for (ReplicaHealth replicaHealth : replicas) { if (replicaHealth.isLeader) hasLeader = true; if (!Replica.State.ACTIVE.toString().equals(replicaHealth.status)) { healthy = false; } else { atLeastOneActive = true; } } if (!hasLeader) return ShardState.no_leader; return healthy ? ShardState.healthy : (atLeastOneActive ? ShardState.degraded : ShardState.down); } public Map<String,Object> asMap() { Map<String,Object> map = new LinkedHashMap<>(); map.put("shard", shard); map.put("status", getShardState().toString()); List<Object> replicaList = new ArrayList<Object>(); for (ReplicaHealth replica : replicas) replicaList.add(replica.asMap()); map.put("replicas", replicaList); return map; } public String toString() { CharArr arr = new CharArr(); new JSONWriter(arr, 2).write(asMap()); return arr.toString(); } } /** * Requests health information about a specific collection in SolrCloud. 
*/ public static class HealthcheckTool extends SolrCloudTool { public HealthcheckTool() { this(CLIO.getOutStream()); } public HealthcheckTool(PrintStream stdout) { super(stdout); } @Override public String getName() { return "healthcheck"; } @Override protected void runCloudTool(CloudSolrClient cloudSolrClient, CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String collection = cli.getOptionValue("collection"); if (collection == null) throw new IllegalArgumentException("Must provide a collection to run a healthcheck against!"); log.debug("Running healthcheck for {}", collection); ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader(); ClusterState clusterState = zkStateReader.getClusterState(); Set<String> liveNodes = clusterState.getLiveNodes(); final DocCollection docCollection = clusterState.getCollectionOrNull(collection); if (docCollection == null || docCollection.getSlices() == null) throw new IllegalArgumentException("Collection "+collection+" not found!"); Collection<Slice> slices = docCollection.getSlices(); // Test http code using a HEAD request first, fail fast if authentication failure String urlForColl = zkStateReader.getLeaderUrl(collection, slices.stream().findFirst().get().getName(), 1000); attemptHttpHead(urlForColl, cloudSolrClient.getHttpClient()); SolrQuery q = new SolrQuery("*:*"); q.setRows(0); QueryResponse qr = cloudSolrClient.query(q); String collErr = null; long docCount = -1; try { docCount = qr.getResults().getNumFound(); } catch (Exception exc) { collErr = String.valueOf(exc); } List<Object> shardList = new ArrayList<>(); boolean collectionIsHealthy = (docCount != -1); for (Slice slice : slices) { String shardName = slice.getName(); // since we're reporting health of this shard, there's no guarantee of a leader String leaderUrl = null; try { leaderUrl = zkStateReader.getLeaderUrl(collection, shardName, 1000); } catch (Exception exc) { log.warn("Failed to get leader for shard {} due to: {}", shardName, exc); } List<ReplicaHealth> replicaList = new ArrayList<ReplicaHealth>(); for (Replica r : slice.getReplicas()) { String uptime = null; String memory = null; String replicaStatus = null; long numDocs = -1L; ZkCoreNodeProps replicaCoreProps = new ZkCoreNodeProps(r); String coreUrl = replicaCoreProps.getCoreUrl(); boolean isLeader = coreUrl.equals(leaderUrl); // if replica's node is not live, its status is DOWN String nodeName = replicaCoreProps.getNodeName(); if (nodeName == null || !liveNodes.contains(nodeName)) { replicaStatus = Replica.State.DOWN.toString(); } else { // query this replica directly to get doc count and assess health q = new SolrQuery("*:*"); q.setRows(0); q.set(DISTRIB, "false"); try (HttpSolrClient solr = new HttpSolrClient.Builder(coreUrl).build()) { String solrUrl = solr.getBaseURL(); qr = solr.query(q); numDocs = qr.getResults().getNumFound(); int lastSlash = solrUrl.lastIndexOf('/'); String systemInfoUrl = solrUrl.substring(0,lastSlash)+"/admin/info/system"; Map<String,Object> info = getJson(solr.getHttpClient(), systemInfoUrl, 2, true); uptime = uptime(asLong("/jvm/jmx/upTimeMS", info)); String usedMemory = asString("/jvm/memory/used", info); String totalMemory = asString("/jvm/memory/total", info); memory = usedMemory+" of "+totalMemory; // if we get here, we can trust the state replicaStatus = replicaCoreProps.getState(); } catch (Exception exc) { log.error("ERROR: {} when trying to reach: {}", exc, coreUrl); if (checkCommunicationError(exc)) { replicaStatus = Replica.State.DOWN.toString(); } else { 
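// a non-communication failure (e.g. a bad response from the replica) is surfaced verbatim in the
// health report below instead of marking the replica DOWN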
replicaStatus = "error: "+exc; } } } replicaList.add(new ReplicaHealth(shardName, r.getName(), coreUrl, replicaStatus, numDocs, isLeader, uptime, memory)); } ShardHealth shardHealth = new ShardHealth(shardName, replicaList); if (ShardState.healthy != shardHealth.getShardState()) collectionIsHealthy = false; // at least one shard is un-healthy shardList.add(shardHealth.asMap()); } Map<String,Object> report = new LinkedHashMap<String,Object>(); report.put("collection", collection); report.put("status", collectionIsHealthy ? "healthy" : "degraded"); if (collErr != null) { report.put("error", collErr); } report.put("numDocs", docCount); report.put("numShards", slices.size()); report.put("shards", shardList); CharArr arr = new CharArr(); new JSONWriter(arr, 2).write(report); echo(arr.toString()); } } // end HealthcheckTool private static final Option[] CREATE_COLLECTION_OPTIONS = new Option[] { Option.builder("zkHost") .argName("HOST") .hasArg() .required(false) .desc("Address of the Zookeeper ensemble; defaults to: " + ZK_HOST) .build(), Option.builder("solrUrl") .argName("HOST") .hasArg() .required(false) .desc("Base Solr URL, which can be used to determine the zkHost if that's not known") .build(), Option.builder(NAME) .argName("NAME") .hasArg() .required(true) .desc("Name of collection to create.") .build(), Option.builder("shards") .argName("#") .hasArg() .required(false) .desc("Number of shards; default is 1") .build(), Option.builder("replicationFactor") .argName("#") .hasArg() .required(false) .desc("Number of copies of each document across the collection (replicas per shard); default is 1") .build(), Option.builder("confdir") .argName("NAME") .hasArg() .required(false) .desc("Configuration directory to copy when creating the new collection; default is "+DEFAULT_CONFIG_SET) .build(), Option.builder("confname") .argName("NAME") .hasArg() .required(false) .desc("Configuration name; default is the collection name") .build(), Option.builder("configsetsDir") .argName("DIR") .hasArg() .required(true) .desc("Path to configsets directory on the local system.") .build(), Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; /** * Get the base URL of a live Solr instance from either the solrUrl command-line option from ZooKeeper. */ public static String resolveSolrUrl(CommandLine cli) throws Exception { String solrUrl = cli.getOptionValue("solrUrl"); if (solrUrl == null) { String zkHost = cli.getOptionValue("zkHost"); if (zkHost == null) throw new IllegalStateException("Must provide either the '-solrUrl' or '-zkHost' parameters!"); try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty()).build()) { cloudSolrClient.connect(); Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes(); if (liveNodes.isEmpty()) throw new IllegalStateException("No live nodes found! Cannot determine 'solrUrl' from ZooKeeper: "+zkHost); String firstLiveNode = liveNodes.iterator().next(); solrUrl = cloudSolrClient.getZkStateReader().getBaseUrlForNodeName(firstLiveNode); } } return solrUrl; } /** * Get the ZooKeeper connection string from either the zkHost command-line option or by looking it * up from a running Solr instance based on the solrUrl option. 
*/ public static String getZkHost(CommandLine cli) throws Exception { String zkHost = cli.getOptionValue("zkHost"); if (zkHost != null) return zkHost; // not provided on the command line; look it up from a running Solr via its system info endpoint String solrUrl = cli.getOptionValue("solrUrl"); if (solrUrl == null) throw new IllegalStateException( "Must provide either the -zkHost or -solrUrl parameters!"); if (!solrUrl.endsWith("/")) solrUrl += "/"; String systemInfoUrl = solrUrl+"admin/info/system"; CloseableHttpClient httpClient = getHttpClient(); try { // hit Solr to get system info Map<String,Object> systemInfo = getJson(httpClient, systemInfoUrl, 2, true); // convert raw JSON into user-friendly output StatusTool statusTool = new StatusTool(); Map<String,Object> status = statusTool.reportStatus(solrUrl, systemInfo, httpClient); @SuppressWarnings("unchecked") Map<String,Object> cloud = (Map<String, Object>)status.get("cloud"); if (cloud != null) { String zookeeper = (String) cloud.get("ZooKeeper"); if (zookeeper.endsWith("(embedded)")) { zookeeper = zookeeper.substring(0, zookeeper.length() - "(embedded)".length()); } zkHost = zookeeper; } } finally { HttpClientUtil.close(httpClient); } return zkHost; } public static boolean safeCheckCollectionExists(String url, String collection) { boolean exists = false; try { Map<String,Object> existsCheckResult = getJson(url); @SuppressWarnings("unchecked") List<String> collections = (List<String>) existsCheckResult.get("collections"); exists = collections != null && collections.contains(collection); } catch (Exception exc) { // just ignore it since we're only interested in a positive result here } return exists; } public static boolean safeCheckCoreExists(String coreStatusUrl, String coreName) { boolean exists = false; try { boolean wait = false; final long startWaitAt = System.nanoTime(); do { if (wait) { final int clampPeriodForStatusPollMs = 1000; Thread.sleep(clampPeriodForStatusPollMs); } Map<String,Object> existsCheckResult = getJson(coreStatusUrl); @SuppressWarnings("unchecked") Map<String,Object> status = (Map<String, Object>)existsCheckResult.get("status"); @SuppressWarnings("unchecked") Map<String,Object> coreStatus = (Map<String, Object>)status.get(coreName); @SuppressWarnings("unchecked") Map<String,Object> failureStatus = (Map<String, Object>)existsCheckResult.get("initFailures"); String errorMsg = (String) failureStatus.get(coreName); final boolean hasName = coreStatus != null && coreStatus.containsKey(NAME); exists = hasName || errorMsg != null; wait = hasName && errorMsg == null && "true".equals(coreStatus.get("isLoading")); } while (wait && System.nanoTime() - startWaitAt < MAX_WAIT_FOR_CORE_LOAD_NANOS); } catch (Exception exc) { // just ignore it since we're only interested in a positive result here } return exists; } /** * Supports the create_collection command in the bin/solr script.
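* <p>Illustrative invocation (assumed script syntax; flags mirror CREATE_COLLECTION_OPTIONS):
* {@code bin/solr create_collection -name films -shards 2 -replicationFactor 2 -confdir _default -configsetsDir server/solr/configsets -solrUrl http://localhost:8983/solr}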
*/ public static class CreateCollectionTool extends ToolBase { public CreateCollectionTool() { this(CLIO.getOutStream()); } public CreateCollectionTool(PrintStream stdout) { super(stdout); } public String getName() { return "create_collection"; } public Option[] getOptions() { return CREATE_COLLECTION_OPTIONS; } protected void runImpl(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String zkHost = getZkHost(cli); if (zkHost == null) { throw new IllegalStateException("Solr at "+cli.getOptionValue("solrUrl")+ " is running in standalone server mode, please use the create_core command instead;\n" + "create_collection can only be used when running in SolrCloud mode.\n"); } try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty()).build()) { echoIfVerbose("\nConnecting to ZooKeeper at " + zkHost+" ...", cli); cloudSolrClient.connect(); runCloudTool(cloudSolrClient, cli); } } protected void runCloudTool(CloudSolrClient cloudSolrClient, CommandLine cli) throws Exception { Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes(); if (liveNodes.isEmpty()) throw new IllegalStateException("No live nodes found! Cannot create a collection until " + "there is at least 1 live node in the cluster."); String baseUrl = cli.getOptionValue("solrUrl"); if (baseUrl == null) { String firstLiveNode = liveNodes.iterator().next(); baseUrl = cloudSolrClient.getZkStateReader().getBaseUrlForNodeName(firstLiveNode); } String collectionName = cli.getOptionValue(NAME); // build a URL to create the collection int numShards = optionAsInt(cli, "shards", 1); int replicationFactor = optionAsInt(cli, "replicationFactor", 1); String confname = cli.getOptionValue("confname"); String confdir = cli.getOptionValue("confdir"); String configsetsDir = cli.getOptionValue("configsetsDir"); boolean configExistsInZk = confname != null && !"".equals(confname.trim()) && cloudSolrClient.getZkStateReader().getZkClient().exists("/configs/" + confname, true); if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) { //do nothing } else if (configExistsInZk) { echo("Re-using existing configuration directory "+confname); } else if (confdir != null && !"".equals(confdir.trim())){ if (confname == null || "".equals(confname.trim())) { confname = collectionName; } Path confPath = ZkConfigManager.getConfigsetPath(confdir, configsetsDir); echoIfVerbose("Uploading " + confPath.toAbsolutePath().toString() + " for config " + confname + " to ZooKeeper at " + cloudSolrClient.getZkHost(), cli); ((ZkClientClusterStateProvider) cloudSolrClient.getClusterStateProvider()).uploadConfig(confPath, confname); } // since creating a collection is a heavy-weight operation, check for existence first String collectionListUrl = baseUrl+"/admin/collections?action=list"; if (safeCheckCollectionExists(collectionListUrl, collectionName)) { throw new IllegalStateException("\nCollection '"+collectionName+ "' already exists!\nChecked collection existence using Collections API command:\n"+ collectionListUrl); } // doesn't seem to exist ... 
try to create String createCollectionUrl = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&numShards=%d&replicationFactor=%d", baseUrl, collectionName, numShards, replicationFactor); if (confname != null && !"".equals(confname.trim())) { createCollectionUrl = createCollectionUrl + String.format(Locale.ROOT, "&collection.configName=%s", confname); } echoIfVerbose("\nCreating new collection '"+collectionName+"' using command:\n"+createCollectionUrl+"\n", cli); Map<String,Object> json = null; try { json = getJson(createCollectionUrl); } catch (SolrServerException sse) { throw new Exception("Failed to create collection '"+collectionName+"' due to: "+sse.getMessage()); } if (cli.hasOption("verbose")) { CharArr arr = new CharArr(); new JSONWriter(arr, 2).write(json); echo(arr.toString()); } else { String endMessage = String.format(Locale.ROOT, "Created collection '%s' with %d shard(s), %d replica(s)", collectionName, numShards, replicationFactor); if (confname != null && !"".equals(confname.trim())) { endMessage += String.format(Locale.ROOT, " with config-set '%s'", confname); } echo(endMessage); } } protected int optionAsInt(CommandLine cli, String option, int defaultVal) { return Integer.parseInt(cli.getOptionValue(option, String.valueOf(defaultVal))); } } // end CreateCollectionTool class public static class CreateCoreTool extends ToolBase { public CreateCoreTool() { this(CLIO.getOutStream()); } public CreateCoreTool(PrintStream stdout) { super(stdout); } public String getName() { return "create_core"; } public Option[] getOptions() { return new Option[] { Option.builder("solrUrl") .argName("URL") .hasArg() .required(false) .desc("Base Solr URL, default is " + DEFAULT_SOLR_URL) .build(), Option.builder(NAME) .argName("NAME") .hasArg() .required(true) .desc("Name of the core to create.") .build(), Option.builder("confdir") .argName("CONFIG") .hasArg() .required(false) .desc("Configuration directory to copy when creating the new core; default is "+DEFAULT_CONFIG_SET) .build(), Option.builder("configsetsDir") .argName("DIR") .hasArg() .required(true) .desc("Path to configsets directory on the local system.") .build(), Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; } protected void runImpl(CommandLine cli) throws Exception { String solrUrl = cli.getOptionValue("solrUrl", DEFAULT_SOLR_URL); if (!solrUrl.endsWith("/")) solrUrl += "/"; File configsetsDir = new File(cli.getOptionValue("configsetsDir")); if (!configsetsDir.isDirectory()) throw new FileNotFoundException(configsetsDir.getAbsolutePath() + " not found!"); String configSet = cli.getOptionValue("confdir", DEFAULT_CONFIG_SET); File configSetDir = new File(configsetsDir, configSet); if (!configSetDir.isDirectory()) { // we allow them to pass a directory instead of a configset name File possibleConfigDir = new File(configSet); if (possibleConfigDir.isDirectory()) { configSetDir = possibleConfigDir; } else { throw new FileNotFoundException("Specified config directory " + configSet + " not found in " + configsetsDir.getAbsolutePath()); } } String coreName = cli.getOptionValue(NAME); String systemInfoUrl = solrUrl+"admin/info/system"; CloseableHttpClient httpClient = getHttpClient(); String coreRootDirectory = null; //usually same as solr home, but not always try { Map<String,Object> systemInfo = getJson(httpClient, systemInfoUrl, 2, true); if ("solrcloud".equals(systemInfo.get("mode"))) { throw new IllegalStateException("Solr at "+solrUrl+ " is running in SolrCloud mode, 
please use create_collection command instead."); } // convert raw JSON into user-friendly output coreRootDirectory = (String)systemInfo.get("core_root"); //Fall back to solr_home, in case we are running against older server that does not return the property if (coreRootDirectory == null) coreRootDirectory = (String)systemInfo.get("solr_home"); if (coreRootDirectory == null) coreRootDirectory = configsetsDir.getParentFile().getAbsolutePath(); } finally { closeHttpClient(httpClient); } String coreStatusUrl = solrUrl+"admin/cores?action=STATUS&core="+coreName; if (safeCheckCoreExists(coreStatusUrl, coreName)) { throw new IllegalArgumentException("\nCore '"+coreName+ "' already exists!\nChecked core existence using Core API command:\n"+coreStatusUrl); } File coreInstanceDir = new File(coreRootDirectory, coreName); File confDir = new File(configSetDir,"conf"); if (!coreInstanceDir.isDirectory()) { coreInstanceDir.mkdirs(); if (!coreInstanceDir.isDirectory()) throw new IOException("Failed to create new core instance directory: "+coreInstanceDir.getAbsolutePath()); if (confDir.isDirectory()) { FileUtils.copyDirectoryToDirectory(confDir, coreInstanceDir); } else { // hmmm ... the configset we're cloning doesn't have a conf sub-directory, // we'll just assume it is OK if it has solrconfig.xml if ((new File(configSetDir, "solrconfig.xml")).isFile()) { FileUtils.copyDirectory(configSetDir, new File(coreInstanceDir, "conf")); } else { throw new IllegalArgumentException("\n"+configSetDir.getAbsolutePath()+" doesn't contain a conf subdirectory or solrconfig.xml\n"); } } echoIfVerbose("\nCopying configuration to new core instance directory:\n" + coreInstanceDir.getAbsolutePath(), cli); } String createCoreUrl = String.format(Locale.ROOT, "%sadmin/cores?action=CREATE&name=%s&instanceDir=%s", solrUrl, coreName, coreName); echoIfVerbose("\nCreating new core '" + coreName + "' using command:\n" + createCoreUrl + "\n", cli); try { Map<String,Object> json = getJson(createCoreUrl); if (cli.hasOption("verbose")) { CharArr arr = new CharArr(); new JSONWriter(arr, 2).write(json); echo(arr.toString()); echo("\n"); } else { echo(String.format(Locale.ROOT, "\nCreated new core '%s'", coreName)); } } catch (Exception e) { /* create-core failed, cleanup the copied configset before propagating the error. 
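A leftover instance directory would also make a retry skip the configset copy above, since that copy only
runs when the core instance directory does not yet exist.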
*/ FileUtils.deleteDirectory(coreInstanceDir); throw e; } } } // end CreateCoreTool class public static class CreateTool extends ToolBase { public CreateTool() { this(CLIO.getOutStream()); } public CreateTool(PrintStream stdout) { super(stdout); } public String getName() { return "create"; } public Option[] getOptions() { return CREATE_COLLECTION_OPTIONS; } protected void runImpl(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String solrUrl = cli.getOptionValue("solrUrl", DEFAULT_SOLR_URL); if (!solrUrl.endsWith("/")) solrUrl += "/"; String systemInfoUrl = solrUrl+"admin/info/system"; CloseableHttpClient httpClient = getHttpClient(); ToolBase tool = null; try { Map<String, Object> systemInfo = getJson(httpClient, systemInfoUrl, 2, true); if ("solrcloud".equals(systemInfo.get("mode"))) { tool = new CreateCollectionTool(stdout); } else { tool = new CreateCoreTool(stdout); } tool.runImpl(cli); } finally { closeHttpClient(httpClient); } } } // end CreateTool class public static class ConfigSetUploadTool extends ToolBase { public ConfigSetUploadTool() { this(CLIO.getOutStream()); } public ConfigSetUploadTool(PrintStream stdout) { super(stdout); } public Option[] getOptions() { return new Option[]{ Option.builder("confname") .argName("confname") // Comes out in help message .hasArg() // Has one sub-argument .required(true) // confname argument must be present .desc("Configset name on Zookeeper") .build(), // passed as -confname value Option.builder("confdir") .argName("confdir") .hasArg() .required(true) .desc("Local directory with configs") .build(), Option.builder("configsetsDir") .argName("configsetsDir") .hasArg() .required(false) .desc("Parent directory of example configsets") .build(), Option.builder("zkHost") .argName("HOST") .hasArg() .required(true) .desc("Address of the Zookeeper ensemble; defaults to: " + ZK_HOST) .build(), Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; } public String getName() { return "upconfig"; } protected void runImpl(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String zkHost = getZkHost(cli); if (zkHost == null) { throw new IllegalStateException("Solr at " + cli.getOptionValue("solrUrl") + " is running in standalone server mode, upconfig can only be used when running in SolrCloud mode.\n"); } String confName = cli.getOptionValue("confname"); try (SolrZkClient zkClient = new SolrZkClient(zkHost, 30000)) { echoIfVerbose("\nConnecting to ZooKeeper at " + zkHost + " ...", cli); Path confPath = ZkConfigManager.getConfigsetPath(cli.getOptionValue("confdir"), cli.getOptionValue("configsetsDir")); echo("Uploading " + confPath.toAbsolutePath().toString() + " for config " + cli.getOptionValue("confname") + " to ZooKeeper at " + zkHost); zkClient.upConfig(confPath, confName); } catch (Exception e) { log.error("Could not complete upconfig operation for reason: ", e); throw (e); } } } public static class ConfigSetDownloadTool extends ToolBase { public ConfigSetDownloadTool() { this(CLIO.getOutStream()); } public ConfigSetDownloadTool(PrintStream stdout) { super(stdout); } public Option[] getOptions() { return new Option[]{ Option.builder("confname") .argName("confname") .hasArg() .required(true) .desc("Configset name on Zookeeper") .build(), Option.builder("confdir") .argName("confdir") .hasArg() .required(true) .desc("Local directory with configs") .build(), Option.builder("zkHost") .argName("HOST") .hasArg() .required(true) .desc("Address of the Zookeeper ensemble; defaults 
to: " + ZK_HOST) .build(), Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; } public String getName() { return "downconfig"; } protected void runImpl(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String zkHost = getZkHost(cli); if (zkHost == null) { throw new IllegalStateException("Solr at " + cli.getOptionValue("solrUrl") + " is running in standalone server mode, downconfig can only be used when running in SolrCloud mode.\n"); } try (SolrZkClient zkClient = new SolrZkClient(zkHost, 30000)) { echoIfVerbose("\nConnecting to ZooKeeper at " + zkHost + " ...", cli); String confName = cli.getOptionValue("confname"); String confDir = cli.getOptionValue("confdir"); Path configSetPath = Paths.get(confDir); // we try to be nice about having the "conf" in the directory, and we create it if it's not there. if (configSetPath.endsWith("/conf") == false) { configSetPath = Paths.get(configSetPath.toString(), "conf"); } if (Files.exists(configSetPath) == false) { Files.createDirectories(configSetPath); } echo("Downloading configset " + confName + " from ZooKeeper at " + zkHost + " to directory " + configSetPath.toAbsolutePath()); zkClient.downConfig(confName, configSetPath); } catch (Exception e) { log.error("Could not complete downconfig operation for reason: ", e); throw (e); } } } // End ConfigSetDownloadTool class public static class ZkRmTool extends ToolBase { public ZkRmTool() { this(CLIO.getOutStream()); } public ZkRmTool(PrintStream stdout) { super(stdout); } public Option[] getOptions() { return new Option[]{ Option.builder("path") .argName("path") .hasArg() .required(true) .desc("Path to remove") .build(), Option.builder("recurse") .argName("recurse") .hasArg() .required(false) .desc("Recurse (true|false, default is false)") .build(), Option.builder("zkHost") .argName("HOST") .hasArg() .required(true) .desc("Address of the Zookeeper ensemble; defaults to: " + ZK_HOST) .build(), Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; } public String getName() { return "rm"; } protected void runImpl(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String zkHost = getZkHost(cli); if (zkHost == null) { throw new IllegalStateException("Solr at " + cli.getOptionValue("zkHost") + " is running in standalone server mode, 'zk rm' can only be used when running in SolrCloud mode.\n"); } String target = cli.getOptionValue("path"); Boolean recurse = Boolean.parseBoolean(cli.getOptionValue("recurse")); String znode = target; if (target.toLowerCase(Locale.ROOT).startsWith("zk:")) { znode = target.substring(3); } if (znode.equals("/")) { throw new SolrServerException("You may not remove the root ZK node ('/')!"); } echoIfVerbose("\nConnecting to ZooKeeper at " + zkHost + " ...", cli); try (SolrZkClient zkClient = new SolrZkClient(zkHost, 30000)) { if (recurse == false && zkClient.getChildren(znode, null, true).size() != 0) { throw new SolrServerException("Zookeeper node " + znode + " has children and recurse has NOT been specified"); } echo("Removing Zookeeper node " + znode + " from ZooKeeper at " + zkHost + " recurse: " + Boolean.toString(recurse)); zkClient.clean(znode); } catch (Exception e) { log.error("Could not complete rm operation for reason: ", e); throw (e); } } } // End RmTool class public static class ZkLsTool extends ToolBase { public ZkLsTool() { this(CLIO.getOutStream()); } public ZkLsTool(PrintStream stdout) { super(stdout); } public Option[] getOptions() { 
return new Option[]{ Option.builder("path") .argName("path") .hasArg() .required(true) .desc("Path to list") .build(), Option.builder("recurse") .argName("recurse") .hasArg() .required(false) .desc("Recurse (true|false, default is false)") .build(), Option.builder("zkHost") .argName("HOST") .hasArg() .required(true) .desc("Address of the Zookeeper ensemble; defaults to: " + ZK_HOST) .build(), Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; } public String getName() { return "ls"; } protected void runImpl(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String zkHost = getZkHost(cli); if (zkHost == null) { throw new IllegalStateException("Solr at " + cli.getOptionValue("zkHost") + " is running in standalone server mode, 'zk ls' can only be used when running in SolrCloud mode.\n"); } try (SolrZkClient zkClient = new SolrZkClient(zkHost, 30000)) { echoIfVerbose("\nConnecting to ZooKeeper at " + zkHost + " ...", cli); String znode = cli.getOptionValue("path"); Boolean recurse = Boolean.parseBoolean(cli.getOptionValue("recurse")); echoIfVerbose("Getting listing for Zookeeper node " + znode + " from ZooKeeper at " + zkHost + " recurse: " + Boolean.toString(recurse), cli); stdout.print(zkClient.listZnode(znode, recurse)); } catch (Exception e) { log.error("Could not complete ls operation for reason: ", e); throw (e); } } } // End zkLsTool class public static class ZkMkrootTool extends ToolBase { public ZkMkrootTool() { this(CLIO.getOutStream()); } public ZkMkrootTool(PrintStream stdout) { super(stdout); } public Option[] getOptions() { return new Option[]{ Option.builder("path") .argName("path") .hasArg() .required(true) .desc("Path to create") .build(), Option.builder("zkHost") .argName("HOST") .hasArg() .required(true) .desc("Address of the Zookeeper ensemble; defaults to: " + ZK_HOST) .build(), Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; } public String getName() { return "mkroot"; } protected void runImpl(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String zkHost = getZkHost(cli); if (zkHost == null) { throw new IllegalStateException("Solr at " + cli.getOptionValue("zkHost") + " is running in standalone server mode, 'zk mkroot' can only be used when running in SolrCloud mode.\n"); } try (SolrZkClient zkClient = new SolrZkClient(zkHost, 30000)) { echoIfVerbose("\nConnecting to ZooKeeper at " + zkHost + " ...", cli); String znode = cli.getOptionValue("path"); echo("Creating Zookeeper path " + znode + " on ZooKeeper at " + zkHost); zkClient.makePath(znode, true); } catch (Exception e) { log.error("Could not complete mkroot operation for reason: ", e); throw (e); } } } // End zkMkrootTool class public static class ZkCpTool extends ToolBase { public ZkCpTool() { this(CLIO.getOutStream()); } public ZkCpTool(PrintStream stdout) { super(stdout); } public Option[] getOptions() { return new Option[]{ Option.builder("src") .argName("src") .hasArg() .required(true) .desc("Source file or directory, may be local or a Znode") .build(), Option.builder("dst") .argName("dst") .hasArg() .required(true) .desc("Destination of copy, may be local or a Znode.") .build(), Option.builder("recurse") .argName("recurse") .hasArg() .required(false) .desc("Recurse (true|false, default is false)") .build(), Option.builder("zkHost") .argName("HOST") .hasArg() .required(true) .desc("Address of the Zookeeper ensemble; defaults to: " + ZK_HOST) .build(), 
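// note: -src and -dst accept an optional "zk:" prefix for znodes or "file:" prefix for local paths;
// see the prefix handling in runImpl below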
Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; } public String getName() { return "cp"; } protected void runImpl(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String zkHost = getZkHost(cli); if (zkHost == null) { throw new IllegalStateException("Solr at " + cli.getOptionValue("solrUrl") + " is running in standalone server mode, cp can only be used when running in SolrCloud mode.\n"); } try (SolrZkClient zkClient = new SolrZkClient(zkHost, 30000)) { echoIfVerbose("\nConnecting to ZooKeeper at " + zkHost + " ...", cli); String src = cli.getOptionValue("src"); String dst = cli.getOptionValue("dst"); Boolean recurse = Boolean.parseBoolean(cli.getOptionValue("recurse")); echo("Copying from '" + src + "' to '" + dst + "'. ZooKeeper at " + zkHost); boolean srcIsZk = src.toLowerCase(Locale.ROOT).startsWith("zk:"); boolean dstIsZk = dst.toLowerCase(Locale.ROOT).startsWith("zk:"); String srcName = src; if (srcIsZk) { srcName = src.substring(3); } else if (srcName.toLowerCase(Locale.ROOT).startsWith("file:")) { srcName = srcName.substring(5); } String dstName = dst; if (dstIsZk) { dstName = dst.substring(3); } else { if (dstName.toLowerCase(Locale.ROOT).startsWith("file:")) { dstName = dstName.substring(5); } } zkClient.zkTransfer(srcName, srcIsZk, dstName, dstIsZk, recurse); } catch (Exception e) { log.error("Could not complete the zk operation for reason: ", e); throw (e); } } } // End CpTool class public static class ZkMvTool extends ToolBase { public ZkMvTool() { this(CLIO.getOutStream()); } public ZkMvTool(PrintStream stdout) { super(stdout); } public Option[] getOptions() { return new Option[]{ Option.builder("src") .argName("src") .hasArg() .required(true) .desc("Source Znode to movej from.") .build(), Option.builder("dst") .argName("dst") .hasArg() .required(true) .desc("Destination Znode to move to.") .build(), Option.builder("zkHost") .argName("HOST") .hasArg() .required(true) .desc("Address of the Zookeeper ensemble; defaults to: " + ZK_HOST) .build(), Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; } public String getName() { return "mv"; } protected void runImpl(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String zkHost = getZkHost(cli); if (zkHost == null) { throw new IllegalStateException("Solr at " + cli.getOptionValue("solrUrl") + " is running in standalone server mode, downconfig can only be used when running in SolrCloud mode.\n"); } try (SolrZkClient zkClient = new SolrZkClient(zkHost, 30000)) { echoIfVerbose("\nConnecting to ZooKeeper at " + zkHost + " ...", cli); String src = cli.getOptionValue("src"); String dst = cli.getOptionValue("dst"); if (src.toLowerCase(Locale.ROOT).startsWith("file:") || dst.toLowerCase(Locale.ROOT).startsWith("file:")) { throw new SolrServerException("mv command operates on znodes and 'file:' has been specified."); } String source = src; if (src.toLowerCase(Locale.ROOT).startsWith("zk")) { source = src.substring(3); } String dest = dst; if (dst.toLowerCase(Locale.ROOT).startsWith("zk")) { dest = dst.substring(3); } echo("Moving Znode " + source + " to " + dest + " on ZooKeeper at " + zkHost); zkClient.moveZnode(source, dest); } catch (Exception e) { log.error("Could not complete mv operation for reason: ", e); throw (e); } } } // End MvTool class public static class DeleteTool extends ToolBase { public DeleteTool() { this(CLIO.getOutStream()); } public DeleteTool(PrintStream stdout) { super(stdout); } 
public String getName() { return "delete"; } public Option[] getOptions() { return new Option[]{ Option.builder("solrUrl") .argName("URL") .hasArg() .required(false) .desc("Base Solr URL, default is " + DEFAULT_SOLR_URL) .build(), Option.builder(NAME) .argName("NAME") .hasArg() .required(true) .desc("Name of the core / collection to delete.") .build(), Option.builder("deleteConfig") .argName("true|false") .hasArg() .required(false) .desc("Flag to indicate if the underlying configuration directory for a collection should also be deleted; default is true") .build(), Option.builder("forceDeleteConfig") .required(false) .desc("Skip safety checks when deleting the configuration directory used by a collection") .build(), Option.builder("zkHost") .argName("HOST") .hasArg() .required(false) .desc("Address of the Zookeeper ensemble; defaults to: "+ZK_HOST) .build(), Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; } protected void runImpl(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); String solrUrl = cli.getOptionValue("solrUrl", DEFAULT_SOLR_URL); if (!solrUrl.endsWith("/")) solrUrl += "/"; String systemInfoUrl = solrUrl+"admin/info/system"; CloseableHttpClient httpClient = getHttpClient(); try { Map<String,Object> systemInfo = getJson(httpClient, systemInfoUrl, 2, true); if ("solrcloud".equals(systemInfo.get("mode"))) { deleteCollection(cli); } else { deleteCore(cli, httpClient, solrUrl); } } finally { closeHttpClient(httpClient); } } protected void deleteCollection(CommandLine cli) throws Exception { String zkHost = getZkHost(cli); try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty()).withSocketTimeout(30000).withConnectionTimeout(15000).build()) { echoIfVerbose("Connecting to ZooKeeper at " + zkHost, cli); cloudSolrClient.connect(); deleteCollection(cloudSolrClient, cli); } } protected void deleteCollection(CloudSolrClient cloudSolrClient, CommandLine cli) throws Exception { Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes(); if (liveNodes.isEmpty()) throw new IllegalStateException("No live nodes found! 
Cannot delete a collection until " + "there is at least 1 live node in the cluster."); String firstLiveNode = liveNodes.iterator().next(); ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader(); String baseUrl = zkStateReader.getBaseUrlForNodeName(firstLiveNode); String collectionName = cli.getOptionValue(NAME); if (!zkStateReader.getClusterState().hasCollection(collectionName)) { throw new IllegalArgumentException("Collection "+collectionName+" not found!"); } String configName = zkStateReader.readConfigName(collectionName); boolean deleteConfig = "true".equals(cli.getOptionValue("deleteConfig", "true")); if (deleteConfig && configName != null) { if (cli.hasOption("forceDeleteConfig")) { log.warn("Skipping safety checks, configuration directory {} will be deleted with impunity.", configName); } else { // need to scan all Collections to see if any are using the config Set<String> collections = zkStateReader.getClusterState().getCollectionsMap().keySet(); // give a little note to the user if there are many collections in case it takes a while if (collections.size() > 50) if (log.isInfoEnabled()) { log.info("Scanning {} to ensure no other collections are using config {}", collections.size(), configName); } for (String next : collections) { if (collectionName.equals(next)) continue; // don't check the collection we're deleting if (configName.equals(zkStateReader.readConfigName(next))) { deleteConfig = false; log.warn("Configuration directory {} is also being used by {}{}" , configName, next , "; configuration will not be deleted from ZooKeeper. You can pass the -forceDeleteConfig flag to force delete."); break; } } } } String deleteCollectionUrl = String.format(Locale.ROOT, "%s/admin/collections?action=DELETE&name=%s", baseUrl, collectionName); echoIfVerbose("\nDeleting collection '" + collectionName + "' using command:\n" + deleteCollectionUrl + "\n", cli); Map<String,Object> json = null; try { json = getJson(deleteCollectionUrl); } catch (SolrServerException sse) { throw new Exception("Failed to delete collection '"+collectionName+"' due to: "+sse.getMessage()); } if (deleteConfig) { String configZnode = "/configs/" + configName; try { zkStateReader.getZkClient().clean(configZnode); } catch (Exception exc) { echo("\nWARNING: Failed to delete configuration directory "+configZnode+" in ZooKeeper due to: "+ exc.getMessage()+"\nYou'll need to manually delete this znode using the zkcli script."); } } if (json != null) { CharArr arr = new CharArr(); new JSONWriter(arr, 2).write(json); echo(arr.toString()); echo("\n"); } echo("Deleted collection '" + collectionName + "' using command:\n" + deleteCollectionUrl); } protected void deleteCore(CommandLine cli, CloseableHttpClient httpClient, String solrUrl) throws Exception { String coreName = cli.getOptionValue(NAME); String deleteCoreUrl = String.format(Locale.ROOT, "%sadmin/cores?action=UNLOAD&core=%s&deleteIndex=true&deleteDataDir=true&deleteInstanceDir=true", solrUrl, coreName); echo("\nDeleting core '" + coreName + "' using command:\n" + deleteCoreUrl + "\n"); Map<String,Object> json = null; try { json = getJson(deleteCoreUrl); } catch (SolrServerException sse) { throw new Exception("Failed to delete core '"+coreName+"' due to: "+sse.getMessage()); } if (json != null) { CharArr arr = new CharArr(); new JSONWriter(arr, 2).write(json); echoIfVerbose(arr.toString(), cli); echoIfVerbose("\n", cli); } } } // end DeleteTool class /** * Sends a POST to the Config API to perform a specified action. 
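* <p>For example, {@code -action set-property -property updateHandler.autoSoftCommit.maxTime -value 3000}
* POSTs the body {"set-property":{"updateHandler.autoSoftCommit.maxTime":"3000"}} to /&lt;collection&gt;/config.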
*/ public static class ConfigTool extends ToolBase { public ConfigTool() { this(CLIO.getOutStream()); } public ConfigTool(PrintStream stdout) { super(stdout); } public String getName() { return "config"; } @Override public Option[] getOptions() { Option[] configOptions = new Option[] { Option.builder("action") .argName("ACTION") .hasArg() .required(false) .desc("Config API action, one of: set-property, unset-property; default is set-property") .build(), Option.builder("property") .argName("PROP") .hasArg() .required(true) .desc("Name of the Config API property to apply the action to, such as: updateHandler.autoSoftCommit.maxTime") .build(), Option.builder("value") .argName("VALUE") .hasArg() .required(false) .desc("Set the property to this value; accepts JSON objects and strings") .build(), Option.builder("solrUrl") .argName("HOST") .hasArg() .required(false) .desc("Base Solr URL, which can be used to determine the zkHost if that's not known") .build(), Option.builder("z") .argName("HOST") .hasArg() .required(false) .desc("Address of the Zookeeper ensemble") .longOpt("zkHost") .build(), Option.builder("p") .argName("PORT") .hasArg() .required(false) .desc("The port of the Solr node to use when applying the configuration change") .longOpt("port") .build(), Option.builder("s") .argName("SCHEME") .hasArg() .required(false) .desc("The scheme for accessing Solr. Accepted values: http or https. Default: http") .longOpt("scheme") .build() }; return joinOptions(configOptions, cloudOptions); } protected void runImpl(CommandLine cli) throws Exception { String solrUrl; try { solrUrl = resolveSolrUrl(cli); } catch (IllegalStateException e) { // Fallback to using the provided scheme and port final String scheme = cli.getOptionValue("scheme", "http"); if (cli.hasOption("port")) { solrUrl = scheme + "://localhost:" + cli.getOptionValue("port", "8983") + "/solr"; } else { throw e; } } String action = cli.getOptionValue("action", "set-property"); String collection = cli.getOptionValue("collection", "gettingstarted"); String property = cli.getOptionValue("property"); String value = cli.getOptionValue("value"); Map<String,Object> jsonObj = new HashMap<>(); if (value != null) { Map<String,String> setMap = new HashMap<>(); setMap.put(property, value); jsonObj.put(action, setMap); } else { jsonObj.put(action, property); } CharArr arr = new CharArr(); (new JSONWriter(arr, 0)).write(jsonObj); String jsonBody = arr.toString(); String updatePath = "/"+collection+"/config"; echo("\nPOSTing request to Config API: " + solrUrl + updatePath); echo(jsonBody); try (SolrClient solrClient = new HttpSolrClient.Builder(solrUrl).build()) { NamedList<Object> result = postJsonToSolr(solrClient, updatePath, jsonBody); Integer statusCode = (Integer)((NamedList)result.get("responseHeader")).get("status"); if (statusCode == 0) { if (value != null) { echo("Successfully " + action + " " + property + " to " + value); } else { echo("Successfully " + action + " " + property); } } else { throw new Exception("Failed to "+action+" property due to:\n"+result); } } } } // end ConfigTool class /** * Supports an interactive session with the user to launch (or relaunch) an example, such as the -e cloud example. */ public static class RunExampleTool extends ToolBase { private static final String PROMPT_FOR_NUMBER = "Please enter %s [%d]: "; private static final String PROMPT_FOR_NUMBER_IN_RANGE = "Please enter %s between %d and %d [%d]: "; private static final String PROMPT_NUMBER_TOO_SMALL = "%d is too small!
"+PROMPT_FOR_NUMBER_IN_RANGE; private static final String PROMPT_NUMBER_TOO_LARGE = "%d is too large! "+PROMPT_FOR_NUMBER_IN_RANGE; protected InputStream userInput; protected Executor executor; protected String script; protected File serverDir; protected File exampleDir; protected String urlScheme; /** * Default constructor used by the framework when running as a command-line application. */ public RunExampleTool() { this(null, System.in, CLIO.getOutStream()); } public RunExampleTool(Executor executor, InputStream userInput, PrintStream stdout) { super(stdout); this.executor = (executor != null) ? executor : new DefaultExecutor(); this.userInput = userInput; } public String getName() { return "run_example"; } public Option[] getOptions() { return new Option[] { Option.builder("noprompt") .required(false) .desc("Don't prompt for input; accept all defaults when running examples that accept user input") .build(), Option.builder("e") .argName("NAME") .hasArg() .required(true) .desc("Name of the example to launch, one of: cloud, techproducts, schemaless") .longOpt("example") .build(), Option.builder("script") .argName("PATH") .hasArg() .required(false) .desc("Path to the bin/solr script") .build(), Option.builder("d") .argName("DIR") .hasArg() .required(true) .desc("Path to the Solr server directory.") .longOpt("serverDir") .build(), Option.builder("force") .argName("FORCE") .desc("Force option in case Solr is run as root") .build(), Option.builder("exampleDir") .argName("DIR") .hasArg() .required(false) .desc("Path to the Solr example directory; if not provided, ${serverDir}/../example is expected to exist.") .build(), Option.builder("urlScheme") .argName("SCHEME") .hasArg() .required(false) .desc("Solr URL scheme: http or https, defaults to http if not specified") .build(), Option.builder("p") .argName("PORT") .hasArg() .required(false) .desc("Specify the port to start the Solr HTTP listener on; default is 8983") .longOpt("port") .build(), Option.builder("h") .argName("HOSTNAME") .hasArg() .required(false) .desc("Specify the hostname for this Solr instance") .longOpt("host") .build(), Option.builder("z") .argName("ZKHOST") .hasArg() .required(false) .desc("ZooKeeper connection string; only used when running in SolrCloud mode using -c") .longOpt("zkhost") .build(), Option.builder("c") .required(false) .desc("Start Solr in SolrCloud mode; if -z not supplied, an embedded ZooKeeper instance is started on Solr port+1000, such as 9983 if Solr is bound to 8983") .longOpt("cloud") .build(), Option.builder("m") .argName("MEM") .hasArg() .required(false) .desc("Sets the min (-Xms) and max (-Xmx) heap size for the JVM, such as: -m 4g results in: -Xms4g -Xmx4g; by default, this script sets the heap size to 512m") .longOpt("memory") .build(), Option.builder("a") .argName("OPTS") .hasArg() .required(false) .desc("Additional options to be passed to the JVM when starting example Solr server(s)") .longOpt("addlopts") .build() }; } protected void runImpl(CommandLine cli) throws Exception { this.urlScheme = cli.getOptionValue("urlScheme", "http"); UrlScheme.INSTANCE.setUrlScheme(this.urlScheme); serverDir = new File(cli.getOptionValue("serverDir")); if (!serverDir.isDirectory()) throw new IllegalArgumentException("Value of -serverDir option is invalid! "+ serverDir.getAbsolutePath()+" is not a directory!"); script = cli.getOptionValue("script"); if (script != null) { if (!(new File(script)).isFile()) throw new IllegalArgumentException("Value of -script option is invalid! 
"+script+" not found"); } else { File scriptFile = new File(serverDir.getParentFile(), "bin/solr"); if (scriptFile.isFile()) { script = scriptFile.getAbsolutePath(); } else { scriptFile = new File(serverDir.getParentFile(), "bin/solr.cmd"); if (scriptFile.isFile()) { script = scriptFile.getAbsolutePath(); } else { throw new IllegalArgumentException("Cannot locate the bin/solr script! Please pass -script to this application."); } } } exampleDir = (cli.hasOption("exampleDir")) ? new File(cli.getOptionValue("exampleDir")) : new File(serverDir.getParent(), "example"); if (!exampleDir.isDirectory()) throw new IllegalArgumentException("Value of -exampleDir option is invalid! "+ exampleDir.getAbsolutePath()+" is not a directory!"); if (verbose) { echo("Running with\nserverDir="+serverDir.getAbsolutePath()+ ",\nexampleDir="+exampleDir.getAbsolutePath()+"\nscript="+script); } String exampleType = cli.getOptionValue("example"); if ("cloud".equals(exampleType)) { runCloudExample(cli); } else if ("techproducts".equals(exampleType) || "schemaless".equals(exampleType)) { runExample(cli, exampleType); } else { throw new IllegalArgumentException("Unsupported example "+exampleType+ "! Please choose one of: cloud, schemaless, or techproducts"); } } protected void runExample(CommandLine cli, String exampleName) throws Exception { File exDir = setupExampleDir(serverDir, exampleDir, exampleName); String collectionName = "schemaless".equals(exampleName) ? "gettingstarted" : exampleName; String configSet = "techproducts".equals(exampleName) ? "sample_techproducts_configs" : "_default"; boolean isCloudMode = cli.hasOption('c'); String zkHost = cli.getOptionValue('z'); int port = Integer.parseInt(cli.getOptionValue('p', "8983")); Map<String,Object> nodeStatus = startSolr(new File(exDir, "solr"), isCloudMode, cli, port, zkHost, 30); // invoke the CreateTool File configsetsDir = new File(serverDir, "solr/configsets"); String solrUrl = (String)nodeStatus.get("baseUrl"); // safe check if core / collection already exists boolean alreadyExists = false; if (nodeStatus.get("cloud") != null) { String collectionListUrl = solrUrl+"/admin/collections?action=list"; if (safeCheckCollectionExists(collectionListUrl, collectionName)) { alreadyExists = true; echo("\nWARNING: Collection '"+collectionName+ "' already exists!\nChecked collection existence using Collections API command:\n"+collectionListUrl+"\n"); } } else { String coreName = collectionName; String coreStatusUrl = solrUrl+"/admin/cores?action=STATUS&core="+coreName; if (safeCheckCoreExists(coreStatusUrl, coreName)) { alreadyExists = true; echo("\nWARNING: Core '" + coreName + "' already exists!\nChecked core existence using Core API command:\n" + coreStatusUrl+"\n"); } } if (!alreadyExists) { String[] createArgs = new String[] { "-name", collectionName, "-shards", "1", "-replicationFactor", "1", "-confname", collectionName, "-confdir", configSet, "-configsetsDir", configsetsDir.getAbsolutePath(), "-solrUrl", solrUrl }; CreateTool createTool = new CreateTool(stdout); int createCode = createTool.runTool(processCommandLineArgs(joinCommonAndToolOptions(createTool.getOptions()), createArgs)); if (createCode != 0) throw new Exception("Failed to create "+collectionName+" using command: "+ Arrays.asList(createArgs)); } if ("techproducts".equals(exampleName) && !alreadyExists) { File exampledocsDir = new File(exampleDir, "exampledocs"); if (!exampledocsDir.isDirectory()) { File readOnlyExampleDir = new File(serverDir.getParentFile(), "example"); if 
(readOnlyExampleDir.isDirectory()) { exampledocsDir = new File(readOnlyExampleDir, "exampledocs"); } } if (exampledocsDir.isDirectory()) { String updateUrl = String.format(Locale.ROOT, "%s/%s/update", solrUrl, collectionName); echo("Indexing tech product example docs from "+exampledocsDir.getAbsolutePath()); String currentPropVal = System.getProperty("url"); System.setProperty("url", updateUrl); SimplePostTool.main(new String[] {exampledocsDir.getAbsolutePath()+"/*.xml"}); if (currentPropVal != null) { System.setProperty("url", currentPropVal); // reset } else { System.clearProperty("url"); } } else { echo("exampledocs directory not found, skipping indexing step for the techproducts example"); } } echo("\nSolr "+exampleName+" example launched successfully. Direct your Web browser to "+solrUrl+" to visit the Solr Admin UI"); } protected void runCloudExample(CommandLine cli) throws Exception { boolean prompt = !cli.hasOption("noprompt"); int numNodes = 2; int[] cloudPorts = new int[]{ 8983, 7574, 8984, 7575 }; File cloudDir = new File(exampleDir, "cloud"); if (!cloudDir.isDirectory()) cloudDir.mkdir(); echo("\nWelcome to the SolrCloud example!\n"); Scanner readInput = prompt ? new Scanner(userInput, StandardCharsets.UTF_8.name()) : null; if (prompt) { echo("This interactive session will help you launch a SolrCloud cluster on your local workstation."); // get the number of nodes to start numNodes = promptForInt(readInput, "To begin, how many Solr nodes would you like to run in your local cluster? (specify 1-4 nodes) [2]: ", "a number", numNodes, 1, 4); echo("Ok, let's start up "+numNodes+" Solr nodes for your example SolrCloud cluster."); // get the port for each node for (int n=0; n < numNodes; n++) { String promptMsg = String.format(Locale.ROOT, "Please enter the port for node%d [%d]: ", (n+1), cloudPorts[n]); int port = promptForPort(readInput, n+1, promptMsg, cloudPorts[n]); while (!isPortAvailable(port)) { port = promptForPort(readInput, n+1, "Oops! Looks like port "+port+ " is already being used by another process.
Please choose a different port.", cloudPorts[n]); } cloudPorts[n] = port; if (verbose) echo("Using port "+port+" for node "+(n+1)); } } else { echo("Starting up "+numNodes+" Solr nodes for your example SolrCloud cluster.\n"); } // setup a unique solr.solr.home directory for each node File node1Dir = setupExampleDir(serverDir, cloudDir, "node1"); for (int n=2; n <= numNodes; n++) { File nodeNDir = new File(cloudDir, "node"+n); if (!nodeNDir.isDirectory()) { echo("Cloning " + node1Dir.getAbsolutePath() + " into\n "+nodeNDir.getAbsolutePath()); FileUtils.copyDirectory(node1Dir, nodeNDir); } else { echo(nodeNDir.getAbsolutePath()+" already exists."); } } // deal with extra args passed to the script to run the example String zkHost = cli.getOptionValue('z'); // start the first node (most likely with embedded ZK) Map<String,Object> nodeStatus = startSolr(new File(node1Dir,"solr"), true, cli, cloudPorts[0], zkHost, 30); if (zkHost == null) { @SuppressWarnings("unchecked") Map<String,Object> cloudStatus = (Map<String,Object>)nodeStatus.get("cloud"); if (cloudStatus != null) { String zookeeper = (String)cloudStatus.get("ZooKeeper"); if (zookeeper != null) zkHost = zookeeper; } if (zkHost == null) throw new Exception("Could not get the ZooKeeper connection string for node1!"); } if (numNodes > 1) { // start the other nodes for (int n = 1; n < numNodes; n++) startSolr(new File(cloudDir, "node"+(n+1)+"/solr"), true, cli, cloudPorts[n], zkHost, 30); } String solrUrl = (String)nodeStatus.get("baseUrl"); if (solrUrl.endsWith("/")) solrUrl = solrUrl.substring(0,solrUrl.length()-1); // wait until live nodes == numNodes waitToSeeLiveNodes(10 /* max wait */, zkHost, numNodes); // create the collection String collectionName = createCloudExampleCollection(numNodes, readInput, prompt, solrUrl); // update the config to enable soft auto-commit echo("\nEnabling auto soft-commits with maxTime 3 secs using the Config API"); setCollectionConfigProperty(solrUrl, collectionName, "updateHandler.autoSoftCommit.maxTime", "3000"); echo("\n\nSolrCloud example running, please visit: "+solrUrl+" \n"); } protected void setCollectionConfigProperty(String solrUrl, String collectionName, String propName, String propValue) { ConfigTool configTool = new ConfigTool(stdout); String[] configArgs = new String[] { "-collection", collectionName, "-property", propName, "-value", propValue, "-solrUrl", solrUrl }; // let's not fail if we get this far ... just report error and finish up try { configTool.runTool(processCommandLineArgs(joinCommonAndToolOptions(configTool.getOptions()), configArgs)); } catch (Exception exc) { CLIO.err("Failed to update '"+propName+"' property due to: "+exc); } } protected void waitToSeeLiveNodes(int maxWaitSecs, String zkHost, int numNodes) { CloudSolrClient cloudClient = null; try { cloudClient = new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty()) .build(); cloudClient.connect(); Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes(); int numLiveNodes = (liveNodes != null) ? 
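// below: poll the cluster state every 2 seconds until numNodes live nodes are seen or maxWaitSecs elapses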
liveNodes.size() : 0; long timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(maxWaitSecs, TimeUnit.SECONDS); while (System.nanoTime() < timeout && numLiveNodes < numNodes) { echo("\nWaiting up to "+maxWaitSecs+" seconds to see "+ (numNodes-numLiveNodes)+" more nodes join the SolrCloud cluster ..."); try { Thread.sleep(2000); } catch (InterruptedException ie) { Thread.interrupted(); } liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes(); numLiveNodes = (liveNodes != null) ? liveNodes.size() : 0; } if (numLiveNodes < numNodes) { echo("\nWARNING: Only "+numLiveNodes+" of "+numNodes+ " are active in the cluster after "+maxWaitSecs+ " seconds! Please check the solr.log for each node to look for errors.\n"); } } catch (Exception exc) { CLIO.err("Failed to see if "+numNodes+" joined the SolrCloud cluster due to: "+exc); } finally { if (cloudClient != null) { try { cloudClient.close(); } catch (Exception ignore) {} } } } protected Map<String,Object> startSolr(File solrHomeDir, boolean cloudMode, CommandLine cli, int port, String zkHost, int maxWaitSecs) throws Exception { String extraArgs = readExtraArgs(cli.getArgs()); String host = cli.getOptionValue('h'); String memory = cli.getOptionValue('m'); String hostArg = (host != null && !"localhost".equals(host)) ? " -h "+host : ""; String zkHostArg = (zkHost != null) ? " -z "+zkHost : ""; String memArg = (memory != null) ? " -m "+memory : ""; String cloudModeArg = cloudMode ? "-cloud " : ""; String forceArg = cli.hasOption("force") ? " -force" : ""; String addlOpts = cli.getOptionValue('a'); String addlOptsArg = (addlOpts != null) ? " -a \""+addlOpts+"\"" : ""; File cwd = new File(System.getProperty("user.dir")); File binDir = (new File(script)).getParentFile(); boolean isWindows = (OS.isFamilyDOS() || OS.isFamilyWin9x() || OS.isFamilyWindows()); String callScript = (!isWindows && cwd.equals(binDir.getParentFile())) ? "bin/solr" : script; String cwdPath = cwd.getAbsolutePath(); String solrHome = solrHomeDir.getAbsolutePath(); // don't display a huge path for solr home if it is relative to the cwd if (!isWindows && cwdPath.length() > 1 && solrHome.startsWith(cwdPath)) solrHome = solrHome.substring(cwdPath.length()+1); String startCmd = String.format(Locale.ROOT, "\"%s\" start %s -p %d -s \"%s\" %s %s %s %s %s %s", callScript, cloudModeArg, port, solrHome, hostArg, zkHostArg, memArg, forceArg, extraArgs, addlOptsArg); startCmd = startCmd.replaceAll("\\s+", " ").trim(); // for pretty printing echo("\nStarting up Solr on port " + port + " using command:"); echo(startCmd + "\n"); String solrUrl = String.format(Locale.ROOT, "%s://%s:%d/solr", urlScheme, (host != null ? 
host : "localhost"), port); Map<String,Object> nodeStatus = checkPortConflict(solrUrl, solrHomeDir, port, cli); if (nodeStatus != null) return nodeStatus; // the server they are trying to start is already running int code = 0; if (isWindows) { // On Windows, the execution doesn't return, so we have to execute async // and when calling the script, it seems to be inheriting the environment that launched this app // so we have to prune out env vars that may cause issues Map<String,String> startEnv = new HashMap<>(); Map<String,String> procEnv = EnvironmentUtils.getProcEnvironment(); if (procEnv != null) { for (Map.Entry<String, String> entry : procEnv.entrySet()) { String envVar = entry.getKey(); String envVarVal = entry.getValue(); if (envVarVal != null && !"EXAMPLE".equals(envVar) && !envVar.startsWith("SOLR_")) { startEnv.put(envVar, envVarVal); } } } DefaultExecuteResultHandler handler = new DefaultExecuteResultHandler(); executor.execute(org.apache.commons.exec.CommandLine.parse(startCmd), startEnv, handler); // wait for execution. try { handler.waitFor(3000); } catch (InterruptedException ie) { // safe to ignore ... Thread.interrupted(); } if (handler.hasResult() && handler.getExitValue() != 0) { throw new Exception("Failed to start Solr using command: "+startCmd+" Exception : "+handler.getException()); } } else { try { code = executor.execute(org.apache.commons.exec.CommandLine.parse(startCmd)); } catch(ExecuteException e){ throw new Exception("Failed to start Solr using command: "+startCmd+" Exception : "+ e); } } if (code != 0) throw new Exception("Failed to start Solr using command: "+startCmd); return getNodeStatus(solrUrl, maxWaitSecs); } protected Map<String,Object> checkPortConflict(String solrUrl, File solrHomeDir, int port, CommandLine cli) { // quickly check if the port is in use if (isPortAvailable(port)) return null; // not in use ... try to start Map<String,Object> nodeStatus = null; try { nodeStatus = (new StatusTool()).getStatus(solrUrl); } catch (Exception ignore) { /* just trying to determine if this example is already running. */ } if (nodeStatus != null) { String solr_home = (String)nodeStatus.get("solr_home"); if (solr_home != null) { String solrHomePath = solrHomeDir.getAbsolutePath(); if (!solrHomePath.endsWith("/")) solrHomePath += "/"; if (!solr_home.endsWith("/")) solr_home += "/"; if (solrHomePath.equals(solr_home)) { CharArr arr = new CharArr(); new JSONWriter(arr, 2).write(nodeStatus); echo("Solr is already setup and running on port " + port + " with status:\n" + arr.toString()); echo("\nIf this is not the example node you are trying to start, please choose a different port."); nodeStatus.put("baseUrl", solrUrl); return nodeStatus; } } } throw new IllegalStateException("Port "+port+" is already being used by another process."); } protected String readExtraArgs(String[] extraArgsArr) { String extraArgs = ""; if (extraArgsArr != null && extraArgsArr.length > 0) { StringBuilder sb = new StringBuilder(); int app = 0; for (int e=0; e < extraArgsArr.length; e++) { String arg = extraArgsArr[e]; if ("e".equals(arg) || "example".equals(arg)) { e++; // skip over the example arg continue; } if (app > 0) sb.append(" "); sb.append(arg); ++app; } extraArgs = sb.toString().trim(); } return extraArgs; } protected String createCloudExampleCollection(int numNodes, Scanner readInput, boolean prompt, String solrUrl) throws Exception { // yay! 
numNodes SolrCloud nodes running int numShards = 2; int replicationFactor = 2; String cloudConfig = "_default"; String collectionName = "gettingstarted"; File configsetsDir = new File(serverDir, "solr/configsets"); String collectionListUrl = solrUrl+"/admin/collections?action=list"; if (prompt) { echo("\nNow let's create a new collection for indexing documents in your "+numNodes+"-node cluster."); while (true) { collectionName = prompt(readInput, "Please provide a name for your new collection: ["+collectionName+"] ", collectionName); // Test for existence and then prompt to either create another or skip the create step if (safeCheckCollectionExists(collectionListUrl, collectionName)) { echo("\nCollection '"+collectionName+"' already exists!"); int oneOrTwo = promptForInt(readInput, "Do you want to re-use the existing collection or create a new one? Enter 1 to reuse, 2 to create new [1]: ", "a 1 or 2", 1, 1, 2); if (oneOrTwo == 1) { return collectionName; } else { continue; } } else { break; // user selected a collection that doesn't exist ... proceed on } } numShards = promptForInt(readInput, "How many shards would you like to split " + collectionName + " into? [2]", "a shard count", 2, 1, 4); replicationFactor = promptForInt(readInput, "How many replicas per shard would you like to create? [2] ", "a replication factor", 2, 1, 4); echo("Please choose a configuration for the "+collectionName+" collection, available options are:"); String validConfigs = "_default or sample_techproducts_configs ["+cloudConfig+"] "; cloudConfig = prompt(readInput, validConfigs, cloudConfig); // validate the cloudConfig name while (!isValidConfig(configsetsDir, cloudConfig)) { echo(cloudConfig+" is not a valid configuration directory! Please choose a configuration for the "+collectionName+" collection, available options are:"); cloudConfig = prompt(readInput, validConfigs, cloudConfig); } } else { // must verify if default collection exists if (safeCheckCollectionExists(collectionListUrl, collectionName)) { echo("\nCollection '"+collectionName+"' already exists! Skipping collection creation step."); return collectionName; } } // invoke the CreateCollectionTool String[] createArgs = new String[] { "-name", collectionName, "-shards", String.valueOf(numShards), "-replicationFactor", String.valueOf(replicationFactor), "-confname", collectionName, "-confdir", cloudConfig, "-configsetsDir", configsetsDir.getAbsolutePath(), "-solrUrl", solrUrl }; CreateCollectionTool createCollectionTool = new CreateCollectionTool(stdout); int createCode = createCollectionTool.runTool( processCommandLineArgs(joinCommonAndToolOptions(createCollectionTool.getOptions()), createArgs)); if (createCode != 0) throw new Exception("Failed to create collection using command: "+ Arrays.asList(createArgs)); return collectionName; } protected boolean isValidConfig(File configsetsDir, String config) { File configDir = new File(configsetsDir, config); if (configDir.isDirectory()) return true; // not a built-in configset ... maybe it's a custom directory? 
configDir = new File(config); if (configDir.isDirectory()) return true; return false; } protected Map<String,Object> getNodeStatus(String solrUrl, int maxWaitSecs) throws Exception { StatusTool statusTool = new StatusTool(); if (verbose) echo("\nChecking status of Solr at " + solrUrl + " ..."); URL solrURL = new URL(solrUrl); Map<String,Object> nodeStatus = statusTool.waitToSeeSolrUp(solrUrl, maxWaitSecs); nodeStatus.put("baseUrl", solrUrl); CharArr arr = new CharArr(); new JSONWriter(arr, 2).write(nodeStatus); String mode = (nodeStatus.get("cloud") != null) ? "cloud" : "standalone"; if (verbose) echo("\nSolr is running on "+solrURL.getPort()+" in " + mode + " mode with status:\n" + arr.toString()); return nodeStatus; } protected File setupExampleDir(File serverDir, File exampleParentDir, String dirName) throws IOException { File solrXml = new File(serverDir, "solr/solr.xml"); if (!solrXml.isFile()) throw new IllegalArgumentException("Value of -serverDir option is invalid! "+ solrXml.getAbsolutePath()+" not found!"); File zooCfg = new File(serverDir, "solr/zoo.cfg"); if (!zooCfg.isFile()) throw new IllegalArgumentException("Value of -serverDir option is invalid! "+ zooCfg.getAbsolutePath()+" not found!"); File solrHomeDir = new File(exampleParentDir, dirName+"/solr"); if (!solrHomeDir.isDirectory()) { echo("Creating Solr home directory "+solrHomeDir); solrHomeDir.mkdirs(); } else { echo("Solr home directory "+solrHomeDir.getAbsolutePath()+" already exists."); } copyIfNeeded(solrXml, new File(solrHomeDir, "solr.xml")); copyIfNeeded(zooCfg, new File(solrHomeDir, "zoo.cfg")); return solrHomeDir.getParentFile(); } protected void copyIfNeeded(File src, File dest) throws IOException { if (!dest.isFile()) FileUtils.copyFile(src, dest); if (!dest.isFile()) throw new IllegalStateException("Required file "+dest.getAbsolutePath()+" not found!"); } protected boolean isPortAvailable(int port) { Socket s = null; try { s = new Socket("localhost", port); return false; } catch (IOException e) { return true; } finally { if (s != null) { try { s.close(); } catch (IOException ignore) {} } } } protected Integer promptForPort(Scanner s, int node, String prompt, Integer defVal) { return promptForInt(s, prompt, "a port for node "+node, defVal, null, null); } protected Integer promptForInt(Scanner s, String prompt, String label, Integer defVal, Integer min, Integer max) { Integer inputAsInt = null; String value = prompt(s, prompt, null /* default is null since we handle that here */); if (value != null) { int attempts = 3; while (value != null && --attempts > 0) { try { inputAsInt = Integer.valueOf(value); if (min != null) { if (inputAsInt < min) { value = prompt(s, String.format(Locale.ROOT, PROMPT_NUMBER_TOO_SMALL, inputAsInt, label, min, max, defVal)); inputAsInt = null; continue; } } if (max != null) { if (inputAsInt > max) { value = prompt(s, String.format(Locale.ROOT, PROMPT_NUMBER_TOO_LARGE, inputAsInt, label, min, max, defVal)); inputAsInt = null; continue; } } } catch (NumberFormatException nfe) { if (verbose) echo(value+" is not a number!"); if (min != null && max != null) { value = prompt(s, String.format(Locale.ROOT, PROMPT_FOR_NUMBER_IN_RANGE, label, min, max, defVal)); } else { value = prompt(s, String.format(Locale.ROOT, PROMPT_FOR_NUMBER, label, defVal)); } } } if (attempts == 0 && value != null && inputAsInt == null) echo("Too many failed attempts! Going with default value "+defVal); } return (inputAsInt != null) ? 
inputAsInt : defVal; } protected String prompt(Scanner s, String prompt) { return prompt(s, prompt, null); } protected String prompt(Scanner s, String prompt, String defaultValue) { echo(prompt); String nextInput = s.nextLine(); if (nextInput != null) { nextInput = nextInput.trim(); if (nextInput.isEmpty()) nextInput = null; } return (nextInput != null) ? nextInput : defaultValue; } } // end RunExampleTool class /** * Asserts various conditions and exits with an error code if they fail, else continues with no output */ public static class AssertTool extends ToolBase { private static String message = null; private static boolean useExitCode = false; private static Optional<Long> timeoutMs = Optional.empty(); public AssertTool() { this(CLIO.getOutStream()); } public AssertTool(PrintStream stdout) { super(stdout); } public String getName() { return "assert"; } public Option[] getOptions() { return new Option[] { Option.builder("R") .desc("Asserts that we are NOT the root user") .longOpt("not-root") .build(), Option.builder("r") .desc("Asserts that we are the root user") .longOpt("root") .build(), Option.builder("S") .desc("Asserts that Solr is NOT running on a certain URL. Default timeout is 1000ms") .longOpt("not-started") .hasArg(true) .argName("url") .build(), Option.builder("s") .desc("Asserts that Solr is running on a certain URL. Default timeout is 1000ms") .longOpt("started") .hasArg(true) .argName("url") .build(), Option.builder("u") .desc("Asserts that we run as the same user that owns <directory>") .longOpt("same-user") .hasArg(true) .argName("directory") .build(), Option.builder("x") .desc("Asserts that directory <directory> exists") .longOpt("exists") .hasArg(true) .argName("directory") .build(), Option.builder("X") .desc("Asserts that directory <directory> does NOT exist") .longOpt("not-exists") .hasArg(true) .argName("directory") .build(), Option.builder("c") .desc("Asserts that Solr is running in cloud mode. Also fails if Solr not running. URL should be for root Solr path.") .longOpt("cloud") .hasArg(true) .argName("url") .build(), Option.builder("C") .desc("Asserts that Solr is not running in cloud mode. Also fails if Solr not running. URL should be for root Solr path.") .longOpt("not-cloud") .hasArg(true) .argName("url") .build(), Option.builder("m") .desc("Exception message to be used in place of the default error message") .longOpt("message") .hasArg(true) .argName("message") .build(), Option.builder("t") .desc("Timeout in ms for commands supporting a timeout") .longOpt("timeout") .hasArg(true) .type(Long.class) .argName("ms") .build(), Option.builder("e") .desc("Return an exit code instead of printing an error message on assert failure.") .longOpt("exitcode") .build() }; } public int runTool(CommandLine cli) throws Exception { verbose = cli.hasOption("verbose"); int toolExitStatus = 0; try { toolExitStatus = runAssert(cli); } catch (Exception exc) { // since this is a CLI, spare the user the stacktrace String excMsg = exc.getMessage(); if (excMsg != null) { if (verbose) { CLIO.err("\nERROR: " + exc + "\n"); } else { CLIO.err("\nERROR: " + excMsg + "\n"); } toolExitStatus = 100; // Exit >= 100 means error, else means number of tests that failed } else { throw exc; } } return toolExitStatus; } @Override protected void runImpl(CommandLine cli) throws Exception { runAssert(cli); } /** * Custom run method which may return exit code * @param cli the command line object * @return 0 on success, or a number corresponding to number of tests that failed * @throws Exception if a tool failed, e.g.
authentication failure */ protected int runAssert(CommandLine cli) throws Exception { if (cli.getOptions().length == 0 || cli.getArgs().length > 0 || cli.hasOption("h")) { new HelpFormatter().printHelp("bin/solr assert [-m <message>] [-e] [-rR] [-s <url>] [-S <url>] [-c <url>] [-C <url>] [-u <dir>] [-x <dir>] [-X <dir>]", getToolOptions(this)); return 1; } if (cli.hasOption("m")) { message = cli.getOptionValue("m"); } if (cli.hasOption("t")) { timeoutMs = Optional.of(Long.parseLong(cli.getOptionValue("t"))); } if (cli.hasOption("e")) { useExitCode = true; } int ret = 0; if (cli.hasOption("r")) { ret += assertRootUser(); } if (cli.hasOption("R")) { ret += assertNotRootUser(); } if (cli.hasOption("x")) { ret += assertFileExists(cli.getOptionValue("x")); } if (cli.hasOption("X")) { ret += assertFileNotExists(cli.getOptionValue("X")); } if (cli.hasOption("u")) { ret += sameUser(cli.getOptionValue("u")); } if (cli.hasOption("s")) { ret += assertSolrRunning(cli.getOptionValue("s")); } if (cli.hasOption("S")) { ret += assertSolrNotRunning(cli.getOptionValue("S")); } if (cli.hasOption("c")) { ret += assertSolrRunningInCloudMode(cli.getOptionValue("c")); } if (cli.hasOption("C")) { ret += assertSolrNotRunningInCloudMode(cli.getOptionValue("C")); } return ret; } public static int assertSolrRunning(String url) throws Exception { StatusTool status = new StatusTool(); try { status.waitToSeeSolrUp(url, timeoutMs.orElse(1000L).intValue() / 1000); } catch (Exception se) { if (exceptionIsAuthRelated(se)) { throw se; } return exitOrException("Solr is not running on url " + url + " after " + timeoutMs.orElse(1000L) / 1000 + "s"); } return 0; } public static int assertSolrNotRunning(String url) throws Exception { StatusTool status = new StatusTool(); long timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeoutMs.orElse(1000L), TimeUnit.MILLISECONDS); try { attemptHttpHead(url, getHttpClient()); } catch (SolrException se) { throw se; // Auth error } catch (IOException e) { log.debug("Opening connection to {} failed, Solr does not seem to be running", url, e); return 0; } while (System.nanoTime() < timeout) { try { status.waitToSeeSolrUp(url, 1); try { log.debug("Solr still up. Waiting before trying again to see if it was stopped"); Thread.sleep(1000L); } catch (InterruptedException interrupted) { timeout = 0; // stop looping } } catch (Exception se) { if (exceptionIsAuthRelated(se)) { throw se; } return exitOrException(se.getMessage()); } } return exitOrException("Solr is still running at " + url + " after " + timeoutMs.orElse(1000L) / 1000 + "s"); } public static int assertSolrRunningInCloudMode(String url) throws Exception { if (! isSolrRunningOn(url)) { return exitOrException("Solr is not running on url " + url + " after " + timeoutMs.orElse(1000L) / 1000 + "s"); } if (! runningSolrIsCloud(url)) { return exitOrException("Solr is not running in cloud mode on " + url); } return 0; } public static int assertSolrNotRunningInCloudMode(String url) throws Exception { if (! isSolrRunningOn(url)) { return exitOrException("Solr is not running on url " + url + " after " + timeoutMs.orElse(1000L) / 1000 + "s"); } if (runningSolrIsCloud(url)) { return exitOrException("Solr is not running in standalone mode on " + url); } return 0; } public static int sameUser(String directory) throws Exception { if (Files.exists(Paths.get(directory))) { String userForDir = userForDir(Paths.get(directory)); if (!currentUser().equals(userForDir)) { return exitOrException("Must run as user " + userForDir + ". 
We are " + currentUser()); } } else { return exitOrException("Directory " + directory + " does not exist."); } return 0; } public static int assertFileExists(String directory) throws Exception { if (! Files.exists(Paths.get(directory))) { return exitOrException("Directory " + directory + " does not exist."); } return 0; } public static int assertFileNotExists(String directory) throws Exception { if (Files.exists(Paths.get(directory))) { return exitOrException("Directory " + directory + " should not exist."); } return 0; } public static int assertRootUser() throws Exception { if (!currentUser().equals("root")) { return exitOrException("Must run as root user"); } return 0; } public static int assertNotRootUser() throws Exception { if (currentUser().equals("root")) { return exitOrException("Not allowed to run as root user"); } return 0; } public static String currentUser() { return System.getProperty("user.name"); } public static String userForDir(Path pathToDir) { try { FileOwnerAttributeView ownerAttributeView = Files.getFileAttributeView(pathToDir, FileOwnerAttributeView.class); return ownerAttributeView.getOwner().getName(); } catch (IOException e) { return "N/A"; } } private static int exitOrException(String msg) throws AssertionFailureException { if (useExitCode) { return 1; } else { throw new AssertionFailureException(message != null ? message : msg); } } private static boolean isSolrRunningOn(String url) throws Exception { StatusTool status = new StatusTool(); try { status.waitToSeeSolrUp(url, timeoutMs.orElse(1000L).intValue() / 1000); return true; } catch (Exception se) { if (exceptionIsAuthRelated(se)) { throw se; } return false; } } private static boolean runningSolrIsCloud(String url) throws Exception { try (final HttpSolrClient client = new HttpSolrClient.Builder(url).build()) { final SolrRequest<CollectionAdminResponse> request = new CollectionAdminRequest.ClusterStatus(); final CollectionAdminResponse response = request.process(client); return response != null; } catch (Exception e) { if (exceptionIsAuthRelated(e)) { throw e; } return false; } } } // end AssertTool class public static class AssertionFailureException extends Exception { public AssertionFailureException(String message) { super(message); } } // Authentication tool public static class AuthTool extends ToolBase { public AuthTool() { this(CLIO.getOutStream()); } public AuthTool(PrintStream stdout) { super(stdout); } public String getName() { return "auth"; } List<String> authenticationVariables = Arrays.asList("SOLR_AUTHENTICATION_CLIENT_BUILDER", "SOLR_AUTH_TYPE", "SOLR_AUTHENTICATION_OPTS"); public Option[] getOptions() { return new Option[]{ Option.builder("type") .argName("type") .hasArg() .desc("The authentication mechanism to enable (basicAuth or kerberos). Defaults to 'basicAuth'.") .build(), Option.builder("credentials") .argName("credentials") .hasArg() .desc("Credentials in the format username:password. Example: -credentials solr:SolrRocks") .build(), Option.builder("prompt") .argName("prompt") .hasArg() .desc("Prompts the user to provide the credentials. Use either -credentials or -prompt, not both") .build(), Option.builder("config") .argName("config") .hasArgs() .desc("Configuration parameters (Solr startup parameters). 
Required for Kerberos authentication") .build(), Option.builder("blockUnknown") .argName("blockUnknown") .desc("Blocks all access for unknown users (requires authentication for all endpoints)") .hasArg() .build(), Option.builder("solrIncludeFile") .argName("solrIncludeFile") .hasArg() .desc("The Solr include file which contains overridable environment variables for configuring Solr configurations") .build(), Option.builder("updateIncludeFileOnly") .argName("updateIncludeFileOnly") .desc("Only update the solr.in.sh or solr.in.cmd file, and skip actual enabling/disabling" + " authentication (i.e. don't update security.json)") .hasArg() .build(), Option.builder("authConfDir") .argName("authConfDir") .hasArg() .required() .desc("This is where any authentication related configuration files, if any, would be placed.") .build(), Option.builder("solrUrl") .argName("solrUrl") .hasArg() .desc("Solr URL") .build(), Option.builder("zkHost") .argName("zkHost") .hasArg() .desc("ZooKeeper host") .build(), Option.builder("verbose") .required(false) .desc("Enable more verbose command output.") .build() }; } private void ensureArgumentIsValidBooleanIfPresent(CommandLine cli, String argName) { if (cli.hasOption(argName)) { final String value = cli.getOptionValue(argName); final Boolean parsedBoolean = BooleanUtils.toBooleanObject(value); if (parsedBoolean == null) { echo("Argument [" + argName + "] must be either true or false, but was [" + value + "]"); exit(1); } } } @Override public int runTool(CommandLine cli) throws Exception { raiseLogLevelUnlessVerbose(cli); if (cli.getOptions().length == 0 || cli.getArgs().length == 0 || cli.getArgs().length > 1 || cli.hasOption("h")) { new HelpFormatter().printHelp("bin/solr auth <enable|disable> [OPTIONS]", getToolOptions(this)); return 1; } ensureArgumentIsValidBooleanIfPresent(cli, "blockUnknown"); ensureArgumentIsValidBooleanIfPresent(cli, "updateIncludeFileOnly"); String type = cli.getOptionValue("type", "basicAuth"); switch (type) { case "basicAuth": return handleBasicAuth(cli); case "kerberos": return handleKerberos(cli); default: CLIO.out("Only type=basicAuth or kerberos supported at the moment."); exit(1); } return 1; } private int handleKerberos(CommandLine cli) throws Exception { String cmd = cli.getArgs()[0]; boolean updateIncludeFileOnly = Boolean.parseBoolean(cli.getOptionValue("updateIncludeFileOnly", "false")); String securityJson = "{" + "\n \"authentication\":{" + "\n \"class\":\"solr.KerberosPlugin\"" + "\n }" + "\n}"; switch (cmd) { case "enable": String zkHost = null; boolean zkInaccessible = false; if (!updateIncludeFileOnly) { try { zkHost = getZkHost(cli); } catch (Exception ex) { CLIO.out("Unable to access ZooKeeper. Please add the following security.json to ZooKeeper (in case of SolrCloud):\n" + securityJson + "\n"); zkInaccessible = true; } if (zkHost == null) { if (zkInaccessible == false) { CLIO.out("Unable to access ZooKeeper. Please add the following security.json to ZooKeeper (in case of SolrCloud):\n" + securityJson + "\n"); zkInaccessible = true; } } // check if security is already enabled or not if (!zkInaccessible) { try (SolrZkClient zkClient = new SolrZkClient(zkHost, 10000)) { if (zkClient.exists("/security.json", true)) { byte oldSecurityBytes[] = zkClient.getData("/security.json", null, null, true); if (!"{}".equals(new String(oldSecurityBytes, StandardCharsets.UTF_8).trim())) { CLIO.out("Security is already enabled. You can disable it with 'bin/solr auth disable'. 
Existing security.json: \n" + new String(oldSecurityBytes, StandardCharsets.UTF_8)); exit(1); } } } catch (Exception ex) { if (zkInaccessible == false) { CLIO.out("Unable to access ZooKeeper. Please add the following security.json to ZooKeeper (in case of SolrCloud):\n" + securityJson + "\n"); zkInaccessible = true; } } } } if (!updateIncludeFileOnly) { if (!zkInaccessible) { echoIfVerbose("Uploading following security.json: " + securityJson, cli); try (SolrZkClient zkClient = new SolrZkClient(zkHost, 10000)) { zkClient.setData("/security.json", securityJson.getBytes(StandardCharsets.UTF_8), true); } catch (Exception ex) { if (zkInaccessible == false) { CLIO.out("Unable to access ZooKeeper. Please add the following security.json to ZooKeeper (in case of SolrCloud):\n" + securityJson); zkInaccessible = true; } } } } String config = StrUtils.join(Arrays.asList(cli.getOptionValues("config")), ' '); // config is base64 encoded (to get around parsing problems), decode it config = config.replaceAll(" ", ""); config = new String(Base64.getDecoder() .decode(config.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8); config = config.replaceAll("\n", "").replaceAll("\r", ""); String solrIncludeFilename = cli.getOptionValue("solrIncludeFile"); File includeFile = new File(solrIncludeFilename); if (includeFile.exists() == false || includeFile.canWrite() == false) { CLIO.out("Solr include file " + solrIncludeFilename + " doesn't exist or is not writeable."); printAuthEnablingInstructions(config); System.exit(0); } // update the solr.in.sh file to contain the necessary authentication lines updateIncludeFileEnableAuth(includeFile, null, config, cli); echo("Successfully enabled Kerberos authentication; please restart any running Solr nodes."); return 0; case "disable": if (!updateIncludeFileOnly) { zkHost = getZkHost(cli); if (zkHost == null) { stdout.print("ZooKeeper host not found. Solr should be running in cloud mode."); exit(1); } echoIfVerbose("Uploading following security.json: {}", cli); try (SolrZkClient zkClient = new SolrZkClient(zkHost, 10000)) { zkClient.setData("/security.json", "{}".getBytes(StandardCharsets.UTF_8), true); } } solrIncludeFilename = cli.getOptionValue("solrIncludeFile"); includeFile = new File(solrIncludeFilename); if (!includeFile.exists() || !includeFile.canWrite()) { CLIO.out("Solr include file " + solrIncludeFilename + " doesn't exist or is not writeable."); CLIO.out("Security has been disabled. 
Please remove any SOLR_AUTH_TYPE or SOLR_AUTHENTICATION_OPTS configuration from solr.in.sh/solr.in.cmd.\n"); System.exit(0); } // update the solr.in.sh file to comment out the necessary authentication lines updateIncludeFileDisableAuth(includeFile, cli); return 0; default: CLIO.out("Valid auth commands are: enable, disable"); exit(1); } CLIO.out("Options not understood."); new HelpFormatter().printHelp("bin/solr auth <enable|disable> [OPTIONS]", getToolOptions(this)); return 1; } private int handleBasicAuth(CommandLine cli) throws Exception { String cmd = cli.getArgs()[0]; boolean prompt = Boolean.parseBoolean(cli.getOptionValue("prompt", "false")); boolean updateIncludeFileOnly = Boolean.parseBoolean(cli.getOptionValue("updateIncludeFileOnly", "false")); switch (cmd) { case "enable": if (!prompt && !cli.hasOption("credentials")) { CLIO.out("Option -credentials or -prompt is required with enable."); new HelpFormatter().printHelp("bin/solr auth <enable|disable> [OPTIONS]", getToolOptions(this)); exit(1); } else if (!prompt && (cli.getOptionValue("credentials") == null || !cli.getOptionValue("credentials").contains(":"))) { CLIO.out("Option -credentials is not in correct format."); new HelpFormatter().printHelp("bin/solr auth <enable|disable> [OPTIONS]", getToolOptions(this)); exit(1); } String zkHost = null; if (!updateIncludeFileOnly) { try { zkHost = getZkHost(cli); } catch (Exception ex) { if (cli.hasOption("zkHost")) { CLIO.out("Couldn't get ZooKeeper host. Please make sure that ZooKeeper is running and the correct zkHost has been passed in."); } else { CLIO.out("Couldn't get ZooKeeper host. Please make sure Solr is running in cloud mode, or a zkHost has been passed in."); } exit(1); } if (zkHost == null) { if (cli.hasOption("zkHost")) { CLIO.out("Couldn't get ZooKeeper host. Please make sure that ZooKeeper is running and the correct zkHost has been passed in."); } else { CLIO.out("Couldn't get ZooKeeper host. Please make sure Solr is running in cloud mode, or a zkHost has been passed in."); } exit(1); } // check if security is already enabled or not try (SolrZkClient zkClient = new SolrZkClient(zkHost, 10000)) { if (zkClient.exists("/security.json", true)) { byte oldSecurityBytes[] = zkClient.getData("/security.json", null, null, true); if (!"{}".equals(new String(oldSecurityBytes, StandardCharsets.UTF_8).trim())) { CLIO.out("Security is already enabled. You can disable it with 'bin/solr auth disable'. 
Existing security.json: \n" + new String(oldSecurityBytes, StandardCharsets.UTF_8)); exit(1); } } } } String username, password; if (cli.hasOption("credentials")) { String credentials = cli.getOptionValue("credentials"); username = credentials.split(":")[0]; password = credentials.split(":")[1]; } else { Console console = System.console(); username = console.readLine("Enter username: "); password = new String(console.readPassword("Enter password: ")); } boolean blockUnknown = Boolean.valueOf(cli.getOptionValue("blockUnknown", "true")); String securityJson = "{" + "\n  \"authentication\":{" + "\n   \"blockUnknown\": " + blockUnknown + "," + "\n   \"class\":\"solr.BasicAuthPlugin\"," + "\n   \"credentials\":{\"" + username + "\":\"" + Sha256AuthenticationProvider.getSaltedHashedValue(password) + "\"}" + "\n  }," + "\n  \"authorization\":{" + "\n   \"class\":\"solr.RuleBasedAuthorizationPlugin\"," + "\n   \"permissions\":[" + "\n {\"name\":\"security-edit\", \"role\":\"admin\"}," + "\n {\"name\":\"collection-admin-edit\", \"role\":\"admin\"}," + "\n {\"name\":\"core-admin-edit\", \"role\":\"admin\"}" + "\n   ]," + "\n   \"user-role\":{\"" + username + "\":\"admin\"}" + "\n  }" + "\n}"; if (!updateIncludeFileOnly) { echoIfVerbose("Uploading following security.json: " + securityJson, cli); try (SolrZkClient zkClient = new SolrZkClient(zkHost, 10000)) { zkClient.setData("/security.json", securityJson.getBytes(StandardCharsets.UTF_8), true); } } String solrIncludeFilename = cli.getOptionValue("solrIncludeFile"); File includeFile = new File(solrIncludeFilename); if (includeFile.exists() == false || includeFile.canWrite() == false) { CLIO.out("Solr include file " + solrIncludeFilename + " doesn't exist or is not writeable."); printAuthEnablingInstructions(username, password); System.exit(0); } String authConfDir = cli.getOptionValue("authConfDir"); File basicAuthConfFile = new File(authConfDir + File.separator + "basicAuth.conf"); if (basicAuthConfFile.getParentFile().canWrite() == false) { CLIO.out("Cannot write to file: " + basicAuthConfFile.getAbsolutePath()); printAuthEnablingInstructions(username, password); System.exit(0); } FileUtils.writeStringToFile(basicAuthConfFile, "httpBasicAuthUser=" + username + "\nhttpBasicAuthPassword=" + password, StandardCharsets.UTF_8); // update the solr.in.sh file to contain the necessary authentication lines updateIncludeFileEnableAuth(includeFile, basicAuthConfFile.getAbsolutePath(), null, cli); final String successMessage = String.format(Locale.ROOT, "Successfully enabled basic auth with username [%s] and password [%s].", username, password); echo(successMessage); return 0; case "disable": if (!updateIncludeFileOnly) { zkHost = getZkHost(cli); if (zkHost == null) { stdout.print("ZooKeeper host not found. Solr should be running in cloud mode."); exit(1); } echoIfVerbose("Uploading following security.json: {}", cli); try (SolrZkClient zkClient = new SolrZkClient(zkHost, 10000)) { zkClient.setData("/security.json", "{}".getBytes(StandardCharsets.UTF_8), true); } } solrIncludeFilename = cli.getOptionValue("solrIncludeFile"); includeFile = new File(solrIncludeFilename); if (!includeFile.exists() || !includeFile.canWrite()) { CLIO.out("Solr include file " + solrIncludeFilename + " doesn't exist or is not writeable."); CLIO.out("Security has been disabled. 
Please remove any SOLR_AUTH_TYPE or SOLR_AUTHENTICATION_OPTS configuration from solr.in.sh/solr.in.cmd.\n"); System.exit(0); } // update the solr.in.sh file to comment out the necessary authentication lines updateIncludeFileDisableAuth(includeFile, cli); return 0; default: CLIO.out("Valid auth commands are: enable, disable"); exit(1); } CLIO.out("Options not understood."); new HelpFormatter().printHelp("bin/solr auth <enable|disable> [OPTIONS]", getToolOptions(this)); return 1; } private void printAuthEnablingInstructions(String username, String password) { if (SystemUtils.IS_OS_WINDOWS) { CLIO.out("\nAdd the following lines to the solr.in.cmd file so that the solr.cmd script can use them subsequently.\n"); CLIO.out("set SOLR_AUTH_TYPE=basic\n" + "set SOLR_AUTHENTICATION_OPTS=\"-Dbasicauth=" + username + ":" + password + "\"\n"); } else { CLIO.out("\nAdd the following lines to the solr.in.sh file so that the ./solr script can use them subsequently.\n"); CLIO.out("SOLR_AUTH_TYPE=\"basic\"\n" + "SOLR_AUTHENTICATION_OPTS=\"-Dbasicauth=" + username + ":" + password + "\"\n"); } } private void printAuthEnablingInstructions(String kerberosConfig) { if (SystemUtils.IS_OS_WINDOWS) { CLIO.out("\nAdd the following lines to the solr.in.cmd file so that the solr.cmd script can use them subsequently.\n"); CLIO.out("set SOLR_AUTH_TYPE=kerberos\n" + "set SOLR_AUTHENTICATION_OPTS=\"" + kerberosConfig + "\"\n"); } else { CLIO.out("\nAdd the following lines to the solr.in.sh file so that the ./solr script can use them subsequently.\n"); CLIO.out("SOLR_AUTH_TYPE=\"kerberos\"\n" + "SOLR_AUTHENTICATION_OPTS=\"" + kerberosConfig + "\"\n"); } } /** * This will update the include file (e.g. solr.in.sh / solr.in.cmd) with the authentication parameters. * @param includeFile The include file * @param basicAuthConfFile If basicAuth, the path of the file containing credentials. If not, null. * @param kerberosConfig If kerberos, the config string containing startup parameters. If not, null.
* @param cli The command line options */ private void updateIncludeFileEnableAuth(File includeFile, String basicAuthConfFile, String kerberosConfig, CommandLine cli) throws IOException { assert !(basicAuthConfFile != null && kerberosConfig != null); // only one of the two needs to be populated List<String> includeFileLines = FileUtils.readLines(includeFile, StandardCharsets.UTF_8); for (int i=0; i<includeFileLines.size(); i++) { String line = includeFileLines.get(i); if (authenticationVariables.contains(line.trim().split("=")[0].trim())) { // Non-Windows includeFileLines.set(i, "# " + line); } if (line.trim().split("=")[0].trim().startsWith("set ") && authenticationVariables.contains(line.trim().split("=")[0].trim().substring(4))) { // Windows includeFileLines.set(i, "REM " + line); } } includeFileLines.add(""); // blank line if (basicAuthConfFile != null) { // for basicAuth if (SystemUtils.IS_OS_WINDOWS) { includeFileLines.add("REM The following lines added by solr.cmd for enabling BasicAuth"); includeFileLines.add("set SOLR_AUTH_TYPE=basic"); includeFileLines.add("set SOLR_AUTHENTICATION_OPTS=\"-Dsolr.httpclient.config=" + basicAuthConfFile + "\""); } else { includeFileLines.add("# The following lines added by ./solr for enabling BasicAuth"); includeFileLines.add("SOLR_AUTH_TYPE=\"basic\""); includeFileLines.add("SOLR_AUTHENTICATION_OPTS=\"-Dsolr.httpclient.config=" + basicAuthConfFile + "\""); } } else { // for kerberos if (SystemUtils.IS_OS_WINDOWS) { includeFileLines.add("REM The following lines added by solr.cmd for enabling Kerberos"); includeFileLines.add("set SOLR_AUTH_TYPE=kerberos"); includeFileLines.add("set SOLR_AUTHENTICATION_OPTS=\"" + kerberosConfig + "\""); } else { includeFileLines.add("# The following lines added by ./solr for enabling Kerberos"); includeFileLines.add("SOLR_AUTH_TYPE=\"kerberos\""); includeFileLines.add("SOLR_AUTHENTICATION_OPTS=\"" + kerberosConfig + "\""); } } FileUtils.writeLines(includeFile, StandardCharsets.UTF_8.name(), includeFileLines); if (basicAuthConfFile != null) { echoIfVerbose("Written out credentials file: " + basicAuthConfFile, cli); } echoIfVerbose("Updated Solr include file: " + includeFile.getAbsolutePath(), cli); } private void updateIncludeFileDisableAuth(File includeFile, CommandLine cli) throws IOException { List<String> includeFileLines = FileUtils.readLines(includeFile, StandardCharsets.UTF_8); boolean hasChanged = false; for (int i=0; i<includeFileLines.size(); i++) { String line = includeFileLines.get(i); if (authenticationVariables.contains(line.trim().split("=")[0].trim())) { // Non-Windows includeFileLines.set(i, "# " + line); hasChanged = true; } if (line.trim().split("=")[0].trim().startsWith("set ") && authenticationVariables.contains(line.trim().split("=")[0].trim().substring(4))) { // Windows includeFileLines.set(i, "REM " + line); hasChanged = true; } } if (hasChanged) { FileUtils.writeLines(includeFile, StandardCharsets.UTF_8.name(), includeFileLines); echoIfVerbose("Commented out necessary lines from " + includeFile.getAbsolutePath(), cli); } } @Override protected void runImpl(CommandLine cli) throws Exception {} } public static class UtilsTool extends ToolBase { private Path serverPath; private Path logsPath; private boolean beQuiet; public UtilsTool() { this(CLIO.getOutStream()); } public UtilsTool(PrintStream stdout) { super(stdout); } public String getName() { return "utils"; } public Option[] getOptions() { return new Option[]{ Option.builder("s") .argName("path") .hasArg() .desc("Path to server dir. 
Required if logs path is relative") .build(), Option.builder("l") .argName("path") .hasArg() .desc("Path to logs dir. If relative, also provide server dir with -s") .build(), Option.builder("q") .desc("Be quiet, don't print to stdout, only return exit codes") .build(), Option.builder("remove_old_solr_logs") .argName("daysToKeep") .hasArg() .type(Integer.class) .desc("Remove solr logs older than <daysToKeep> days") .build(), Option.builder("rotate_solr_logs") .argName("generations") .hasArg() .type(Integer.class) .desc("Rotate solr.log to solr.log.1, etc.") .build(), Option.builder("archive_gc_logs") .desc("Archive old garbage collection logs into archived/") .build(), Option.builder("archive_console_logs") .desc("Archive old console logs into archived/") .build() }; } @Override public int runTool(CommandLine cli) throws Exception { if (cli.getOptions().length == 0 || cli.getArgs().length > 0 || cli.hasOption("h")) { new HelpFormatter().printHelp("bin/solr utils [OPTIONS]", getToolOptions(this)); return 1; } if (cli.hasOption("s")) { serverPath = Paths.get(cli.getOptionValue("s")); } if (cli.hasOption("l")) { logsPath = Paths.get(cli.getOptionValue("l")); } if (cli.hasOption("q")) { beQuiet = cli.hasOption("q"); } if (cli.hasOption("remove_old_solr_logs")) { if (removeOldSolrLogs(Integer.parseInt(cli.getOptionValue("remove_old_solr_logs"))) > 0) return 1; } if (cli.hasOption("rotate_solr_logs")) { if (rotateSolrLogs(Integer.parseInt(cli.getOptionValue("rotate_solr_logs"))) > 0) return 1; } if (cli.hasOption("archive_gc_logs")) { if (archiveGcLogs() > 0) return 1; } if (cli.hasOption("archive_console_logs")) { if (archiveConsoleLogs() > 0) return 1; } return 0; } /** * Moves gc logs into archived/ * @return 0 on success * @throws Exception on failure */ public int archiveGcLogs() throws Exception { prepareLogsPath(); Path archivePath = logsPath.resolve("archived"); if (!archivePath.toFile().exists()) { Files.createDirectories(archivePath); } List<Path> archived = Files.find(archivePath, 1, (f, a) -> a.isRegularFile() && String.valueOf(f.getFileName()).matches("^solr_gc[_.].+")) .collect(Collectors.toList()); for (Path p : archived) { Files.delete(p); } List<Path> files = Files.find(logsPath, 1, (f, a) -> a.isRegularFile() && String.valueOf(f.getFileName()).matches("^solr_gc[_.].+")) .collect(Collectors.toList()); if (files.size() > 0) { out("Archiving " + files.size() + " old GC log files to " + archivePath); for (Path p : files) { Files.move(p, archivePath.resolve(p.getFileName()), StandardCopyOption.REPLACE_EXISTING); } } return 0; } /** * Moves console log(s) into archived/ * @return 0 on success * @throws Exception on failure */ public int archiveConsoleLogs() throws Exception { prepareLogsPath(); Path archivePath = logsPath.resolve("archived"); if (!archivePath.toFile().exists()) { Files.createDirectories(archivePath); } List<Path> archived = Files.find(archivePath, 1, (f, a) -> a.isRegularFile() && String.valueOf(f.getFileName()).endsWith("-console.log")) .collect(Collectors.toList()); for (Path p : archived) { Files.delete(p); } List<Path> files = Files.find(logsPath, 1, (f, a) -> a.isRegularFile() && String.valueOf(f.getFileName()).endsWith("-console.log")) .collect(Collectors.toList()); if (files.size() > 0) { out("Archiving " + files.size() + " console log files to " + archivePath); for (Path p : files) { Files.move(p, archivePath.resolve(p.getFileName()), StandardCopyOption.REPLACE_EXISTING); } } return 0; } /** * Rotates solr.log before starting Solr. Mimics log4j2 behavior, i.e. 
with generations=9: * <pre> *   solr.log.9 (and higher) are deleted *   solr.log.8 -&gt; solr.log.9 *   solr.log.7 -&gt; solr.log.8 *   ... *   solr.log -&gt; solr.log.1 * </pre> * @param generations number of generations to keep. Should agree with setting in log4j2.xml * @return 0 if success * @throws Exception if problems */ public int rotateSolrLogs(int generations) throws Exception { prepareLogsPath(); if (logsPath.toFile().exists() && logsPath.resolve("solr.log").toFile().exists()) { out("Rotating solr logs, keeping a max of "+generations+" generations"); try (Stream<Path> files = Files.find(logsPath, 1, (f, a) -> a.isRegularFile() && String.valueOf(f.getFileName()).startsWith("solr.log.")) .sorted((b,a) -> Integer.valueOf(a.getFileName().toString().substring(9)) .compareTo(Integer.valueOf(b.getFileName().toString().substring(9))))) { files.forEach(p -> { try { int number = Integer.parseInt(p.getFileName().toString().substring(9)); if (number >= generations) { Files.delete(p); } else { Path renamed = p.getParent().resolve("solr.log." + (number + 1)); Files.move(p, renamed); } } catch (IOException e) { out("Problem during rotation of log files: " + e.getMessage()); } }); } catch (NumberFormatException nfe) { throw new Exception("Do not know how to rotate solr.log.<ext> with non-numeric extension. Rotate aborted.", nfe); } Files.move(logsPath.resolve("solr.log"), logsPath.resolve("solr.log.1")); } return 0; } /** * Deletes time-stamped old solr logs if older than n days * @param daysToKeep number of days logs to keep before deleting * @return 0 on success * @throws Exception on failure */ public int removeOldSolrLogs(int daysToKeep) throws Exception { prepareLogsPath(); if (logsPath.toFile().exists()) { try (Stream<Path> stream = Files.find(logsPath, 2, (f, a) -> a.isRegularFile() && Instant.now().minus(Period.ofDays(daysToKeep)).isAfter(a.lastModifiedTime().toInstant()) && String.valueOf(f.getFileName()).startsWith("solr_log_"))) { List<Path> files = stream.collect(Collectors.toList()); if (files.size() > 0) { out("Deleting "+files.size() + " solr_log_* files older than " + daysToKeep + " days."); for (Path p : files) { Files.delete(p); } } } } return 0; } // Private methods to follow private void out(String message) { if (!beQuiet) { stdout.print(message + "\n"); } } private void prepareLogsPath() throws Exception { if (logsPath == null) { throw new Exception("Command requires the -l <log-directory> option"); } if (!logsPath.isAbsolute()) { if (serverPath != null && serverPath.isAbsolute() && Files.exists(serverPath)) { logsPath = serverPath.resolve(logsPath); } else { throw new Exception("Logs directory must be an absolute path, or -s must be supplied"); } } } @Override protected void runImpl(CommandLine cli) throws Exception { } public void setLogPath(Path logsPath) { this.logsPath = logsPath; } public void setServerPath(Path serverPath) { this.serverPath = serverPath; } public void setQuiet(boolean beQuiet) { this.beQuiet = beQuiet; } } // end UtilsTool class }
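To make the generation scheme described in the rotateSolrLogs Javadoc above easier to picture, here is a minimal, self-contained sketch of the same idea. The RotateSketch class is hypothetical (it is not part of SolrCLI), it assumes Java 11+ for Files.writeString, and unlike the real tool it only scans file numbers inside the retention window.

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;

// Hypothetical sketch of the generation-based rotation described above:
// solr.log.N for N >= generations is deleted, each surviving solr.log.N
// becomes solr.log.(N+1), and solr.log becomes solr.log.1.
public class RotateSketch {
  static void rotate(Path logsDir, int generations) throws IOException {
    // Walk from the highest generation down so no target name is occupied.
    for (int n = generations; n >= 1; n--) {
      Path p = logsDir.resolve("solr.log." + n);
      if (!Files.exists(p)) continue;
      if (n >= generations) {
        Files.delete(p); // fell out of the retention window
      } else {
        Files.move(p, logsDir.resolve("solr.log." + (n + 1)));
      }
    }
    Path current = logsDir.resolve("solr.log");
    if (Files.exists(current)) {
      Files.move(current, logsDir.resolve("solr.log.1"));
    }
  }

  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("rotate-sketch");
    Files.writeString(dir.resolve("solr.log"), "current run");
    Files.writeString(dir.resolve("solr.log.1"), "previous run");
    rotate(dir, 9);
    try (DirectoryStream<Path> ds = Files.newDirectoryStream(dir)) {
      ds.forEach(p -> System.out.println(p.getFileName())); // solr.log.1, solr.log.2
    }
  }
}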
1
39,930
ZK is properly spelled 'ZooKeeper' (capital 'K').
apache-lucene-solr
java
@@ -73,6 +73,8 @@ public class RubyGapicSurfaceTransformer implements ModelToViewTransformer { ImmutableList.of("GOOGLE_CLOUD_KEYFILE", "GCLOUD_KEYFILE"); private static final List<String> DEFAULT_JSON_ENV_VARS = ImmutableList.of("GOOGLE_CLOUD_KEYFILE_JSON", "GCLOUD_KEYFILE_JSON"); + private static final List<String> DEFAULT_PATHS = + ImmutableList.of("~/.config/gcloud/application_default_credentials.json"); private final GapicCodePathMapper pathMapper; private final PackageMetadataConfig packageConfig;
1
/* Copyright 2017 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.transformer.ruby; import com.google.api.codegen.GeneratorVersionProvider; import com.google.api.codegen.InterfaceView; import com.google.api.codegen.TargetLanguage; import com.google.api.codegen.config.GapicInterfaceConfig; import com.google.api.codegen.config.GapicProductConfig; import com.google.api.codegen.config.PackageMetadataConfig; import com.google.api.codegen.config.ProductServiceConfig; import com.google.api.codegen.gapic.GapicCodePathMapper; import com.google.api.codegen.ruby.RubyUtil; import com.google.api.codegen.transformer.BatchingTransformer; import com.google.api.codegen.transformer.DynamicLangApiMethodTransformer; import com.google.api.codegen.transformer.FeatureConfig; import com.google.api.codegen.transformer.FileHeaderTransformer; import com.google.api.codegen.transformer.GapicInterfaceContext; import com.google.api.codegen.transformer.GrpcStubTransformer; import com.google.api.codegen.transformer.ModelToViewTransformer; import com.google.api.codegen.transformer.ModelTypeTable; import com.google.api.codegen.transformer.PageStreamingTransformer; import com.google.api.codegen.transformer.PathTemplateTransformer; import com.google.api.codegen.transformer.ServiceTransformer; import com.google.api.codegen.transformer.SurfaceNamer; import com.google.api.codegen.util.Name; import com.google.api.codegen.util.ruby.RubyTypeTable; import com.google.api.codegen.viewmodel.ApiMethodView; import com.google.api.codegen.viewmodel.CredentialsClassFileView; import com.google.api.codegen.viewmodel.CredentialsClassView; import com.google.api.codegen.viewmodel.DynamicLangXApiView; import com.google.api.codegen.viewmodel.GrpcStreamingDetailView; import com.google.api.codegen.viewmodel.ImportFileView; import com.google.api.codegen.viewmodel.ImportSectionView; import com.google.api.codegen.viewmodel.ImportTypeView; import com.google.api.codegen.viewmodel.LongRunningOperationDetailView; import com.google.api.codegen.viewmodel.PathTemplateGetterFunctionView; import com.google.api.codegen.viewmodel.ViewModel; import com.google.api.codegen.viewmodel.metadata.ModuleView; import com.google.api.codegen.viewmodel.metadata.SimpleModuleView; import com.google.api.codegen.viewmodel.metadata.VersionIndexRequireView; import com.google.api.codegen.viewmodel.metadata.VersionIndexType; import com.google.api.codegen.viewmodel.metadata.VersionIndexView; import com.google.api.tools.framework.model.Interface; import com.google.api.tools.framework.model.Method; import com.google.api.tools.framework.model.Model; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import java.io.File; import java.util.ArrayList; import java.util.List; /** The ModelToViewTransformer to transform a Model into the standard GAPIC surface in Ruby. 
*/ public class RubyGapicSurfaceTransformer implements ModelToViewTransformer { private static final String VERSION_INDEX_TEMPLATE_FILE = "ruby/version_index.snip"; private static final String XAPI_TEMPLATE_FILENAME = "ruby/main.snip"; private static final String CREDENTIALS_CLASS_TEMPLATE_FILE = "ruby/credentials.snip"; // This assumes the api is a google-cloud api. private static final List<String> DEFAULT_PATH_ENV_VARS = ImmutableList.of("GOOGLE_CLOUD_KEYFILE", "GCLOUD_KEYFILE"); private static final List<String> DEFAULT_JSON_ENV_VARS = ImmutableList.of("GOOGLE_CLOUD_KEYFILE_JSON", "GCLOUD_KEYFILE_JSON"); private final GapicCodePathMapper pathMapper; private final PackageMetadataConfig packageConfig; private final FileHeaderTransformer fileHeaderTransformer = new FileHeaderTransformer(new RubyImportSectionTransformer()); private final DynamicLangApiMethodTransformer apiMethodTransformer = new DynamicLangApiMethodTransformer(new RubyApiMethodParamTransformer()); private final ServiceTransformer serviceTransformer = new ServiceTransformer(); private final GrpcStubTransformer grpcStubTransformer = new GrpcStubTransformer(); private final PageStreamingTransformer pageStreamingTransformer = new PageStreamingTransformer(); private final BatchingTransformer batchingTransformer = new BatchingTransformer(); private final PathTemplateTransformer pathTemplateTransformer = new PathTemplateTransformer(); public RubyGapicSurfaceTransformer( GapicCodePathMapper pathMapper, PackageMetadataConfig packageConfig) { this.pathMapper = pathMapper; this.packageConfig = packageConfig; } @Override public List<String> getTemplateFileNames() { return ImmutableList.of( XAPI_TEMPLATE_FILENAME, VERSION_INDEX_TEMPLATE_FILE, CREDENTIALS_CLASS_TEMPLATE_FILE); } @Override public List<ViewModel> transform(Model model, GapicProductConfig productConfig) { ImmutableList.Builder<ViewModel> views = ImmutableList.builder(); views.add(generateVersionIndexView(model, productConfig)); if (RubyUtil.hasMajorVersion(productConfig.getPackageName())) { views.add(generateTopLevelIndexView(model, productConfig)); } views.addAll(generateApiClasses(model, productConfig)); if (!RubyUtil.isLongrunning(productConfig.getPackageName())) { views.add(generateCredentialsView(model, productConfig)); } return views.build(); } private List<ViewModel> generateApiClasses(Model model, GapicProductConfig productConfig) { SurfaceNamer namer = new RubySurfaceNamer(productConfig.getPackageName()); FeatureConfig featureConfig = new RubyFeatureConfig(); ImmutableList.Builder<ViewModel> serviceSurfaces = ImmutableList.builder(); for (Interface apiInterface : new InterfaceView().getElementIterable(model)) { ModelTypeTable modelTypeTable = new ModelTypeTable( new RubyTypeTable(productConfig.getPackageName()), new RubyModelTypeNameConverter(productConfig.getPackageName())); GapicInterfaceContext context = GapicInterfaceContext.create( apiInterface, productConfig, modelTypeTable, namer, featureConfig); serviceSurfaces.add(generateApiClass(context)); } return serviceSurfaces.build(); } private ViewModel generateApiClass(GapicInterfaceContext context) { SurfaceNamer namer = context.getNamer(); String subPath = pathMapper.getOutputPath(context.getInterface(), context.getProductConfig()); String name = namer.getApiWrapperClassName(context.getInterfaceConfig()); List<ApiMethodView> methods = generateApiMethods(context); DynamicLangXApiView.Builder xapiClass = DynamicLangXApiView.newBuilder(); xapiClass.templateFileName(XAPI_TEMPLATE_FILENAME); 
xapiClass.outputPath(namer.getSourceFilePath(subPath, name)); xapiClass.fileHeader(fileHeaderTransformer.generateFileHeader(context)); xapiClass.protoFilename(context.getInterface().getFile().getSimpleName()); xapiClass.name(name); xapiClass.doc(serviceTransformer.generateServiceDoc(context, methods.get(0))); xapiClass.stubs(grpcStubTransformer.generateGrpcStubs(context)); ProductServiceConfig productServiceConfig = new ProductServiceConfig(); xapiClass.serviceAddress( productServiceConfig.getServiceAddress(context.getInterface().getModel())); xapiClass.servicePort(productServiceConfig.getServicePort()); xapiClass.serviceTitle(productServiceConfig.getTitle(context.getInterface().getModel())); xapiClass.authScopes(productServiceConfig.getAuthScopes(context.getInterface().getModel())); xapiClass.hasDefaultServiceAddress(context.getInterfaceConfig().hasDefaultServiceAddress()); xapiClass.hasDefaultServiceScopes(context.getInterfaceConfig().hasDefaultServiceScopes()); xapiClass.pageStreamingDescriptors(pageStreamingTransformer.generateDescriptors(context)); xapiClass.batchingDescriptors(batchingTransformer.generateDescriptors(context)); xapiClass.longRunningDescriptors(ImmutableList.<LongRunningOperationDetailView>of()); xapiClass.grpcStreamingDescriptors(ImmutableList.<GrpcStreamingDetailView>of()); xapiClass.hasPageStreamingMethods(context.getInterfaceConfig().hasPageStreamingMethods()); xapiClass.hasBatchingMethods(context.getInterfaceConfig().hasBatchingMethods()); xapiClass.hasLongRunningOperations(context.getInterfaceConfig().hasLongRunningOperations()); xapiClass.pathTemplates(pathTemplateTransformer.generatePathTemplates(context)); xapiClass.formatResourceFunctions( pathTemplateTransformer.generateFormatResourceFunctions(context)); xapiClass.parseResourceFunctions( pathTemplateTransformer.generateParseResourceFunctions(context)); xapiClass.pathTemplateGetterFunctions(ImmutableList.<PathTemplateGetterFunctionView>of()); xapiClass.methodKeys(ImmutableList.<String>of()); xapiClass.interfaceKey(context.getInterface().getFullName()); xapiClass.clientConfigPath(namer.getClientConfigPath(context.getInterface())); xapiClass.grpcClientTypeName( namer.getAndSaveNicknameForGrpcClientTypeName( context.getModelTypeTable(), context.getInterface())); xapiClass.apiMethods(methods); xapiClass.toolkitVersion(GeneratorVersionProvider.getGeneratorVersion()); xapiClass.packageVersion( packageConfig.generatedPackageVersionBound(TargetLanguage.RUBY).lower()); xapiClass.fullyQualifiedCredentialsClassName(namer.getFullyQualifiedCredentialsClassName()); return xapiClass.build(); } private List<ApiMethodView> generateApiMethods(GapicInterfaceContext context) { ImmutableList.Builder<ApiMethodView> apiMethods = ImmutableList.builder(); boolean packageHasMultipleServices = new InterfaceView().hasMultipleServices(context.getModel()); for (Method method : context.getSupportedMethods()) { apiMethods.add( apiMethodTransformer.generateMethod( context.asDynamicMethodContext(method), packageHasMultipleServices)); } return apiMethods.build(); } private ViewModel generateVersionIndexView(Model model, GapicProductConfig productConfig) { SurfaceNamer namer = new RubySurfaceNamer(productConfig.getPackageName()); ImmutableList.Builder<VersionIndexRequireView> requireViews = ImmutableList.builder(); Iterable<Interface> interfaces = new InterfaceView().getElementIterable(model); for (Interface apiInterface : interfaces) { GapicInterfaceContext context = createContext(apiInterface, productConfig); GapicInterfaceConfig 
interfaceConfig = productConfig.getInterfaceConfig(apiInterface); requireViews.add( VersionIndexRequireView.newBuilder() .clientName(namer.getFullyQualifiedApiWrapperClassName(interfaceConfig)) .fileName(namer.getServiceFileName(interfaceConfig)) .serviceName(namer.getPackageServiceName(apiInterface)) .doc( serviceTransformer.generateServiceDoc( context, generateApiMethods(context).get(0))) .build()); } return VersionIndexView.newBuilder() .apiVersion(packageConfig.apiVersion()) .requireViews(requireViews.build()) .templateFileName(VERSION_INDEX_TEMPLATE_FILE) .packageVersion(packageConfig.generatedPackageVersionBound(TargetLanguage.RUBY).lower()) .fileHeader( fileHeaderTransformer.generateFileHeader( productConfig, ImportSectionView.newBuilder().build(), namer)) .outputPath("lib" + File.separator + versionPackagePath(namer) + ".rb") .modules(generateModuleViews(model, productConfig, true)) .type(VersionIndexType.VersionIndex) .toolkitVersion(GeneratorVersionProvider.getGeneratorVersion()) .build(); } private ViewModel generateCredentialsView(Model model, GapicProductConfig productConfig) { SurfaceNamer namer = new RubySurfaceNamer(productConfig.getPackageName()); CredentialsClassView credentialsClass = generateCredentialsClass(model, productConfig); ImportSectionView importSection = ImportSectionView.newBuilder() .externalImports( ImmutableList.of( ImportFileView.newBuilder() .moduleName("google/gax") .types(ImmutableList.<ImportTypeView>of()) .build())) .build(); List<String> modules = namer.getTopLevelApiModules(); return CredentialsClassFileView.newBuilder() .outputPath("lib" + File.separator + namer.getCredentialsClassImportName() + ".rb") .templateFileName(CREDENTIALS_CLASS_TEMPLATE_FILE) .credentialsClass(credentialsClass) .fileHeader( fileHeaderTransformer.generateFileHeader( productConfig, importSection, namer, ImmutableList.copyOf(modules))) .build(); } private CredentialsClassView generateCredentialsClass( Model model, GapicProductConfig productConfig) { ProductServiceConfig productServiceConfig = new ProductServiceConfig(); SurfaceNamer namer = new RubySurfaceNamer(productConfig.getPackageName()); String sanitizedShortName = packageConfig.shortName().replaceAll("[^A-Za-z0-9]", " "); String apiSpecificPathEnvVar = namer.inittedConstantName(Name.lowerCamel(sanitizedShortName.split(" ")).join("keyfile")); String apiSpecificJsonEnvVar = namer.inittedConstantName( Name.lowerCamel(sanitizedShortName.split(" ")).join("keyfile").join("json")); List<String> pathEnvVars = ImmutableList.<String>builder() .add(apiSpecificPathEnvVar) .addAll(DEFAULT_PATH_ENV_VARS) .build(); List<String> jsonEnvVars = ImmutableList.<String>builder() .add(apiSpecificJsonEnvVar) .addAll(DEFAULT_JSON_ENV_VARS) .build(); return CredentialsClassView.newBuilder() .pathEnvVars(pathEnvVars) .jsonEnvVars(jsonEnvVars) .scopes(productServiceConfig.getAuthScopes(model)) .build(); } private ViewModel generateTopLevelIndexView(Model model, GapicProductConfig productConfig) { SurfaceNamer namer = new RubySurfaceNamer(productConfig.getPackageName()); ImmutableList.Builder<VersionIndexRequireView> requireViews = ImmutableList.builder(); Iterable<Interface> interfaces = new InterfaceView().getElementIterable(model); List<String> modules = namer.getTopLevelApiModules(); boolean hasMultipleServices = Iterables.size(interfaces) > 1; for (Interface apiInterface : interfaces) { GapicInterfaceContext context = createContext(apiInterface, productConfig); String clientName = 
namer.getPackageName(); String serviceName = namer.getPackageServiceName(apiInterface); if (hasMultipleServices) { clientName += "::" + serviceName; } String topLevelNamespace = namer.getTopLevelNamespace(); requireViews.add( VersionIndexRequireView.newBuilder() .clientName(clientName) .serviceName(serviceName) .fileName(versionPackagePath(namer)) .topLevelNamespace(topLevelNamespace) .doc( serviceTransformer.generateServiceDoc( context, generateApiMethods(context).get(0))) .build()); } String versionFileBasePath = namer.packageFilePathPiece(Name.upperCamel(modules.get(modules.size() - 1))); return VersionIndexView.newBuilder() .apiVersion(packageConfig.apiVersion()) .requireViews(requireViews.build()) .templateFileName(VERSION_INDEX_TEMPLATE_FILE) .packageVersion(packageConfig.generatedPackageVersionBound(TargetLanguage.RUBY).lower()) .fileHeader( fileHeaderTransformer.generateFileHeader( productConfig, ImportSectionView.newBuilder().build(), namer)) .outputPath("lib" + File.separator + topLevelPackagePath(namer) + ".rb") .modules(generateModuleViews(model, productConfig, false)) .type(VersionIndexType.TopLevelIndex) .versionFileBasePath(versionFileBasePath) .toolkitVersion(GeneratorVersionProvider.getGeneratorVersion()) .build(); } private List<ModuleView> generateModuleViews( Model model, GapicProductConfig productConfig, boolean includeVersionModule) { SurfaceNamer namer = new RubySurfaceNamer(productConfig.getPackageName()); RubyPackageMetadataTransformer metadataTransformer = new RubyPackageMetadataTransformer(packageConfig); RubyPackageMetadataNamer packageNamer = new RubyPackageMetadataNamer(productConfig.getPackageName()); ImmutableList.Builder<ModuleView> moduleViews = ImmutableList.builder(); for (String moduleName : namer.getApiModules()) { if (moduleName.equals(namer.getModuleServiceName())) { moduleViews.add( metadataTransformer .generateReadmeMetadataView(model, productConfig, packageNamer) .moduleName(moduleName) .build()); } else if (includeVersionModule || !moduleName.equals(namer.getModuleVersionName())) { moduleViews.add(SimpleModuleView.newBuilder().moduleName(moduleName).build()); } } return moduleViews.build(); } private String versionPackagePath(SurfaceNamer namer) { List<String> parts = namer.getApiModules(); List<String> paths = new ArrayList<>(); for (String part : parts) { paths.add(namer.packageFilePathPiece(Name.upperCamel(part))); } return Joiner.on(File.separator).join(paths); } private String topLevelPackagePath(SurfaceNamer namer) { List<String> paths = new ArrayList<>(); for (String part : namer.getTopLevelApiModules()) { paths.add(namer.packageFilePathPiece(Name.upperCamel(part))); } return Joiner.on(File.separator).join(paths); } private GapicInterfaceContext createContext( Interface apiInterface, GapicProductConfig productConfig) { return GapicInterfaceContext.create( apiInterface, productConfig, new ModelTypeTable( new RubyTypeTable(productConfig.getPackageName()), new RubyModelTypeNameConverter(productConfig.getPackageName())), new RubySurfaceNamer(productConfig.getPackageName()), new RubyFeatureConfig()); } }
1
23,951
I'm guessing this locates the key you get when you run `gcloud auth application-default login`? What was the previous behavior, and what is the behavior for the manually supplied Veneer creds? (A sketch of the env-var name derivation follows this record.)
googleapis-gapic-generator
java
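To make the reviewer's question concrete: generateCredentialsClass above sanitizes packageConfig.shortName(), camel-cases the pieces, appends "keyfile" (and "json"), and turns the result into an initted constant name that is checked ahead of the DEFAULT_PATH_ENV_VARS / DEFAULT_JSON_ENV_VARS fallbacks (whose values this excerpt does not show). Below is a minimal, self-contained sketch of that derivation. The short name "speech-v1" and the toConstant helper are hypothetical stand-ins; the real code goes through the toolkit's Name.lowerCamel(...).join(...) and namer.inittedConstantName(...), which this sketch only approximates with plain string handling.

public class CredentialsEnvVarSketch {
  public static void main(String[] args) {
    // Hypothetical packageConfig.shortName(); not taken from the record above.
    String shortName = "speech-v1";
    // Mirrors the replaceAll("[^A-Za-z0-9]", " ") step: separators become spaces.
    String sanitized = shortName.replaceAll("[^A-Za-z0-9]", " ");
    // Approximates Name.lowerCamel(parts).join("keyfile") followed by
    // namer.inittedConstantName(...), assumed here to upper-snake-case the words.
    String apiSpecificPathEnvVar = toConstant(sanitized + " keyfile");
    String apiSpecificJsonEnvVar = toConstant(sanitized + " keyfile json");
    System.out.println(apiSpecificPathEnvVar); // SPEECH_V1_KEYFILE
    System.out.println(apiSpecificJsonEnvVar); // SPEECH_V1_KEYFILE_JSON
  }

  // Hypothetical stand-in for the toolkit's constant-name formatting.
  private static String toConstant(String words) {
    return words.trim().replaceAll("\\s+", "_").toUpperCase();
  }
}

If that reading is right, application-default credentials would only come into play through whatever the DEFAULT_* fallbacks resolve to, not through these API-specific variables.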
@@ -85,7 +85,7 @@ entireColumnSelection = [0, selected[1], this.view.wt.wtTable.getRowStrategy().cellCount - 1, selected[1]], columnSelected = entireColumnSelection.join(',') == selected.join(','); - return selected[0] < 0 || this.countRows() >= this.getSettings().maxRows || columnSelected; + return selected[0] < 0 || this.countRows() >= this.getSettings().maxRows || (columnSelected && this.countRows() > 1); } }, {
1
(function (Handsontable) {
  'use strict';

  function prepareVerticalAlignClass(className, alignment) {
    if (className.indexOf(alignment) != -1) {
      return className;
    }
    className = className
      .replace('htTop', '')
      .replace('htMiddle', '')
      .replace('htBottom', '')
      .replace(' ', '');
    className += " " + alignment;
    return className;
  }

  function prepareHorizontalAlignClass(className, alignment) {
    if (className.indexOf(alignment) != -1) {
      return className;
    }
    className = className
      .replace('htLeft', '')
      .replace('htCenter', '')
      .replace('htRight', '')
      .replace('htJustify', '')
      .replace(' ', '');
    className += " " + alignment;
    return className;
  }

  function doAlign(row, col, type, alignment) {
    var cellMeta = this.getCellMeta(row, col),
      className = alignment;
    if (cellMeta.className) {
      if (type === 'vertical') {
        className = prepareVerticalAlignClass(cellMeta.className, alignment);
      } else {
        className = prepareHorizontalAlignClass(cellMeta.className, alignment);
      }
    }
    this.setCellMeta(row, col, 'className', className);
    this.render();
  }

  function align(range, type, alignment) {
    if (range.from.row == range.to.row && range.from.col == range.to.col) {
      doAlign.call(this, range.from.row, range.from.col, type, alignment);
    } else {
      for (var row = range.from.row; row <= range.to.row; row++) {
        for (var col = range.from.col; col <= range.to.col; col++) {
          doAlign.call(this, row, col, type, alignment);
        }
      }
    }
  }

  function ContextMenu(instance, customOptions) {
    this.instance = instance;
    var contextMenu = this;
    contextMenu.menus = [];
    contextMenu.triggerRows = [];
    this.enabled = true;

    this.instance.addHook('afterDestroy', function () {
      contextMenu.destroy();
    });

    this.defaultOptions = {
      items: [
        {
          key: 'row_above',
          name: 'Insert row above',
          callback: function (key, selection) {
            this.alter("insert_row", selection.start.row);
          },
          disabled: function () {
            var selected = this.getSelected(),
              entireColumnSelection = [0, selected[1], this.view.wt.wtTable.getRowStrategy().cellCount - 1, selected[1]],
              columnSelected = entireColumnSelection.join(',') == selected.join(',');
            return selected[0] < 0 || this.countRows() >= this.getSettings().maxRows || columnSelected;
          }
        },
        {
          key: 'row_below',
          name: 'Insert row below',
          callback: function (key, selection) {
            this.alter("insert_row", selection.end.row + 1);
          },
          disabled: function () {
            var selected = this.getSelected(),
              entireColumnSelection = [0, selected[1], this.view.wt.wtTable.getRowStrategy().cellCount - 1, selected[1]],
              columnSelected = entireColumnSelection.join(',') == selected.join(',');
            return this.getSelected()[0] < 0 || this.countRows() >= this.getSettings().maxRows || columnSelected;
          }
        },
        ContextMenu.SEPARATOR,
        {
          key: 'col_left',
          name: 'Insert column on the left',
          callback: function (key, selection) {
            this.alter("insert_col", selection.start.col);
          },
          disabled: function () {
            var selected = this.getSelected(),
              entireRowSelection = [selected[0], 0, selected[0], this.view.wt.wtTable.getColumnStrategy().cellCount - 1],
              rowSelected = entireRowSelection.join(',') == selected.join(',');
            return this.getSelected()[1] < 0 || this.countCols() >= this.getSettings().maxCols || rowSelected;
          }
        },
        {
          key: 'col_right',
          name: 'Insert column on the right',
          callback: function (key, selection) {
            this.alter("insert_col", selection.end.col + 1);
          },
          disabled: function () {
            var selected = this.getSelected(),
              entireRowSelection = [selected[0], 0, selected[0], this.view.wt.wtTable.getColumnStrategy().cellCount - 1],
              rowSelected = entireRowSelection.join(',') == selected.join(',');
            return selected[1] < 0 || this.countCols() >= this.getSettings().maxCols || rowSelected;
          }
        },
        ContextMenu.SEPARATOR,
        {
          key: 'remove_row',
          name: 'Remove row',
          callback: function (key, selection) {
            var amount = selection.end.row - selection.start.row + 1;
            this.alter("remove_row", selection.start.row, amount);
          },
          disabled: function () {
            var selected = this.getSelected(),
              entireColumnSelection = [0, selected[1], this.view.wt.wtTable.getRowStrategy().cellCount - 1, selected[1]],
              columnSelected = entireColumnSelection.join(',') == selected.join(',');
            return (selected[0] < 0 || columnSelected);
          }
        },
        {
          key: 'remove_col',
          name: 'Remove column',
          callback: function (key, selection) {
            var amount = selection.end.col - selection.start.col + 1;
            this.alter("remove_col", selection.start.col, amount);
          },
          disabled: function () {
            var selected = this.getSelected(),
              entireRowSelection = [selected[0], 0, selected[0], this.view.wt.wtTable.getColumnStrategy().cellCount - 1],
              rowSelected = entireRowSelection.join(',') == selected.join(',');
            return (selected[1] < 0 || rowSelected);
          }
        },
        ContextMenu.SEPARATOR,
        {
          key: 'undo',
          name: 'Undo',
          callback: function () {
            this.undo();
          },
          disabled: function () {
            return this.undoRedo && !this.undoRedo.isUndoAvailable();
          }
        },
        {
          key: 'redo',
          name: 'Redo',
          callback: function () {
            this.redo();
          },
          disabled: function () {
            return this.undoRedo && !this.undoRedo.isRedoAvailable();
          }
        },
        ContextMenu.SEPARATOR,
        {
          key: 'make_read_only',
          name: function () {
            var label = "Read only";
            var atLeastOneReadOnly = contextMenu.checkSelectionReadOnlyConsistency(this);
            if (atLeastOneReadOnly) {
              label = contextMenu.markSelected(label);
            }
            return label;
          },
          callback: function () {
            var atLeastOneReadOnly = contextMenu.checkSelectionReadOnlyConsistency(this);
            var that = this;
            this.getSelectedRange().forAll(function (r, c) {
              that.getCellMeta(r, c).readOnly = atLeastOneReadOnly ? false : true;
            });
            this.render();
          }
        },
        ContextMenu.SEPARATOR,
        {
          key: 'alignment',
          name: 'Alignment',
          submenu: {
            items: [
              {
                name: function () {
                  var label = "Left";
                  var hasClass = contextMenu.checkSelectionAlignment(this, 'htLeft');
                  if (hasClass) {
                    label = contextMenu.markSelected(label);
                  }
                  return label;
                },
                callback: function () {
                  align.call(this, this.getSelectedRange(), 'horizontal', 'htLeft');
                },
                disabled: false
              },
              {
                name: function () {
                  var label = "Center";
                  var hasClass = contextMenu.checkSelectionAlignment(this, 'htCenter');
                  if (hasClass) {
                    label = contextMenu.markSelected(label);
                  }
                  return label;
                },
                callback: function () {
                  align.call(this, this.getSelectedRange(), 'horizontal', 'htCenter');
                },
                disabled: false
              },
              {
                name: function () {
                  var label = "Right";
                  var hasClass = contextMenu.checkSelectionAlignment(this, 'htRight');
                  if (hasClass) {
                    label = contextMenu.markSelected(label);
                  }
                  return label;
                },
                callback: function () {
                  align.call(this, this.getSelectedRange(), 'horizontal', 'htRight');
                },
                disabled: false
              },
              {
                name: function () {
                  var label = "Justify";
                  var hasClass = contextMenu.checkSelectionAlignment(this, 'htJustify');
                  if (hasClass) {
                    label = contextMenu.markSelected(label);
                  }
                  return label;
                },
                callback: function () {
                  align.call(this, this.getSelectedRange(), 'horizontal', 'htJustify');
                },
                disabled: false
              },
              ContextMenu.SEPARATOR,
              {
                name: function () {
                  var label = "Top";
                  var hasClass = contextMenu.checkSelectionAlignment(this, 'htTop');
                  if (hasClass) {
                    label = contextMenu.markSelected(label);
                  }
                  return label;
                },
                callback: function () {
                  align.call(this, this.getSelectedRange(), 'vertical', 'htTop');
                },
                disabled: false
              },
              {
                name: function () {
                  var label = "Middle";
                  var hasClass = contextMenu.checkSelectionAlignment(this, 'htMiddle');
                  if (hasClass) {
                    label = contextMenu.markSelected(label);
                  }
                  return label;
                },
                callback: function () {
                  align.call(this, this.getSelectedRange(), 'vertical', 'htMiddle');
                },
                disabled: false
              },
              {
                name: function () {
                  var label = "Bottom";
                  var hasClass = contextMenu.checkSelectionAlignment(this, 'htBottom');
                  if (hasClass) {
                    label = contextMenu.markSelected(label);
                  }
                  return label;
                },
                callback: function () {
                  align.call(this, this.getSelectedRange(), 'vertical', 'htBottom');
                },
                disabled: false
              }
            ]
          }
        }
      ]
    };

    contextMenu.options = {};
    Handsontable.helper.extend(contextMenu.options, this.options);

    this.bindMouseEvents();

    this.markSelected = function (label) {
      return "<span class='selected'>✓</span>" + label;
    };

    this.checkSelectionAlignment = function (hot, className) {
      var hasAlignment = false;
      hot.getSelectedRange().forAll(function (r, c) {
        var metaClassName = hot.getCellMeta(r, c).className;
        if (metaClassName && metaClassName.indexOf(className) != -1) {
          hasAlignment = true;
          return false;
        }
      });
      return hasAlignment;
    };

    this.checkSelectionReadOnlyConsistency = function (hot) {
      var atLeastOneReadOnly = false;
      hot.getSelectedRange().forAll(function (r, c) {
        if (hot.getCellMeta(r, c).readOnly) {
          atLeastOneReadOnly = true;
          return false; //breaks forAll
        }
      });
      return atLeastOneReadOnly;
    };

    Handsontable.hooks.run(instance, 'afterContextMenuDefaultOptions', this.defaultOptions);
  }

  ContextMenu.prototype.createMenu = function (menuName, row) {
    if (menuName) {
      menuName = menuName.replace(/ /g, '_'); // replace all spaces in name
      menuName = 'htContextSubMenu_' + menuName;
    }
    var menu;
    if (menuName) {
      menu = $('body > .htContextMenu.' + menuName)[0];
    } else {
      menu = $('body > .htContextMenu')[0];
    }
    if (!menu) {
      menu = document.createElement('DIV');
      Handsontable.Dom.addClass(menu, 'htContextMenu');
      if (menuName) {
        Handsontable.Dom.addClass(menu, menuName);
      }
      document.getElementsByTagName('body')[0].appendChild(menu);
    }
    if (this.menus.indexOf(menu) < 0) {
      this.menus.push(menu);
      row = row || 0;
      this.triggerRows.push(row);
    }
    return menu;
  };

  ContextMenu.prototype.bindMouseEvents = function () {
    function contextMenuOpenListener(event) {
      var settings = this.instance.getSettings();
      // if(!settings.contextMenu) {
      //   return;
      // }
      this.closeAll();
      event.preventDefault();
      event.stopPropagation();
      var showRowHeaders = this.instance.getSettings().rowHeaders,
        showColHeaders = this.instance.getSettings().colHeaders;
      if (!(showRowHeaders || showColHeaders)) {
        if (event.target.nodeName != 'TD' && !(Handsontable.Dom.hasClass(event.target, 'current') && Handsontable.Dom.hasClass(event.target, 'wtBorder'))) {
          return;
        }
      }
      var menu = this.createMenu();
      var items = this.getItems(settings.contextMenu);
      this.show(menu, items);
      this.setMenuPosition(event, menu);
      $(document).on('mousedown.htContextMenu', Handsontable.helper.proxy(ContextMenu.prototype.closeAll, this));
    }

    this.instance.rootElement.on('contextmenu.htContextMenu', Handsontable.helper.proxy(contextMenuOpenListener, this));
  };

  ContextMenu.prototype.bindTableEvents = function () {
    var that = this;
    this._afterScrollCallback = function () {
      // that.close();
    };
    this.instance.addHook('afterScrollVertically', this._afterScrollCallback);
    this.instance.addHook('afterScrollHorizontally', this._afterScrollCallback);
  };

  ContextMenu.prototype.unbindTableEvents = function () {
    if (this._afterScrollCallback) {
      this.instance.removeHook('afterScrollVertically', this._afterScrollCallback);
      this.instance.removeHook('afterScrollHorizontally', this._afterScrollCallback);
      this._afterScrollCallback = null;
    }
  };

  ContextMenu.prototype.performAction = function (event, menu) {
    var contextMenu = this;
    var hot = $(menu).handsontable('getInstance');
    var selectedItemIndex = hot.getSelected()[0];
    var selectedItem = hot.getData()[selectedItemIndex];
    if (selectedItem.disabled === true || (typeof selectedItem.disabled == 'function' && selectedItem.disabled.call(this.instance) === true)) {
      return;
    }
    if (!selectedItem.hasOwnProperty('submenu')) {
      if (typeof selectedItem.callback != 'function') {
        return;
      }
      var selRange = this.instance.getSelectedRange();
      var normalizedSelection = ContextMenu.utils.normalizeSelection(selRange);
      selectedItem.callback.call(this.instance, selectedItem.key, normalizedSelection, event);
      contextMenu.closeAll();
    }
  };

  ContextMenu.prototype.unbindMouseEvents = function () {
    this.instance.rootElement.off('contextmenu.htContextMenu');
    $(document).off('mousedown.htContextMenu');
  };

  ContextMenu.prototype.show = function (menu, items) {
    menu.removeAttribute('style');
    menu.style.display = 'block';
    var that = this;
    $(menu)
      .off('mousedown.htContextMenu')
      .on('mousedown.htContextMenu', function (event) {
        that.performAction(event, menu)
      });
    $(menu).handsontable({
      data: items,
      colHeaders: false,
      colWidths: [200],
      readOnly: true,
      copyPaste: false,
      columns: [
        {
          data: 'name',
          renderer: Handsontable.helper.proxy(this.renderer, this)
        }
      ],
      beforeKeyDown: function (event) {
        that.onBeforeKeyDown(event, menu);
      },
      afterOnCellMouseOver: function (event, coords, TD) {
        that.onCellMouseOver(event, coords, TD, menu);
      },
      renderAllRows: true
    });
    this.bindTableEvents();
    $(menu).handsontable('listen');
  };

  ContextMenu.prototype.close = function (menu) {
    this.hide(menu);
    $(document).off('mousedown.htContextMenu');
    this.unbindTableEvents();
    this.instance.listen();
  };

  ContextMenu.prototype.closeAll = function () {
    while (this.menus.length > 0) {
      var menu = this.menus.pop();
      if (menu) {
        this.close(menu);
      }
    }
    this.triggerRows = [];
  };

  ContextMenu.prototype.closeLastOpenedSubMenu = function () {
    var menu = this.menus.pop();
    if (menu) {
      this.hide(menu);
      // this.close(menu);
    }
  };

  ContextMenu.prototype.hide = function (menu) {
    menu.style.display = 'none';
    $(menu).handsontable('destroy');
  };

  ContextMenu.prototype.renderer = function (instance, TD, row, col, prop, value) {
    var contextMenu = this;
    var item = instance.getData()[row];
    var wrapper = document.createElement('DIV');
    if (typeof value === 'function') {
      value = value.call(this.instance);
    }
    Handsontable.Dom.empty(TD);
    TD.appendChild(wrapper);
    if (itemIsSeparator(item)) {
      Handsontable.Dom.addClass(TD, 'htSeparator');
    } else {
      Handsontable.Dom.fastInnerHTML(wrapper, value);
    }
    if (itemIsDisabled(item)) {
      Handsontable.Dom.addClass(TD, 'htDisabled');
      $(wrapper).on('mouseenter', function () {
        instance.deselectCell();
      });
    } else {
      if (isSubMenu(item)) {
        Handsontable.Dom.addClass(TD, 'htSubmenu');
        $(wrapper).on('mouseenter', function () {
          instance.selectCell(row, col);
        });
      } else {
        Handsontable.Dom.removeClass(TD, 'htSubmenu');
        Handsontable.Dom.removeClass(TD, 'htDisabled');
        $(wrapper).on('mouseenter', function () {
          instance.selectCell(row, col);
        });
      }
    }

    function isSubMenu(item) {
      return item.hasOwnProperty('submenu');
    }

    function itemIsSeparator(item) {
      return new RegExp(ContextMenu.SEPARATOR.name, 'i').test(item.name);
    }

    function itemIsDisabled(item) {
      return item.disabled === true || (typeof item.disabled == 'function' && item.disabled.call(contextMenu.instance) === true);
    }
  };

  ContextMenu.prototype.onCellMouseOver = function (event, coords, TD, menu) {
    var hot = $(menu).handsontable('getInstance');
    var menusLength = this.menus.length;
    if (menusLength > 0) {
      var lastMenu = this.menus[menusLength - 1];
      if (lastMenu.id != menu.id) {
        this.closeLastOpenedSubMenu();
      }
    } else {
      this.closeLastOpenedSubMenu();
    }
    if (TD.className.indexOf('htSubmenu') != -1) {
      var selectedItem = hot.getData()[coords.row];
      var items = this.getItems(selectedItem.submenu);
      var subMenu = this.createMenu(selectedItem.name, coords.row);
      var tdCoords = TD.getBoundingClientRect();
      this.show(subMenu, items);
      this.setSubMenuPosition(tdCoords, subMenu);
    }
  };

  ContextMenu.prototype.onBeforeKeyDown = function (event, menu) {
    var contextMenu = this;
    var instance = $(menu).handsontable('getInstance');
    var selection = instance.getSelected();
    switch (event.keyCode) {
      case Handsontable.helper.keyCode.ESCAPE:
        contextMenu.closeAll();
        event.preventDefault();
        event.stopImmediatePropagation();
        break;
      case Handsontable.helper.keyCode.ENTER:
        if (selection) {
          contextMenu.performAction(event, menu);
        }
        break;
      case Handsontable.helper.keyCode.ARROW_DOWN:
        if (!selection) {
          selectFirstCell(instance, contextMenu);
        } else {
          selectNextCell(selection[0], selection[1], instance, contextMenu);
        }
        event.preventDefault();
        event.stopImmediatePropagation();
        break;
      case Handsontable.helper.keyCode.ARROW_UP:
        if (!selection) {
          selectLastCell(instance, contextMenu);
        } else {
          selectPrevCell(selection[0], selection[1], instance, contextMenu);
        }
        event.preventDefault();
        event.stopImmediatePropagation();
        break;
      case Handsontable.helper.keyCode.ARROW_RIGHT:
        if (selection) {
          var row = selection[0];
          var cell = instance.getCell(selection[0], 0);
          if (ContextMenu.utils.hasSubMenu(cell)) {
            openSubMenu(instance, contextMenu, cell, row);
          }
        }
        event.preventDefault();
        event.stopImmediatePropagation();
        break;
      case Handsontable.helper.keyCode.ARROW_LEFT:
        if (selection) {
          if (menu.className.indexOf('htContextSubMenu_') != -1) {
            contextMenu.closeLastOpenedSubMenu();
            var index = contextMenu.menus.length;
            if (index > 0) {
              menu = contextMenu.menus[index - 1];
              var triggerRow = contextMenu.triggerRows.pop();
              instance = $(menu).handsontable('getInstance');
              instance.selectCell(triggerRow, 0);
            }
          }
          event.preventDefault();
          event.stopImmediatePropagation();
        }
        break;
    }

    function selectFirstCell(instance) {
      var firstCell = instance.getCell(0, 0);
      if (ContextMenu.utils.isSeparator(firstCell) || ContextMenu.utils.isDisabled(firstCell)) {
        selectNextCell(0, 0, instance);
      } else {
        instance.selectCell(0, 0);
      }
    }

    function selectLastCell(instance) {
      var lastRow = instance.countRows() - 1;
      var lastCell = instance.getCell(lastRow, 0);
      if (ContextMenu.utils.isSeparator(lastCell) || ContextMenu.utils.isDisabled(lastCell)) {
        selectPrevCell(lastRow, 0, instance);
      } else {
        instance.selectCell(lastRow, 0);
      }
    }

    function selectNextCell(row, col, instance) {
      var nextRow = row + 1;
      var nextCell = nextRow < instance.countRows() ? instance.getCell(nextRow, col) : null;
      if (!nextCell) {
        return;
      }
      if (ContextMenu.utils.isSeparator(nextCell) || ContextMenu.utils.isDisabled(nextCell)) {
        selectNextCell(nextRow, col, instance);
      } else {
        instance.selectCell(nextRow, col);
      }
    }

    function selectPrevCell(row, col, instance) {
      var prevRow = row - 1;
      var prevCell = prevRow >= 0 ? instance.getCell(prevRow, col) : null;
      if (!prevCell) {
        return;
      }
      if (ContextMenu.utils.isSeparator(prevCell) || ContextMenu.utils.isDisabled(prevCell)) {
        selectPrevCell(prevRow, col, instance);
      } else {
        instance.selectCell(prevRow, col);
      }
    }

    function openSubMenu(instance, contextMenu, cell, row) {
      var selectedItem = instance.getData()[row];
      var items = contextMenu.getItems(selectedItem.submenu);
      var subMenu = contextMenu.createMenu(selectedItem.name, row);
      var coords = cell.getBoundingClientRect();
      contextMenu.show(subMenu, items);
      contextMenu.setSubMenuPosition(coords, subMenu);
      var subMenuInstance = $(subMenu).handsontable('getInstance');
      subMenuInstance.selectCell(0, 0);
    }
  };

  function findByKey(items, key) {
    for (var i = 0, ilen = items.length; i < ilen; i++) {
      if (items[i].key === key) {
        return items[i];
      }
    }
  }

  ContextMenu.prototype.getItems = function (items) {
    var menu, item;

    function ContextMenuItem(rawItem) {
      if (typeof rawItem == 'string') {
        this.name = rawItem;
      } else {
        Handsontable.helper.extend(this, rawItem);
      }
    }

    ContextMenuItem.prototype = items;
    if (items && items.items) {
      items = items.items;
    }
    if (items === true) {
      items = this.defaultOptions.items;
    }
    /*else if (Handsontable.helper.isArray(items)) {
      menu = [];
      for (var i = 0, ilen = items.length; i < ilen; i++) {
        if (typeof items[i] === 'string') {
          item = findByKey(this.defaultOptions.items, items[i]);
        } else {
          item = items[i];
        }
        menu.push(new ContextMenuItem(item || items[i]));
      }
    }*/
    if (1 == 1) {
      menu = [];
      for (var key in items) {
        if (items.hasOwnProperty(key)) {
          if (typeof items[key] === 'string') {
            item = findByKey(this.defaultOptions.items, items[key]);
          } else {
            item = findByKey(this.defaultOptions.items, key);
          }
          if (!item) {
            item = items[key];
          }
          item = new ContextMenuItem(item);
          if (typeof items[key] === 'object') {
            Handsontable.helper.extend(item, items[key]);
          }
          if (!item.key) {
            item.key = key;
          }
          menu.push(item);
        }
      }
    }
    return menu;
  };

  ContextMenu.prototype.setSubMenuPosition = function (coords, menu) {
    var scrollTop = Handsontable.Dom.getWindowScrollTop();
    var scrollLeft = Handsontable.Dom.getWindowScrollLeft();
    var cursor = {
      top: scrollTop + coords.top,
      topRelative: coords.top,
      left: coords.left,
      leftRelative: coords.left - scrollLeft,
      scrollTop: scrollTop,
      scrollLeft: scrollLeft,
      cellHeight: coords.height,
      cellWidth: coords.width
    };
    if (this.menuFitsBelowCursor(cursor, menu)) {
      this.positionMenuBelowCursor(cursor, menu, true);
    } else {
      if (this.menuFitsAboveCursor(cursor, menu)) {
        this.positionMenuAboveCursor(cursor, menu, true);
      } else {
        this.positionMenuBelowCursor(cursor, menu, true);
      }
    }
    if (this.menuFitsOnRightOfCursor(cursor, menu)) {
      this.positionMenuOnRightOfCursor(cursor, menu, true);
    } else {
      this.positionMenuOnLeftOfCursor(cursor, menu, true);
    }
  };

  ContextMenu.prototype.setMenuPosition = function (event, menu) {
    var cursorY = event.pageY;
    var cursorX = event.pageX;
    var scrollTop = Handsontable.Dom.getWindowScrollTop();
    var scrollLeft = Handsontable.Dom.getWindowScrollLeft();
    var cursor = {
      top: cursorY,
      topRelative: cursorY - scrollTop,
      left: cursorX,
      leftRelative: cursorX - scrollLeft,
      scrollTop: scrollTop,
      scrollLeft: scrollLeft,
      cellHeight: event.target.clientHeight,
      cellWidth: event.target.clientWidth
    };
    if (this.menuFitsBelowCursor(cursor, menu)) {
      this.positionMenuBelowCursor(cursor, menu);
    } else {
      if (this.menuFitsAboveCursor(cursor, menu)) {
        this.positionMenuAboveCursor(cursor, menu);
      } else {
        this.positionMenuBelowCursor(cursor, menu);
      }
    }
    if (this.menuFitsOnRightOfCursor(cursor, menu)) {
      this.positionMenuOnRightOfCursor(cursor, menu);
    } else {
      this.positionMenuOnLeftOfCursor(cursor, menu);
    }
  };

  ContextMenu.prototype.menuFitsAboveCursor = function (cursor, menu) {
    return cursor.topRelative >= menu.offsetHeight;
  };

  ContextMenu.prototype.menuFitsBelowCursor = function (cursor, menu) {
    return cursor.topRelative + menu.offsetHeight <= cursor.scrollTop + document.body.clientHeight;
  };

  ContextMenu.prototype.menuFitsOnRightOfCursor = function (cursor, menu) {
    return cursor.leftRelative + menu.offsetWidth <= cursor.scrollLeft + document.body.clientWidth;
  };

  ContextMenu.prototype.positionMenuBelowCursor = function (cursor, menu) {
    menu.style.top = cursor.top + 'px';
  };

  ContextMenu.prototype.positionMenuAboveCursor = function (cursor, menu, subMenu) {
    if (subMenu) {
      menu.style.top = (cursor.top + cursor.cellHeight - menu.offsetHeight) + 'px';
    } else {
      menu.style.top = (cursor.top - menu.offsetHeight) + 'px';
    }
  };

  ContextMenu.prototype.positionMenuOnRightOfCursor = function (cursor, menu, subMenu) {
    if (subMenu) {
      menu.style.left = 1 + cursor.left + cursor.cellWidth + 'px';
    } else {
      menu.style.left = 1 + cursor.left + 'px';
    }
  };

  ContextMenu.prototype.positionMenuOnLeftOfCursor = function (cursor, menu, subMenu) {
    if (subMenu) {
      menu.style.left = (cursor.left - menu.offsetWidth) + 'px';
    } else {
      menu.style.left = (cursor.left - menu.offsetWidth) + 'px';
    }
  };

  ContextMenu.utils = {};

  ContextMenu.utils.normalizeSelection = function (selRange) {
    return {
      start: selRange.getTopLeftCorner(),
      end: selRange.getBottomRightCorner()
    }
  };

  ContextMenu.utils.isSeparator = function (cell) {
    return Handsontable.Dom.hasClass(cell, 'htSeparator');
  };

  ContextMenu.utils.hasSubMenu = function (cell) {
    return Handsontable.Dom.hasClass(cell, 'htSubmenu');
  };

  ContextMenu.utils.isDisabled = function (cell) {
    return Handsontable.Dom.hasClass(cell, 'htDisabled');
  };

  ContextMenu.prototype.enable = function () {
    if (!this.enabled) {
      this.enabled = true;
      this.bindMouseEvents();
    }
  };

  ContextMenu.prototype.disable = function () {
    if (this.enabled) {
      this.enabled = false;
      this.closeAll();
      this.unbindMouseEvents();
      this.unbindTableEvents();
    }
  };

  ContextMenu.prototype.destroy = function () {
    this.closeAll();
    while (this.menus.length > 0) {
      var menu = this.menus.pop();
      this.triggerRows.pop();
      if (menu) {
        this.close(menu);
        if (!this.isMenuEnabledByOtherHotInstance()) {
          this.removeMenu(menu);
        }
      }
    }
    this.unbindMouseEvents();
    this.unbindTableEvents();
  };

  ContextMenu.prototype.isMenuEnabledByOtherHotInstance = function () {
    var hotContainers = $('.handsontable');
    var menuEnabled = false;
    for (var i = 0, len = hotContainers.length; i < len; i++) {
      var instance = $(hotContainers[i]).handsontable('getInstance');
      if (instance && instance.getSettings().contextMenu) {
        menuEnabled = true;
        break;
      }
    }
    return menuEnabled;
  };

  ContextMenu.prototype.removeMenu = function (menu) {
    if (menu.parentNode) {
      this.menu.parentNode.removeChild(menu);
    }
  };

  ContextMenu.SEPARATOR = {name: "---------"};

  function updateHeight() {
    if (this.rootElement[0].className.indexOf('htContextMenu')) {
      return;
    }
    var realSeparatorHeight = 0,
      realEntrySize = 0,
      dataSize = this.getSettings().data.length;
    for (var i = 0; i < dataSize; i++) {
      if (this.getSettings().data[i].name == ContextMenu.SEPARATOR.name) {
        realSeparatorHeight += 2;
      } else {
        realEntrySize += 26;
      }
    }
    this.view.wt.wtScrollbars.vertical.fixedContainer.style.height = realEntrySize + realSeparatorHeight + "px";
  }

  function init() {
    var instance = this;
    var contextMenuSetting = instance.getSettings().contextMenu;
    var customOptions = Handsontable.helper.isObject(contextMenuSetting) ? contextMenuSetting : {};
    if (contextMenuSetting) {
      if (!instance.contextMenu) {
        instance.contextMenu = new ContextMenu(instance, customOptions);
      }
      instance.contextMenu.enable();
    } else if (instance.contextMenu) {
      instance.contextMenu.destroy();
      delete instance.contextMenu;
    }
  }

  Handsontable.hooks.add('afterInit', init);
  Handsontable.hooks.add('afterUpdateSettings', init);
  Handsontable.hooks.add('afterInit', updateHeight);

  Handsontable.PluginHooks.register('afterContextMenuDefaultOptions');

  Handsontable.ContextMenu = ContextMenu;

})(Handsontable);
1
13,873
Looks good to me, but it would be better if you assigned `this.countRows()` to a variable so it isn't called twice in the return statement (see the sketch after this record).
handsontable-handsontable
js
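A sketch of the refactor the reviewer suggests for the record above: cache this.countRows() in a local so the patched return statement evaluates it once instead of twice. This is an illustrative rewrite of the row_above `disabled` callback as it sits in defaultOptions.items, not the change actually merged:

disabled: function () {
  var selected = this.getSelected(),
    rowCount = this.countRows(), // cached: previously called twice in the return below
    entireColumnSelection = [0, selected[1], this.view.wt.wtTable.getRowStrategy().cellCount - 1, selected[1]],
    columnSelected = entireColumnSelection.join(',') == selected.join(',');

  return selected[0] < 0 || rowCount >= this.getSettings().maxRows || (columnSelected && rowCount > 1);
}

Caching also guards against countRows() drifting between the two reads, though inside a single synchronous callback this is mostly a readability win.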