Dataset schema (column: type, range):
patch: string, length 17 to 31.2k
y: int64, values 1 to 1
oldf: string, length 0 to 2.21M
idx: int64, values 1 to 1
id: int64, values 4.29k to 68.4k
msg: string, length 8 to 843
proj: string, 212 classes
lang: string, 9 classes
patch:
@@ -8,6 +8,7 @@ package blockchain
 import (
 	"context"
+	"github.com/iotexproject/iotex-address/address"
 	"io/ioutil"
 	"math/big"
 	"os"
y: 1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package blockchain import ( "context" "io/ioutil" "math/big" "os" "strings" "testing" "github.com/pkg/errors" "github.com/stretchr/testify/require" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" "github.com/iotexproject/iotex-core/action/protocol/account" "github.com/iotexproject/iotex-core/action/protocol/execution" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/state/factory" "github.com/iotexproject/iotex-core/test/identityset" "github.com/iotexproject/iotex-core/testutil" ) func TestWrongRootHash(t *testing.T) { require := require.New(t) val := validator{sf: nil, validatorAddr: "", enableExperimentalActions: true} tsf1, err := testutil.SignedTransfer(identityset.Address(28).String(), identityset.PrivateKey(27), 1, big.NewInt(20), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf2, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 1, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash := tsf1.Hash() blk, err := block.NewTestingBuilder(). SetHeight(1). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf1, tsf2). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) require.Nil(val.Validate(&blk, 0, blkhash)) blk.Actions[0], blk.Actions[1] = blk.Actions[1], blk.Actions[0] require.NotNil(val.Validate(&blk, 0, blkhash)) } func TestSignBlock(t *testing.T) { require := require.New(t) val := validator{sf: nil, validatorAddr: "", enableExperimentalActions: true} tsf1, err := testutil.SignedTransfer(identityset.Address(28).String(), identityset.PrivateKey(27), 1, big.NewInt(20), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf2, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 1, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash := tsf1.Hash() blk, err := block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf1, tsf2). 
SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) require.Nil(val.Validate(&blk, 2, blkhash)) } func TestWrongNonce(t *testing.T) { cfg := config.Default testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie") testTriePath := testTrieFile.Name() cfg.Chain.TrieDBPath = testTriePath testDBFile, _ := ioutil.TempFile(os.TempDir(), "db") testDBPath := testDBFile.Name() cfg.Chain.ChainDBPath = testDBPath require := require.New(t) sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption()) require.NoError(err) hu := config.NewHeightUpgrade(cfg) sf.AddActionHandlers(account.NewProtocol(hu)) // Create a blockchain from scratch bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption()) require.NoError(bc.Start(context.Background())) defer func() { require.NoError(bc.Stop(context.Background())) }() require.NoError(addCreatorToFactory(sf)) val := &validator{sf: sf, validatorAddr: "", enableExperimentalActions: true} val.AddActionEnvelopeValidators(protocol.NewGenericValidator(bc)) val.AddActionValidators(account.NewProtocol(hu)) // correct nonce tsf1, err := testutil.SignedTransfer(identityset.Address(28).String(), identityset.PrivateKey(27), 1, big.NewInt(20), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash := tsf1.Hash() blk, err := block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf1). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) require.Nil(val.Validate(&blk, 2, blkhash)) ws, err := sf.NewWorkingSet() require.NoError(err) gasLimit := testutil.TestGasLimit ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{ Producer: identityset.Address(27), GasLimit: gasLimit, }) _, err = ws.RunActions(ctx, 1, []action.SealedEnvelope{tsf1}) require.NoError(err) require.Nil(sf.Commit(ws)) // low nonce tsf2, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 1, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf1, tsf2). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(&blk, 2, blkhash) require.Equal(action.ErrNonce, errors.Cause(err)) tsf3, err := testutil.SignedTransfer(identityset.Address(27).String(), identityset.PrivateKey(27), 1, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash = tsf1.Hash() blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf3). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(&blk, 2, blkhash) require.Error(err) require.Equal(action.ErrNonce, errors.Cause(err)) // duplicate nonce tsf4, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf5, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash = tsf1.Hash() blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf4, tsf5). 
SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(&blk, 2, blkhash) require.Error(err) require.Equal(action.ErrNonce, errors.Cause(err)) tsf6, err := testutil.SignedTransfer(identityset.Address(27).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf7, err := testutil.SignedTransfer(identityset.Address(27).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash = tsf1.Hash() blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf6, tsf7). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(&blk, 2, blkhash) require.Error(err) require.Equal(action.ErrNonce, errors.Cause(err)) // non consecutive nonce tsf8, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf9, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 4, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash = tsf1.Hash() blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf8, tsf9). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(&blk, 2, blkhash) require.Error(err) require.Equal(action.ErrNonce, errors.Cause(err)) tsf10, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf11, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 4, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash = tsf1.Hash() blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf10, tsf11). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(&blk, 2, blkhash) require.Error(err) require.Equal(action.ErrNonce, errors.Cause(err)) } func TestWrongAddress(t *testing.T) { ctx := context.Background() cfg := config.Default bc := NewBlockchain(cfg, InMemStateFactoryOption(), InMemDaoOption()) hu := config.NewHeightUpgrade(cfg) bc.GetFactory().AddActionHandlers(account.NewProtocol(hu)) require.NoError(t, bc.Start(ctx)) require.NotNil(t, bc) defer func() { err := bc.Stop(ctx) require.NoError(t, err) }() val := &validator{sf: bc.GetFactory(), validatorAddr: "", enableExperimentalActions: true} val.AddActionEnvelopeValidators(protocol.NewGenericValidator(bc)) val.AddActionValidators(account.NewProtocol(hu), execution.NewProtocol(bc, hu)) invalidRecipient := "io1qyqsyqcyq5narhapakcsrhksfajfcpl24us3xp38zwvsep" tsf, err := action.NewTransfer(1, big.NewInt(1), invalidRecipient, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(t, err) bd := &action.EnvelopeBuilder{} elp := bd.SetAction(tsf).SetGasLimit(100000). SetGasPrice(big.NewInt(10)). SetNonce(1).Build() selp, err := action.Sign(elp, identityset.PrivateKey(27)) require.NoError(t, err) blk1, err := block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(hash.ZeroHash256). SetTimeStamp(testutil.TimestampNow()). AddActions(selp). 
SignAndBuild(identityset.PrivateKey(27)) require.NoError(t, err) err = val.validateActionsOnly( blk1.Actions, blk1.PublicKey(), blk1.Height(), ) require.Error(t, err) require.True(t, strings.Contains(err.Error(), "error when validating recipient's address")) invalidContract := "123" execution, err := action.NewExecution(invalidContract, 1, big.NewInt(1), uint64(100000), big.NewInt(10), []byte{}) require.NoError(t, err) bd = &action.EnvelopeBuilder{} elp = bd.SetAction(execution).SetGasLimit(100000). SetGasPrice(big.NewInt(10)). SetNonce(1).Build() selp, err = action.Sign(elp, identityset.PrivateKey(27)) require.NoError(t, err) blk3, err := block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(hash.ZeroHash256). SetTimeStamp(testutil.TimestampNow()). AddActions(selp). SignAndBuild(identityset.PrivateKey(27)) require.NoError(t, err) err = val.validateActionsOnly( blk3.Actions, blk3.PublicKey(), blk3.Height(), ) require.Error(t, err) require.True(t, strings.Contains(err.Error(), "error when validating contract's address")) }
idx: 1
id: 18,864
msg: let's group imports in the order: std, third party, first party
proj: iotexproject-iotex-core
lang: go
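For illustration, a minimal sketch of the grouping the review comment above asks for, using imports that actually appear in the patched test file: standard library first, then third-party packages, then first-party (same-org) packages. The trailing var block exists only so this standalone fragment compiles; the real test file uses these packages directly.

package blockchain

import (
	// standard library
	"context"
	"math/big"

	// third party
	"github.com/stretchr/testify/require"

	// first party (same GitHub org as iotex-core)
	"github.com/iotexproject/iotex-address/address"
	"github.com/iotexproject/iotex-core/config"
)

// References only so this grouping sketch compiles on its own.
var (
	_ = context.Background
	_ = big.NewInt
	_ = require.New
	_ address.Address
	_ = config.Default
)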
patch:
@@ -1062,7 +1062,7 @@ public class JdbcExecutorLoaderTest {
     return new JdbcExecutorLoader(props,
         new CommonMetrics(new MetricsManager(new MetricRegistry())), null
         , null, null, null, null,
-        null, null);
+        null, null, null);
   }

   private boolean isTestSetup() {
y: 1
/* * Copyright 2014 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.database.DataSourceUtils; import azkaban.executor.ExecutorLogEvent.EventType; import azkaban.metrics.CommonMetrics; import azkaban.metrics.MetricsManager; import azkaban.test.executions.ExecutionsTestUtil; import azkaban.user.User; import azkaban.utils.FileIOUtils.LogData; import azkaban.utils.Pair; import azkaban.utils.Props; import azkaban.utils.TestUtils; import com.codahale.metrics.MetricRegistry; import java.io.File; import java.io.IOException; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.time.Duration; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; import javax.sql.DataSource; import org.apache.commons.dbutils.DbUtils; import org.apache.commons.dbutils.QueryRunner; import org.apache.commons.dbutils.ResultSetHandler; import org.joda.time.DateTime; import org.joda.time.DateTimeUtils; import org.junit.After; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; public class JdbcExecutorLoaderTest { private static final String LOG_TEST_DIR_NAME = "logtest"; // @TODO remove this and turn into local host. 
private static final String host = "localhost"; private static final int port = 3306; private static final String database = "azkaban2"; private static final String user = "azkaban"; private static final String password = "azkaban"; private static final int numConnections = 10; private static final Duration RECENTLY_FINISHED_LIFETIME = Duration.ofMinutes(1); private static final Duration FLOW_FINISHED_TIME = Duration.ofMinutes(2); private static boolean testDBExists; @BeforeClass public static void setupDB() { final DataSource dataSource = DataSourceUtils.getMySQLDataSource(host, port, database, user, password, numConnections); testDBExists = true; Connection connection = null; try { connection = dataSource.getConnection(); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } final CountHandler countHandler = new CountHandler(); final QueryRunner runner = new QueryRunner(); try { runner.query(connection, "SELECT COUNT(1) FROM active_executing_flows", countHandler); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } try { runner.query(connection, "SELECT COUNT(1) FROM execution_flows", countHandler); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } try { runner.query(connection, "SELECT COUNT(1) FROM execution_jobs", countHandler); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } try { runner.query(connection, "SELECT COUNT(1) FROM execution_logs", countHandler); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } try { runner.query(connection, "SELECT COUNT(1) FROM executors", countHandler); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } try { runner.query(connection, "SELECT COUNT(1) FROM executor_events", countHandler); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } DbUtils.closeQuietly(connection); } @After public void clearDB() { if (!testDBExists) { return; } final DataSource dataSource = DataSourceUtils.getMySQLDataSource(host, port, database, user, password, numConnections); Connection connection = null; try { connection = dataSource.getConnection(); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } final QueryRunner runner = new QueryRunner(); try { runner.update(connection, "DELETE FROM active_executing_flows"); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } try { runner.update(connection, "DELETE FROM execution_flows"); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } try { runner.update(connection, "DELETE FROM execution_jobs"); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } try { runner.update(connection, "DELETE FROM execution_logs"); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } try { runner.update(connection, "DELETE FROM executors"); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; 
DbUtils.closeQuietly(connection); return; } try { runner.update(connection, "DELETE FROM executor_events"); } catch (final SQLException e) { e.printStackTrace(); testDBExists = false; DbUtils.closeQuietly(connection); return; } DbUtils.closeQuietly(connection); } @Test public void testUploadExecutionFlows() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final ExecutableFlow flow = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow); final ExecutableFlow fetchFlow = loader.fetchExecutableFlow(flow.getExecutionId()); // Shouldn't be the same object. Assert.assertTrue(flow != fetchFlow); Assert.assertEquals(flow.getExecutionId(), fetchFlow.getExecutionId()); Assert.assertEquals(flow.getEndTime(), fetchFlow.getEndTime()); Assert.assertEquals(flow.getStartTime(), fetchFlow.getStartTime()); Assert.assertEquals(flow.getSubmitTime(), fetchFlow.getSubmitTime()); Assert.assertEquals(flow.getFlowId(), fetchFlow.getFlowId()); Assert.assertEquals(flow.getProjectId(), fetchFlow.getProjectId()); Assert.assertEquals(flow.getVersion(), fetchFlow.getVersion()); Assert.assertEquals(flow.getExecutionOptions().getFailureAction(), fetchFlow.getExecutionOptions().getFailureAction()); Assert.assertEquals(new HashSet<>(flow.getEndNodes()), new HashSet<>(fetchFlow.getEndNodes())); } @Test public void testUpdateExecutionFlows() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final ExecutableFlow flow = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow); final ExecutableFlow fetchFlow2 = loader.fetchExecutableFlow(flow.getExecutionId()); fetchFlow2.setEndTime(System.currentTimeMillis()); fetchFlow2.setStatus(Status.SUCCEEDED); loader.updateExecutableFlow(fetchFlow2); final ExecutableFlow fetchFlow = loader.fetchExecutableFlow(flow.getExecutionId()); // Shouldn't be the same object. 
Assert.assertTrue(flow != fetchFlow); Assert.assertEquals(flow.getExecutionId(), fetchFlow.getExecutionId()); Assert.assertEquals(fetchFlow2.getEndTime(), fetchFlow.getEndTime()); Assert.assertEquals(fetchFlow2.getStatus(), fetchFlow.getStatus()); Assert.assertEquals(flow.getStartTime(), fetchFlow.getStartTime()); Assert.assertEquals(flow.getSubmitTime(), fetchFlow.getSubmitTime()); Assert.assertEquals(flow.getFlowId(), fetchFlow.getFlowId()); Assert.assertEquals(flow.getProjectId(), fetchFlow.getProjectId()); Assert.assertEquals(flow.getVersion(), fetchFlow.getVersion()); Assert.assertEquals(flow.getExecutionOptions().getFailureAction(), fetchFlow.getExecutionOptions().getFailureAction()); Assert.assertEquals(new HashSet<>(flow.getEndNodes()), new HashSet<>(fetchFlow.getEndNodes())); } @Test public void testUploadExecutableNode() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final ExecutableFlow flow = createExecutableFlow(10, "exec1"); flow.setExecutionId(10); final File jobFile = ExecutionsTestUtil.getFlowFile("exectest1", "job10.job"); final Props props = new Props(null, jobFile); props.put("test", "test2"); final ExecutableNode oldNode = flow.getExecutableNode("job10"); oldNode.setStartTime(System.currentTimeMillis()); loader.uploadExecutableNode(oldNode, props); final ExecutableJobInfo info = loader.fetchJobInfo(10, "job10", 0); Assert.assertEquals(flow.getExecutionId(), info.getExecId()); Assert.assertEquals(flow.getProjectId(), info.getProjectId()); Assert.assertEquals(flow.getVersion(), info.getVersion()); Assert.assertEquals(flow.getFlowId(), info.getFlowId()); Assert.assertEquals(oldNode.getId(), info.getJobId()); Assert.assertEquals(oldNode.getStatus(), info.getStatus()); Assert.assertEquals(oldNode.getStartTime(), info.getStartTime()); Assert.assertEquals("endTime = " + oldNode.getEndTime() + " info endTime = " + info.getEndTime(), oldNode.getEndTime(), info.getEndTime()); // Fetch props final Props outputProps = new Props(); outputProps.put("hello", "output"); oldNode.setOutputProps(outputProps); oldNode.setEndTime(System.currentTimeMillis()); loader.updateExecutableNode(oldNode); final Props fInputProps = loader.fetchExecutionJobInputProps(10, "job10"); final Props fOutputProps = loader.fetchExecutionJobOutputProps(10, "job10"); final Pair<Props, Props> inOutProps = loader.fetchExecutionJobProps(10, "job10"); Assert.assertEquals(fInputProps.get("test"), "test2"); Assert.assertEquals(fOutputProps.get("hello"), "output"); Assert.assertEquals(inOutProps.getFirst().get("test"), "test2"); Assert.assertEquals(inOutProps.getSecond().get("hello"), "output"); } /* Test exception when unassigning an missing execution */ @Test public void testUnassignExecutorException() throws ExecutorManagerException, IOException { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); try { loader.unassignExecutor(2); Assert.fail("Expecting exception, but didn't get one"); } catch (final ExecutorManagerException ex) { System.out.println("Test true"); } } /* Test happy case when unassigning executor for a flow execution */ @Test public void testUnassignExecutor() throws ExecutorManagerException, IOException { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final String host = "localhost"; final int port = 12345; final Executor executor = loader.addExecutor(host, port); final ExecutableFlow flow = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow); 
loader.assignExecutor(executor.getId(), flow.getExecutionId()); Assert.assertEquals( loader.fetchExecutorByExecutionId(flow.getExecutionId()), executor); loader.unassignExecutor(flow.getExecutionId()); Assert.assertEquals( loader.fetchExecutorByExecutionId(flow.getExecutionId()), null); } /* Test exception when assigning a non-existent executor to a flow */ @Test public void testAssignExecutorInvalidExecutor() throws ExecutorManagerException, IOException { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final ExecutableFlow flow = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow); try { loader.assignExecutor(flow.getExecutionId(), 1); Assert.fail("Expecting exception, but didn't get one"); } catch (final ExecutorManagerException ex) { System.out.println("Test true"); } } /* Test exception when assigning an executor to a non-existent flow execution */ @Test public void testAssignExecutorInvalidExecution() throws ExecutorManagerException, IOException { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final String host = "localhost"; final int port = 12345; final Executor executor = loader.addExecutor(host, port); try { loader.assignExecutor(2, executor.getId()); Assert.fail("Expecting exception, but didn't get one"); } catch (final ExecutorManagerException ex) { System.out.println("Test true"); } } /* Test null return when an invalid execution flows */ @Test public void testFetchMissingExecutorByExecution() throws ExecutorManagerException, IOException { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); Assert.assertEquals(loader.fetchExecutorByExecutionId(1), null); } /* Test null return when for a non-dispatched execution */ @Test public void testFetchExecutorByQueuedExecution() throws ExecutorManagerException, IOException { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final ExecutableFlow flow = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow); Assert.assertEquals(loader.fetchExecutorByExecutionId(flow.getExecutionId()), null); } /* Test happy case when assigning and fetching an executor to a flow execution */ @Test public void testAssignAndFetchExecutor() throws ExecutorManagerException, IOException { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final String host = "localhost"; final int port = 12345; final Executor executor = loader.addExecutor(host, port); final ExecutableFlow flow = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow); loader.assignExecutor(executor.getId(), flow.getExecutionId()); Assert.assertEquals(loader.fetchExecutorByExecutionId(flow.getExecutionId()), executor); } /* Test fetchQueuedFlows when there are no queued flows */ @Test public void testFetchNoQueuedFlows() throws ExecutorManagerException, IOException { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final List<Pair<ExecutionReference, ExecutableFlow>> queuedFlows = loader.fetchQueuedFlows(); // no execution flows at all i.e. 
no running, completed or queued flows Assert.assertTrue(queuedFlows.isEmpty()); final String host = "lcoalhost"; final int port = 12345; final Executor executor = loader.addExecutor(host, port); // When a flow is assigned an executor, it is no longer in queued state final ExecutableFlow flow = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow); loader.assignExecutor(executor.getId(), flow.getExecutionId()); Assert.assertTrue(queuedFlows.isEmpty()); // When flow status is finished, it is no longer in queued state final ExecutableFlow flow2 = TestUtils.createExecutableFlow("exectest1", "exec2"); loader.uploadExecutableFlow(flow2); flow2.setStatus(Status.SUCCEEDED); loader.updateExecutableFlow(flow2); Assert.assertTrue(queuedFlows.isEmpty()); } /* Test fetchQueuedFlows happy case */ @Test public void testFetchQueuedFlows() throws ExecutorManagerException, IOException { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final ExecutableFlow flow = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow); final ExecutableFlow flow2 = TestUtils.createExecutableFlow("exectest1", "exec2"); loader.uploadExecutableFlow(flow2); final List<Pair<ExecutionReference, ExecutableFlow>> fetchedQueuedFlows = loader .fetchQueuedFlows(); Assert.assertEquals(2, fetchedQueuedFlows.size()); final Pair<ExecutionReference, ExecutableFlow> fetchedFlow1 = fetchedQueuedFlows.get(0); final Pair<ExecutionReference, ExecutableFlow> fetchedFlow2 = fetchedQueuedFlows.get(1); Assert.assertEquals(flow.getExecutionId(), fetchedFlow1.getSecond().getExecutionId()); Assert.assertEquals(flow.getFlowId(), fetchedFlow1.getSecond().getFlowId()); Assert.assertEquals(flow.getProjectId(), fetchedFlow1.getSecond().getProjectId()); Assert.assertEquals(flow2.getExecutionId(), fetchedFlow2.getSecond().getExecutionId()); Assert.assertEquals(flow2.getFlowId(), fetchedFlow2.getSecond().getFlowId()); Assert.assertEquals(flow2.getProjectId(), fetchedFlow2.getSecond().getProjectId()); } /* Test all executors fetch from empty executors */ @Test public void testFetchEmptyExecutors() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final List<Executor> executors = loader.fetchAllExecutors(); Assert.assertEquals(executors.size(), 0); } /* Test active executors fetch from empty executors */ @Test public void testFetchEmptyActiveExecutors() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final List<Executor> executors = loader.fetchActiveExecutors(); Assert.assertEquals(executors.size(), 0); } /* Test missing executor fetch with search by executor id */ @Test public void testFetchMissingExecutorId() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final Executor executor = loader.fetchExecutor(0); Assert.assertEquals(executor, null); } /* Test missing executor fetch with search by host:port */ @Test public void testFetchMissingExecutorHostPort() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final Executor executor = loader.fetchExecutor("localhost", 12345); Assert.assertEquals(executor, null); } /* Test executor events fetch from with no logged executor */ @Test public void testFetchEmptyExecutorEvents() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final Executor executor = new Executor(1, "localhost", 12345, 
true); final List<ExecutorLogEvent> executorEvents = loader.getExecutorEvents(executor, 5, 0); Assert.assertEquals(executorEvents.size(), 0); } /* Test logging ExecutorEvents */ @Test public void testExecutorEvents() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final int skip = 1; final User user = new User("testUser"); final Executor executor = new Executor(1, "localhost", 12345, true); final String message = "My message "; final EventType[] events = {EventType.CREATED, EventType.HOST_UPDATE, EventType.INACTIVATION}; for (final EventType event : events) { loader.postExecutorEvent(executor, event, user.getUserId(), message + event.getNumVal()); } final List<ExecutorLogEvent> eventLogs = loader.getExecutorEvents(executor, 10, skip); Assert.assertTrue(eventLogs.size() == 2); for (int index = 0; index < eventLogs.size(); ++index) { final ExecutorLogEvent eventLog = eventLogs.get(index); Assert.assertEquals(eventLog.getExecutorId(), executor.getId()); Assert.assertEquals(eventLog.getUser(), user.getUserId()); Assert.assertEquals(eventLog.getType(), events[index + skip]); Assert.assertEquals(eventLog.getMessage(), message + events[index + skip].getNumVal()); } } /* Test to add duplicate executors */ @Test public void testDuplicateAddExecutor() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); try { final String host = "localhost"; final int port = 12345; loader.addExecutor(host, port); loader.addExecutor(host, port); Assert.fail("Expecting exception, but didn't get one"); } catch (final ExecutorManagerException ex) { System.out.println("Test true"); } } /* Test to try update a non-existent executor */ @Test public void testMissingExecutorUpdate() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); try { final Executor executor = new Executor(1, "localhost", 1234, true); loader.updateExecutor(executor); Assert.fail("Expecting exception, but didn't get one"); } catch (final ExecutorManagerException ex) { System.out.println("Test true"); } clearDB(); } /* Test add & fetch by Id Executors */ @Test public void testSingleExecutorFetchById() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final List<Executor> executors = addTestExecutors(loader); for (final Executor executor : executors) { final Executor fetchedExecutor = loader.fetchExecutor(executor.getId()); Assert.assertEquals(executor, fetchedExecutor); } } /* Test fetch all executors */ @Test public void testFetchAllExecutors() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final List<Executor> executors = addTestExecutors(loader); executors.get(0).setActive(false); loader.updateExecutor(executors.get(0)); final List<Executor> fetchedExecutors = loader.fetchAllExecutors(); Assert.assertEquals(executors.size(), fetchedExecutors.size()); Assert.assertArrayEquals(executors.toArray(), fetchedExecutors.toArray()); } /* Test fetch only active executors */ @Test public void testFetchActiveExecutors() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final List<Executor> executors = addTestExecutors(loader); executors.get(0).setActive(false); loader.updateExecutor(executors.get(0)); final List<Executor> fetchedExecutors = loader.fetchActiveExecutors(); Assert.assertEquals(executors.size(), fetchedExecutors.size() + 1); executors.remove(0); 
Assert.assertArrayEquals(executors.toArray(), fetchedExecutors.toArray()); } /* Test add & fetch by host:port Executors */ @Test public void testSingleExecutorFetchHostPort() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final List<Executor> executors = addTestExecutors(loader); for (final Executor executor : executors) { final Executor fetchedExecutor = loader.fetchExecutor(executor.getHost(), executor.getPort()); Assert.assertEquals(executor, fetchedExecutor); } } /* Helper method used in methods testing jdbc interface for executors table */ private List<Executor> addTestExecutors(final ExecutorLoader loader) throws ExecutorManagerException { final List<Executor> executors = new ArrayList<>(); executors.add(loader.addExecutor("localhost1", 12345)); executors.add(loader.addExecutor("localhost2", 12346)); executors.add(loader.addExecutor("localhost1", 12347)); return executors; } /* Test Executor Inactivation */ @Test public void testExecutorInactivation() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final Executor executor = loader.addExecutor("localhost1", 12345); Assert.assertTrue(executor.isActive()); executor.setActive(false); loader.updateExecutor(executor); final Executor fetchedExecutor = loader.fetchExecutor(executor.getId()); Assert.assertEquals(executor.getHost(), fetchedExecutor.getHost()); Assert.assertEquals(executor.getId(), fetchedExecutor.getId()); Assert.assertEquals(executor.getPort(), fetchedExecutor.getPort()); Assert.assertFalse(fetchedExecutor.isActive()); } /* Test Removing Executor */ @Test public void testRemovingExecutor() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final Executor executor = loader.addExecutor("localhost1", 12345); Assert.assertNotNull(executor); loader.removeExecutor("localhost1", 12345); final Executor fetchedExecutor = loader.fetchExecutor("localhost1", 12345); Assert.assertNull(fetchedExecutor); } /* Test Executor reactivation */ @Test public void testExecutorActivation() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final Executor executor = loader.addExecutor("localhost1", 12345); Assert.assertTrue(executor.isActive()); executor.setActive(false); loader.updateExecutor(executor); Executor fetchedExecutor = loader.fetchExecutor(executor.getId()); Assert.assertFalse(fetchedExecutor.isActive()); executor.setActive(true); loader.updateExecutor(executor); fetchedExecutor = loader.fetchExecutor(executor.getId()); Assert.assertEquals(executor, fetchedExecutor); } @Test public void testFetchActiveFlowsExecutorAssigned() throws Exception { if (!isTestSetup()) { return; } // Upload flow1, executor assigned final ExecutorLoader loader = createLoader(); final ExecutableFlow flow1 = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow1); final Executor executor = loader.addExecutor("test", 1); loader.assignExecutor(executor.getId(), flow1.getExecutionId()); // Upload flow2, executor not assigned final ExecutableFlow flow2 = TestUtils.createExecutableFlow("exectest1", "exec2"); loader.uploadExecutableFlow(flow2); final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> activeFlows1 = loader.fetchActiveFlows(); Assert.assertTrue(activeFlows1.containsKey(flow1.getExecutionId())); Assert.assertFalse(activeFlows1.containsKey(flow2.getExecutionId())); final ExecutableFlow flow1Result = 
activeFlows1.get(flow1.getExecutionId()).getSecond(); Assert.assertNotNull(flow1Result); Assert.assertTrue(flow1 != flow1Result); Assert.assertEquals(flow1.getExecutionId(), flow1Result.getExecutionId()); Assert.assertEquals(flow1.getEndTime(), flow1Result.getEndTime()); Assert.assertEquals(flow1.getStartTime(), flow1Result.getStartTime()); Assert.assertEquals(flow1.getSubmitTime(), flow1Result.getSubmitTime()); Assert.assertEquals(flow1.getFlowId(), flow1Result.getFlowId()); Assert.assertEquals(flow1.getProjectId(), flow1Result.getProjectId()); Assert.assertEquals(flow1.getVersion(), flow1Result.getVersion()); Assert.assertEquals(flow1.getExecutionOptions().getFailureAction(), flow1Result.getExecutionOptions().getFailureAction()); } @Test public void testFetchActiveFlowsStatusChanged() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final ExecutableFlow flow1 = TestUtils.createExecutableFlow("exectest1", "exec1"); // Flow status is PREPARING when uploaded, should be in active flows loader.uploadExecutableFlow(flow1); final Executor executor = loader.addExecutor("test", 1); loader.assignExecutor(executor.getId(), flow1.getExecutionId()); Map<Integer, Pair<ExecutionReference, ExecutableFlow>> activeFlows = loader.fetchActiveFlows(); Assert.assertTrue(activeFlows.containsKey(flow1.getExecutionId())); // When flow status becomes SUCCEEDED/KILLED/FAILED, it should not be in active state flow1.setStatus(Status.SUCCEEDED); loader.updateExecutableFlow(flow1); activeFlows = loader.fetchActiveFlows(); Assert.assertFalse(activeFlows.containsKey(flow1.getExecutionId())); flow1.setStatus(Status.KILLED); loader.updateExecutableFlow(flow1); activeFlows = loader.fetchActiveFlows(); Assert.assertFalse(activeFlows.containsKey(flow1.getExecutionId())); flow1.setStatus(Status.FAILED); loader.updateExecutableFlow(flow1); activeFlows = loader.fetchActiveFlows(); Assert.assertFalse(activeFlows.containsKey(flow1.getExecutionId())); } @Test public void testFetchActiveFlowsReferenceChanged() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final ExecutableFlow flow1 = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow1); final Executor executor = loader.addExecutor("test", 1); loader.assignExecutor(executor.getId(), flow1.getExecutionId()); final ExecutionReference ref1 = new ExecutionReference(flow1.getExecutionId(), executor); loader.addActiveExecutableReference(ref1); final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> activeFlows1 = loader.fetchActiveFlows(); Assert.assertTrue(activeFlows1.containsKey(flow1.getExecutionId())); // Verify active flows are not fetched from active_executing_flows DB table any more loader.removeActiveExecutableReference(flow1.getExecutionId()); Assert.assertTrue(activeFlows1.containsKey(flow1.getExecutionId())); } @Test public void testFetchActiveFlowByExecId() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final ExecutableFlow flow1 = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow1); final Executor executor = loader.addExecutor("test", 1); loader.assignExecutor(executor.getId(), flow1.getExecutionId()); final Pair<ExecutionReference, ExecutableFlow> activeFlow1 = loader.fetchActiveFlowByExecId(flow1.getExecutionId()); final ExecutionReference execRef1 = activeFlow1.getFirst(); final ExecutableFlow execFlow1 = activeFlow1.getSecond(); 
Assert.assertNotNull(execRef1); Assert.assertNotNull(execFlow1); Assert.assertEquals(flow1.getExecutionId(), execFlow1.getExecutionId()); Assert.assertEquals(flow1.getFlowId(), execFlow1.getFlowId()); Assert.assertEquals(flow1.getProjectId(), execFlow1.getProjectId()); Assert.assertEquals(flow1.getVersion(), execFlow1.getVersion()); } @Test public void testFetchRecentlyFinishedFlows() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final ExecutableFlow flow1 = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow1); flow1.setStatus(Status.SUCCEEDED); flow1.setEndTime(DateTimeUtils.currentTimeMillis()); loader.updateExecutableFlow(flow1); //Flow just finished. Fetch recently finished flows immediately. Should get it. final List<ExecutableFlow> flows = loader.fetchRecentlyFinishedFlows( RECENTLY_FINISHED_LIFETIME); Assert.assertEquals(1, flows.size()); Assert.assertEquals(flow1.getExecutionId(), flows.get(0).getExecutionId()); Assert.assertEquals(flow1.getProjectName(), flows.get(0).getProjectName()); Assert.assertEquals(flow1.getFlowId(), flows.get(0).getFlowId()); Assert.assertEquals(flow1.getVersion(), flows.get(0).getVersion()); } @Test public void testFetchEmptyRecentlyFinishedFlows() throws Exception { if (!isTestSetup()) { return; } final ExecutorLoader loader = createLoader(); final ExecutableFlow flow1 = TestUtils.createExecutableFlow("exectest1", "exec1"); loader.uploadExecutableFlow(flow1); flow1.setStatus(Status.SUCCEEDED); flow1.setEndTime(DateTimeUtils.currentTimeMillis()); loader.updateExecutableFlow(flow1); //Todo jamiesjc: use java8.java.time api instead of jodatime //Mock flow finished time to be 2 min ago. DateTimeUtils.setCurrentMillisOffset(-FLOW_FINISHED_TIME.toMillis()); flow1.setEndTime(DateTimeUtils.currentTimeMillis()); loader.updateExecutableFlow(flow1); //Fetch recently finished flows within 1 min. Should be empty. 
final List<ExecutableFlow> flows = loader .fetchRecentlyFinishedFlows(RECENTLY_FINISHED_LIFETIME); Assert.assertTrue(flows.isEmpty()); } @Ignore @Test public void testSmallUploadLog() throws ExecutorManagerException { final File logDir = ExecutionsTestUtil.getFlowDir(LOG_TEST_DIR_NAME); final File[] smalllog = {new File(logDir, "log1.log"), new File(logDir, "log2.log"), new File(logDir, "log3.log")}; final ExecutorLoader loader = createLoader(); loader.uploadLogFile(1, "smallFiles", 0, smalllog); final LogData data = loader.fetchLogs(1, "smallFiles", 0, 0, 50000); Assert.assertNotNull(data); Assert.assertEquals("Logs length is " + data.getLength(), data.getLength(), 53); System.out.println(data.toString()); final LogData data2 = loader.fetchLogs(1, "smallFiles", 0, 10, 20); System.out.println(data2.toString()); Assert.assertNotNull(data2); Assert.assertEquals("Logs length is " + data2.getLength(), data2.getLength(), 20); } @Ignore @Test public void testLargeUploadLog() throws ExecutorManagerException { final File logDir = ExecutionsTestUtil.getFlowDir(LOG_TEST_DIR_NAME); // Multiple of 255 for Henry the Eigth final File[] largelog = {new File(logDir, "largeLog1.log"), new File(logDir, "largeLog2.log"), new File(logDir, "largeLog3.log")}; final ExecutorLoader loader = createLoader(); loader.uploadLogFile(1, "largeFiles", 0, largelog); final LogData logsResult = loader.fetchLogs(1, "largeFiles", 0, 0, 64000); Assert.assertNotNull(logsResult); Assert.assertEquals("Logs length is " + logsResult.getLength(), logsResult.getLength(), 64000); final LogData logsResult2 = loader.fetchLogs(1, "largeFiles", 0, 1000, 64000); Assert.assertNotNull(logsResult2); Assert.assertEquals("Logs length is " + logsResult2.getLength(), logsResult2.getLength(), 64000); final LogData logsResult3 = loader.fetchLogs(1, "largeFiles", 0, 330000, 400000); Assert.assertNotNull(logsResult3); Assert.assertEquals("Logs length is " + logsResult3.getLength(), logsResult3.getLength(), 5493); final LogData logsResult4 = loader.fetchLogs(1, "largeFiles", 0, 340000, 400000); Assert.assertNull(logsResult4); final LogData logsResult5 = loader.fetchLogs(1, "largeFiles", 0, 153600, 204800); Assert.assertNotNull(logsResult5); Assert.assertEquals("Logs length is " + logsResult5.getLength(), logsResult5.getLength(), 181893); final LogData logsResult6 = loader.fetchLogs(1, "largeFiles", 0, 150000, 250000); Assert.assertNotNull(logsResult6); Assert.assertEquals("Logs length is " + logsResult6.getLength(), logsResult6.getLength(), 185493); } @Ignore @Test public void testRemoveExecutionLogsByTime() throws ExecutorManagerException, IOException, InterruptedException { final ExecutorLoader loader = createLoader(); final File logDir = ExecutionsTestUtil.getFlowDir(LOG_TEST_DIR_NAME); // Multiple of 255 for Henry the Eigth final File[] largelog = {new File(logDir, "largeLog1.log"), new File(logDir, "largeLog2.log"), new File(logDir, "largeLog3.log")}; final DateTime time1 = DateTime.now(); loader.uploadLogFile(1, "oldlog", 0, largelog); // sleep for 5 seconds Thread.currentThread().sleep(5000); loader.uploadLogFile(2, "newlog", 0, largelog); final DateTime time2 = time1.plusMillis(2500); final int count = loader.removeExecutionLogsByTime(time2.getMillis()); System.out.print("Removed " + count + " records"); LogData logs = loader.fetchLogs(1, "oldlog", 0, 0, 22222); Assert.assertTrue(logs == null); logs = loader.fetchLogs(2, "newlog", 0, 0, 22222); Assert.assertFalse(logs == null); } private ExecutableFlow createExecutableFlow(final int 
executionId, final String flowName) throws IOException { final ExecutableFlow execFlow = TestUtils.createExecutableFlow("exectest1", flowName); execFlow.setExecutionId(executionId); return execFlow; } private ExecutorLoader createLoader() { final Props props = new Props(); props.put("database.type", "mysql"); props.put("mysql.host", host); props.put("mysql.port", port); props.put("mysql.user", user); props.put("mysql.database", database); props.put("mysql.password", password); props.put("mysql.numconnections", numConnections); //TODO kunkun-tang: temporary work-around here. This Test is to be deprecated. return new JdbcExecutorLoader(props, new CommonMetrics(new MetricsManager(new MetricRegistry())), null , null, null, null, null, null, null); } private boolean isTestSetup() { if (!testDBExists) { System.err.println("Skipping DB test because Db not setup."); return false; } System.out.println("Running DB test because Db setup."); return true; } public static class CountHandler implements ResultSetHandler<Integer> { @Override public Integer handle(final ResultSet rs) throws SQLException { int val = 0; while (rs.next()) { val++; } return val; } } }
idx: 1
id: 14,543
msg: So the tests run only when the DB exists? Otherwise everything passes?
proj: azkaban-azkaban
lang: java
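The review comment points at the `if (!isTestSetup()) { return; }` guard used throughout the file above: when the MySQL database is missing, every test returns early and is still reported as passing. A minimal sketch, written in Go like the other examples in this file, of the alternative where missing prerequisites surface as skipped tests rather than silent passes; the helper name and connection string are hypothetical:

package dbtest

import (
	"database/sql"
	"testing"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
)

// openTestDB returns the test database, or marks the calling test as
// skipped when the database is unreachable, so a missing DB shows up
// as SKIP in the test report rather than PASS.
func openTestDB(t *testing.T) *sql.DB {
	t.Helper()
	db, err := sql.Open("mysql", "azkaban:azkaban@tcp(localhost:3306)/azkaban2")
	if err != nil {
		t.Skipf("skipping: cannot open test database: %v", err)
	}
	if err := db.Ping(); err != nil {
		t.Skipf("skipping: test database not reachable: %v", err)
	}
	return db
}

func TestUploadExecutionFlows(t *testing.T) {
	db := openTestDB(t)
	defer db.Close()
	// ... exercise the executor loader against db ...
}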
patch:
@@ -0,0 +1,12 @@
+package bytesink
+
+import "io"
+
+// ByteSink represents a location to which bytes can be written. The ByteSink
+// should be closed after all bytes have been written.
+type ByteSink interface {
+	io.Writer
+	io.Closer
+	Open() error
+	ID() string
+}
y: 1
oldf: (empty)
idx: 1
id: 18,324
msg: `Remove` is unclear. It seems the point is to allow the `FifoByteSink` to delete the underlying file. There is an implicit contract that consumers of this interface call `Remove` after `Close` without necessarily understanding what `Remove` does. I get that it's a little awkward to have `FifoByteSink` delete the file in `Close`. It would probably be best to just drop this method from the interface, since `AddSink` works with an actual `FifoByteSink`. I would rename it in `FifoByteSink` to `DeleteFile` or `Cleanup` or something that makes it clearer what's being removed.
proj: filecoin-project-venus
lang: go
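A minimal Go sketch of the shape the review comment suggests: keep the ByteSink interface exactly as in the patch (no Remove method) and give the concrete fifo-backed sink an explicitly named cleanup method that callers invoke after Close. The FifoByteSink fields and the Cleanup name are assumptions for illustration, not the project's actual code.

package bytesink

import (
	"io"
	"os"
)

// ByteSink represents a location to which bytes can be written. The ByteSink
// should be closed after all bytes have been written.
type ByteSink interface {
	io.Writer
	io.Closer
	Open() error
	ID() string
}

// FifoByteSink is an illustrative fifo-backed sink; field names are assumed.
type FifoByteSink struct {
	file *os.File
	path string
}

func (s *FifoByteSink) Open() error {
	f, err := os.OpenFile(s.path, os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	s.file = f
	return nil
}

func (s *FifoByteSink) ID() string                  { return s.path }
func (s *FifoByteSink) Write(p []byte) (int, error) { return s.file.Write(p) }
func (s *FifoByteSink) Close() error                { return s.file.Close() }

// Cleanup deletes the underlying fifo file. Keeping it off the ByteSink
// interface and naming it explicitly is the change the reviewer suggests:
// code that holds a concrete *FifoByteSink calls Cleanup after Close.
func (s *FifoByteSink) Cleanup() error {
	return os.Remove(s.path)
}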
patch:
@@ -0,0 +1,13 @@
+package core
+
+import (
+	"math/big"
+
+	"github.com/filecoin-project/go-filecoin/types"
+)
+
+type Ask struct {
+	Price *big.Int
+	Size  *big.Int
+	Miner types.Address
+}
y: 1
oldf: (empty)
idx: 1
id: 10,222
msg: Mentioned elsewhere but repeating again: it would be nice to have units (ideally `Size *Bytes`).
proj: filecoin-project-venus
lang: go
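A minimal sketch of what the review comment hints at: wrapping the raw *big.Int in a named unit type so the Size field carries its units in the type system. The Bytes type and its methods are hypothetical, not part of go-filecoin, and the Miner field is omitted to keep the fragment self-contained.

package core

import "math/big"

// Bytes is a hypothetical unit type: a size expressed in bytes,
// backed by an arbitrary-precision integer.
type Bytes struct {
	val *big.Int
}

// NewBytes wraps a raw byte count in the unit type.
func NewBytes(n uint64) *Bytes {
	return &Bytes{val: new(big.Int).SetUint64(n)}
}

// BigInt returns a copy of the underlying value for call sites that
// still need a raw integer.
func (b *Bytes) BigInt() *big.Int { return new(big.Int).Set(b.val) }

// Ask with the Size field expressed in an explicit unit rather than a
// bare *big.Int, as the review comment suggests.
type Ask struct {
	Price *big.Int
	Size  *Bytes
}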
patch:
@@ -269,7 +269,7 @@ describe "Bolt::CLI" do
       cli.execute(cli.parse)
     end

-    it 'install modules from Puppetfile with resolving' do
+    it 'install modules from Puppetfile without resolving' do
       cli = Bolt::CLI.new(%W[module install --project #{project} --no-resolve])

       allow(installer).to receive(:install) do |*args|
y: 1
# frozen_string_literal: true require 'spec_helper' require 'bolt_spec/files' require 'bolt_spec/task' require 'bolt_spec/project' require 'bolt/cli' require 'bolt/util' require 'concurrent/utility/processor_counter' require 'r10k/action/puppetfile/install' require 'yaml' describe "Bolt::CLI" do include BoltSpec::Files include BoltSpec::Task let(:inventory) { Bolt::Inventory.empty } let(:target) { inventory.get_target('foo') } before(:each) do outputter = Bolt::Outputter::Human.new(false, false, false, StringIO.new) allow_any_instance_of(Bolt::CLI).to receive(:outputter).and_return(outputter) allow_any_instance_of(Bolt::CLI).to receive(:warn) # Don't print error messages to the console allow($stdout).to receive(:puts) # Don't allow tests to override the captured log config allow(Bolt::Logger).to receive(:configure) Logging.logger[:root].level = :info end def stub_file(path) stat = double('stat', readable?: true, file?: true, directory?: false) allow(Bolt::Util).to receive(:file_stat).with(path).and_return(stat) end def stub_non_existent_file(path) allow(Bolt::Util).to receive(:file_stat).with(path).and_raise( Errno::ENOENT, "No such file or directory @ rb_file_s_stat - #{path}" ) end def stub_unreadable_file(path) stat = double('stat', readable?: false, file?: true) allow(Bolt::Util).to receive(:file_stat).with(path).and_return(stat) end def stub_directory(path) stat = double('stat', readable?: true, file?: false, directory?: true) allow(Bolt::Util).to receive(:file_stat).with(path).and_return(stat) end def stub_config(file_content = {}) allow(Bolt::Util).to receive(:read_yaml_hash).and_return(file_content) allow(Bolt::Util).to receive(:read_optional_yaml_hash).and_return(file_content) end context 'gem install' do around(:each) do |example| original_value = ENV['BOLT_GEM'] example.run ensure ENV['BOLT_GEM'] = original_value end it 'displays a warning when Bolt is installed as a gem' do ENV.delete('BOLT_GEM') cli = Bolt::CLI.new(%w[task show]) allow(cli).to receive(:incomplete_install?).and_return(true) cli.execute(cli.parse) output = @log_output.readlines.join expect(output).to match(/Bolt may be installed as a gem/) end it 'does not display a warning when BOLT_GEM is set' do ENV['BOLT_GEM'] = 'true' cli = Bolt::CLI.new(%w[task show]) allow(cli).to receive(:incomplete_install?).and_return(true) cli.execute(cli.parse) output = @log_output.readlines.join expect(output).not_to match(/Bolt may be installed as a gem/) end end context 'guide' do let(:config) { double('config', format: nil) } let(:topic) { 'project' } context '#guides' do it 'returns a hash of topics and filepaths to guides' do expect(Dir).to receive(:children).and_return(['milo.txt']) cli = Bolt::CLI.new(['guide']) expect(cli.guides).to match( 'milo' => %r{guides/milo.txt} ) end end context '#list_topics' do it 'lists topics' do cli = Bolt::CLI.new(['guide']) expect(cli.outputter).to receive(:print_topics).with(cli.guides.keys) cli.list_topics end it 'returns 0' do cli = Bolt::CLI.new(['guide']) expect(cli.list_topics).to eq(0) end end context '#show_guide' do before(:each) do allow_any_instance_of(Bolt::CLI).to receive(:analytics).and_return(Bolt::Analytics::NoopClient.new) end it 'prints a guide for a known topic' do Tempfile.create do |file| content = "The trials and tribulations of Bolty McBoltface\n" File.write(file, content) cli = Bolt::CLI.new(['guide', topic]) allow(cli).to receive(:guides).and_return(topic => file.path) expect(cli.outputter).to receive(:print_guide).with(content, topic) cli.show_guide(topic) end end it 
'submits a known_topic analytics event' do cli = Bolt::CLI.new(['guide', topic]) expect(cli.analytics).to receive(:event).with('Guide', 'known_topic', label: topic) cli.show_guide(topic) end it 'prints a list of topics when given an unknown topic' do topic = 'boltymcboltface' cli = Bolt::CLI.new(['guide', topic]) allow(cli).to receive(:config).and_return(config) expect(cli).to receive(:list_topics) expect(cli.outputter).to receive(:print_message).with(/Did not find guide for topic '#{topic}'/) cli.show_guide(topic) end it 'submits an uknown_topic analytics event' do topic = 'boltymcboltface' cli = Bolt::CLI.new(['guide', topic]) allow(cli).to receive(:config).and_return(config) expect(cli.analytics).to receive(:event).with('Guide', 'unknown_topic', label: topic) cli.show_guide(topic) end it 'returns 0' do cli = Bolt::CLI.new(['guide', topic]) expect(cli.show_guide(topic)).to eq(0) end end end context 'module' do include BoltSpec::Project let(:cli) { Bolt::CLI.new(command + %W[--project #{project_path}]) } let(:command) { %w[module show] } let(:installer) { double('installer', add: true, install: true) } around(:each) do |example| with_project do example.run end end before(:each) do allow(Bolt::ModuleInstaller).to receive(:new).and_return(installer) end it 'errors without modules configured' do expect { cli.parse }.to raise_error( Bolt::CLIError, /Unable to use command/ ) end context 'with modules configured' do let(:project_config) { { 'modules' => [] } } it 'does not error' do result = cli.execute(cli.parse) expect(result).to eq(0) end end context 'add' do let(:project_config) { { 'modules' => [] } } it 'errors without a module' do cli = Bolt::CLI.new(%W[module add --project #{project}]) expect { cli.parse }.to raise_error( Bolt::CLIError, /Must specify a module name/ ) end it 'errors with multiple modules' do cli = Bolt::CLI.new(%W[module add foo bar --project #{project}]) expect { cli.parse }.to raise_error( Bolt::CLIError, /Unknown argument/ ) end it 'runs with a single module' do cli = Bolt::CLI.new(%W[module add puppetlabs-yaml --project #{project}]) expect(installer).to receive(:add) cli.execute(cli.parse) end it 'passes force' do cli = Bolt::CLI.new(%W[module add puppetlabs-yaml --project #{project} --force]) allow(installer).to receive(:install) do |*args| expect(args).to include({ force: true }) end cli.execute(cli.parse) end end context 'install' do let(:command) { %W[module install --project #{project}] } let(:project_config) { { 'modules' => [] } } it 'errors with extra arguments' do cli = Bolt::CLI.new(%W[module install puppetlabs-yaml --project #{project}]) expect { cli.parse }.to raise_error( Bolt::CLIError, /Invalid argument.*bolt module add/ ) end it 'does nothing if project config has no module declarations' do allow(project).to receive(:modules).and_return([]) result = cli.execute(cli.parse) expect(result).to eq(0) expect((project_path + 'Puppetfile').exist?).to eq(false) expect((project_path + '.modules').exist?).to eq(false) end it 'runs' do expect(installer).to receive(:install) cli.execute(cli.parse) end it 'installs project modules forcibly' do cli = Bolt::CLI.new(%W[module install --project #{project} --force]) allow(installer).to receive(:install) do |*args| expect(args).to include({ force: true, resolve: nil }) end cli.execute(cli.parse) end it 'install modules from Puppetfile with resolving' do cli = Bolt::CLI.new(%W[module install --project #{project} --no-resolve]) allow(installer).to receive(:install) do |*args| expect(args).to include({ force: nil, resolve: 
false }) end cli.execute(cli.parse) end end end context 'plan new' do let(:project_name) { 'project' } let(:config) { { 'name' => project_name } } let(:project_path) { @project_dir } let(:config_path) { File.join(project_path, 'bolt-project.yaml') } let(:command) { %W[plan new #{plan_name}] } let(:cli) { Bolt::CLI.new(command) } let(:plan_name) { project_name } let(:project) { Bolt::Project.create_project(project_path) } around(:each) do |example| Dir.mktmpdir(nil, Dir.pwd) do |dir| @project_dir = dir example.run end end before(:each) do File.write(config_path, config.to_yaml) allow(Bolt::Project).to receive(:create_project).and_return(project) end it 'errors without a plan name' do cli = Bolt::CLI.new(%w[plan new]) expect { cli.parse }.to raise_error( Bolt::CLIError, /Must specify a plan name/ ) end it 'calls #new_plan' do allow(cli).to receive(:new_plan).and_return(0) expect(cli).to receive(:new_plan).with(plan_name) cli.execute(cli.parse) end describe '#new_plan' do it 'errors without a named project' do allow(project).to receive(:name).and_return(nil) cli.parse expect { cli.new_plan(plan_name) }.to raise_error( Bolt::Error, /Project directory '.*' is not a named project/ ) end it 'errors when the plan name is invalid' do cli.parse %w[Foo foo-bar foo:: foo::Bar foo::1bar ::foo].each do |plan_name| expect { cli.new_plan(plan_name) }.to raise_error( Bolt::ValidationError, /Invalid plan name '#{plan_name}'/ ) end end it 'errors if the first name segment is not the project name' do plan_name = 'plan' cli.parse expect { cli.new_plan(plan_name) }.to raise_error( Bolt::ValidationError, /First segment of plan name '#{plan_name}' must match project name/ ) end %w[pp yaml].each do |ext| it "errors if there is an existing #{ext} plan with the same name" do plan_path = File.join(project_path, 'plans', "init.#{ext}") FileUtils.mkdir(File.dirname(plan_path)) FileUtils.touch(plan_path) cli.parse expect { cli.new_plan(plan_name) }.to raise_error( Bolt::Error, /A plan with the name '#{plan_name}' already exists/ ) end end it "creates a missing 'plans' directory" do cli.parse expect(Dir.exist?(project.plans_path)).to eq(false) cli.new_plan(plan_name) expect(Dir.exist?(project.plans_path)).to eq(true) end it 'creates a missing directory structure' do plan_name = "#{project_name}::foo::bar" cli.parse expect(Dir.exist?(project.plans_path + 'foo')).to eq(false) cli.new_plan(plan_name) expect(Dir.exist?(project.plans_path + 'foo')).to eq(true) end it 'catches existing file errors when creating directories' do plan_name = "#{project_name}::foo::bar" FileUtils.mkdir(File.join(project_path, 'plans')) FileUtils.touch(File.join(project_path, 'plans', 'foo')) cli.parse expect { cli.new_plan(plan_name) }.to raise_error( Bolt::Error, /unable to create plan directory/ ) end it "creates an 'init' plan when the plan name matches the project name" do cli.parse cli.new_plan(plan_name) plan_path = project.plans_path + 'init.yaml' expect(File.exist?(plan_path)).to eq(true) end it 'creates a plan' do plan_name = "#{project_name}::foo" cli.parse cli.new_plan(plan_name) plan_path = project.plans_path + 'foo.yaml' expect(File.size?(plan_path)).to be end it 'outputs the path to the plan and other helpful information' do cli.parse allow(cli.outputter).to receive(:print_message) do |output| expect(output).to match( /Created plan '#{plan_name}' at '#{project.plans_path + 'init.yaml'}'/ ) expect(output).to match( /bolt plan show #{plan_name}/ ) expect(output).to match( /bolt plan run #{plan_name}/ ) end cli.new_plan(plan_name) end 
it "warns that 'plan new' is experimental" do
        cli.parse
        cli.new_plan(plan_name)
        expect(@log_output.readlines).to include(
          /Command 'bolt plan new' is experimental/
        )
      end
    end
  end

  context "without a config file" do
    let(:project) { Bolt::Project.new({}, '.') }

    before(:each) do
      allow(Bolt::Project).to receive(:find_boltdir).and_return(project)
      allow_any_instance_of(Bolt::Project).to receive(:resource_types)
      allow(Bolt::Util).to receive(:read_yaml_hash).and_return({})
      allow(Bolt::Util).to receive(:read_optional_yaml_hash).and_return({})
    end

    it "generates an error message if an unknown argument is given" do
      cli = Bolt::CLI.new(%w[command run --unknown])
      expect { cli.parse }.to raise_error(Bolt::CLIError, /Unknown argument '--unknown'/)
    end

    it "generates an error message if an unknown subcommand is given" do
      cli = Bolt::CLI.new(%w[--targets bolt1 bolt2 command run whoami])
      expect { cli.parse }.to raise_error(Bolt::CLIError, /Expected subcommand 'bolt2' to be one of/)
    end

    it "generates an error message if an unknown action is given" do
      cli = Bolt::CLI.new(%w[--targets bolt1 command oops whoami])
      expect { cli.parse }.to raise_error(Bolt::CLIError, /Expected action 'oops' to be one of/)
    end

    it "generates an error message if no action is given and one is expected" do
      cli = Bolt::CLI.new(%w[--targets bolt1 command])
      expect { cli.parse }.to raise_error(Bolt::CLIError, /Expected an action/)
    end

    it "works without an action if no action is expected" do
      cli = Bolt::CLI.new(%w[--targets bolt1 apply file.pp])
      expect { cli.parse }.not_to raise_error
    end

    describe "help" do
      it "generates help when no arguments are specified" do
        cli = Bolt::CLI.new([])
        expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.to output(/USAGE.*bolt/m).to_stdout
      end

      it "accepts --help" do
        cli = Bolt::CLI.new(%w[--help])
        expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.to output(/USAGE.*bolt/m).to_stdout
      end

      context 'listing actions with help' do
        it 'accepts command' do
          cli = Bolt::CLI.new(%w[help command])
          expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.to output(/ACTIONS.*run/m).to_stdout
        end

        it 'accepts script' do
          cli = Bolt::CLI.new(%w[help script])
          expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.to output(/ACTIONS.*run/m).to_stdout
        end

        it 'accepts task' do
          cli = Bolt::CLI.new(%w[help task])
          expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.to output(/ACTIONS.*run.*show/m).to_stdout
        end

        it 'accepts plan' do
          cli = Bolt::CLI.new(%w[help plan])
          expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.to output(/ACTIONS.*run.*show/m).to_stdout
        end

        it 'accepts file' do
          cli = Bolt::CLI.new(%w[help file])
          expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.to output(/ACTIONS.*download.*upload/m).to_stdout
        end

        it 'accepts puppetfile' do
          cli = Bolt::CLI.new(%w[help puppetfile])
          expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.to output(/ACTIONS.*install.*show-modules/m).to_stdout
        end

        it 'accepts inventory' do
          cli = Bolt::CLI.new(%w[help inventory])
          expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.to output(/ACTIONS.*show/m).to_stdout
        end

        it 'excludes invalid subcommand flags' do
          cli = Bolt::CLI.new(%w[help puppetfile])
          expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.not_to output(/--private-key/).to_stdout
        end

        it 'excludes invalid subcommand action flags and help text' do
          cli = Bolt::CLI.new(%w[help plan show])
          expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.not_to output(/[parameters].*nodes/m).to_stdout
        end

        it
'accepts apply' do cli = Bolt::CLI.new(%w[help apply]) expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.to output(/USAGE.*bolt apply \[manifest.pp\]/m).to_stdout end end end describe "version" do it "emits a version string" do cli = Bolt::CLI.new(%w[--version]) expect { expect { cli.parse }.to raise_error(Bolt::CLIExit) }.to output(/\d+\.\d+\.\d+/).to_stdout end end describe "nodes" do let(:targets) { [target, Bolt::Target.new('bar')] } it "accepts a single node" do cli = Bolt::CLI.new(%w[command run uptime --targets foo]) options = cli.parse cli.update_targets(options) expect(options).to include(targets: [target]) end it "accepts multiple nodes" do cli = Bolt::CLI.new(%w[command run uptime --targets foo,bar]) options = cli.parse cli.update_targets(options) expect(options).to include(targets: targets) end it "accepts multiple nodes across multiple declarations" do cli = Bolt::CLI.new(%w[command run uptime --targets foo,bar --targets bar,more,bars]) options = cli.parse cli.update_targets(options) extra_targets = [Bolt::Target.new('more'), Bolt::Target.new('bars')] expect(options).to include(targets: targets + extra_targets) end it "reads from stdin when --targets is '-'" do nodes = <<~'NODES' foo bar NODES cli = Bolt::CLI.new(%w[command run uptime --targets -]) allow($stdin).to receive(:read).and_return(nodes) options = cli.parse cli.update_targets(options) expect(options[:targets]).to eq(targets) end it "reads from a file when --targets starts with @" do nodes = <<~'NODES' foo bar NODES with_tempfile_containing('nodes-args', nodes) do |file| cli = Bolt::CLI.new(%W[command run uptime --targets @#{file.path}]) options = cli.parse cli.update_targets(options) expect(options[:targets]).to eq(targets) end end it "strips leading and trailing whitespace" do nodes = " foo\nbar \nbaz\nqux " with_tempfile_containing('nodes-args', nodes) do |file| cli = Bolt::CLI.new(%W[command run uptime --targets @#{file.path}]) options = cli.parse cli.update_targets(options) extra_targets = [Bolt::Target.new('baz'), Bolt::Target.new('qux')] expect(options[:targets]).to eq(targets + extra_targets) end end it "expands tilde to a user directory when --targets starts with @" do expect(File).to receive(:read).with(File.join(Dir.home, 'nodes.txt')).and_return("foo\nbar\n") cli = Bolt::CLI.new(%w[command run uptime --targets @~/nodes.txt]) allow(cli).to receive(:puppetdb_client) options = cli.parse cli.update_targets(options) expect(options[:targets]).to eq(targets) end it "generates an error message if no nodes given" do cli = Bolt::CLI.new(%w[command run uptime --targets]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Option '--targets' needs a parameter/) end it "generates an error message if nodes is omitted" do cli = Bolt::CLI.new(%w[command run uptime]) options = cli.parse expect { cli.update_targets(options) }.to raise_error(Bolt::CLIError, /Command requires a targeting option/) end end describe "targets" do let(:targets) { [target, Bolt::Target.new('bar')] } it "reads from a file when --targets starts with @" do nodes = <<~'NODES' foo bar NODES with_tempfile_containing('nodes-args', nodes) do |file| cli = Bolt::CLI.new(%W[command run uptime --targets @#{file.path}]) options = cli.parse cli.update_targets(options) expect(options[:targets]).to eq(targets) end end it "generates an error message if no targets are given" do cli = Bolt::CLI.new(%w[command run uptime --targets]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Option '--targets' needs a parameter/) end end describe "query" do it 
"accepts a query" do cli = Bolt::CLI.new(%w[command run id --query nodes{}]) allow(cli).to receive(:query_puppetdb_nodes).and_return([]) result = cli.parse expect(result[:query]).to eq('nodes{}') end it "resolves targets based on the query" do cli = Bolt::CLI.new(%w[command run id --query nodes{}]) allow(cli).to receive(:query_puppetdb_nodes).and_return(%w[foo bar]) targets = [Bolt::Target.new('foo'), Bolt::Target.new('bar')] options = cli.parse cli.update_targets(options) expect(options[:targets]).to eq(targets) end it "fails if it can't retrieve targets from PuppetDB" do cli = Bolt::CLI.new(%w[command run id --query nodes{}]) puppetdb = double('puppetdb') allow(puppetdb).to receive(:query_certnames).and_raise(Bolt::PuppetDBError, "failed to puppetdb the nodes") allow(cli).to receive(:puppetdb_client).and_return(puppetdb) options = cli.parse expect { cli.update_targets(options) } .to raise_error(Bolt::PuppetDBError, /failed to puppetdb the nodes/) end it "fails if both --targets and --query are specified" do cli = Bolt::CLI.new(%w[command run id --query nodes{} --targets foo,bar]) options = cli.parse expect { cli.update_targets(options) }.to raise_error(Bolt::CLIError, /Only one/) end end describe "user" do it "accepts a user" do cli = Bolt::CLI.new(%w[command run uptime --user root --targets foo]) expect(cli.parse).to include(user: 'root') end it "generates an error message if no user value is given" do cli = Bolt::CLI.new(%w[command run uptime --targets foo --user]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Option '--user' needs a parameter/) end end describe "password" do it "accepts a password" do cli = Bolt::CLI.new(%w[command run uptime --password opensesame --targets foo]) expect(cli.parse).to include(password: 'opensesame') end end describe "password-prompt" do it "prompts the user for password" do allow($stdin).to receive(:noecho).and_return('opensesame') allow($stderr).to receive(:print).with('Please enter your password: ') allow($stderr).to receive(:puts) cli = Bolt::CLI.new(%w[command run uptime --targets foo --password-prompt]) expect(cli.parse).to include(password: 'opensesame') end end describe "key" do it "accepts a private key" do allow(Bolt::Util).to receive(:validate_file).and_return(true) path = File.expand_path('~/.ssh/google_compute_engine') cli = Bolt::CLI.new(%W[ command run uptime --private-key #{path} --targets foo]) expect(cli.parse).to include('private-key': path) expect(cli.config.transports['ssh']['private-key']).to eq(File.expand_path(path)) end it "expands private key relative to cwd" do allow(Bolt::Util).to receive(:validate_file).and_return(true) path = './ssh/google_compute_engine' cli = Bolt::CLI.new(%W[ command run uptime --private-key #{path} --targets foo]) expect(cli.parse).to include('private-key': File.expand_path(path)) expect(cli.config.transports['ssh']['private-key']).to eq(File.expand_path(path)) end it "generates an error message if no key value is given" do cli = Bolt::CLI.new(%w[command run --targets foo --private-key]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Option '--private-key' needs a parameter/) end end describe "concurrency" do it "accepts a concurrency limit" do cli = Bolt::CLI.new(%w[command run uptime --concurrency 10 --targets foo]) expect(cli.parse).to include(concurrency: 10) end it "defaults to 100 with sufficient ulimit" do cli = Bolt::CLI.new(%w[command run uptime --targets foo]) cli.parse expect(cli.config.concurrency).to eq(100) end it "generates an error message if no concurrency value is given" 
do cli = Bolt::CLI.new(%w[command run uptime --targets foo --concurrency]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Option '--concurrency' needs a parameter/) end end describe "compile-concurrency" do it "accepts a concurrency limit" do cli = Bolt::CLI.new(%w[command run uptime --compile-concurrency 2 --targets foo]) expect(cli.parse).to include('compile-concurrency': 2) end it "defaults to unset" do cli = Bolt::CLI.new(%w[command run uptime --targets foo]) cli.parse # verifies Etc.nprocessors is the same as Concurrent.processor_count expect(cli.config.compile_concurrency).to eq(Concurrent.processor_count) end it "generates an error message if no concurrency value is given" do cli = Bolt::CLI.new(%w[command run uptime --targets foo --compile-concurrency]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Option '--compile-concurrency' needs a parameter/) end end describe "console log level" do it "is not sensitive to ordering of debug and verbose" do expect(Bolt::Logger).to receive(:configure).with(include('console' => { level: :debug }), true) cli = Bolt::CLI.new(%w[command run uptime --targets foo --debug --verbose]) cli.parse end it "errors when debug and log-level are both set" do cli = Bolt::CLI.new(%w[command run uptime --targets foo --debug --log-level notice]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Only one of '--debug' or '--log-level' may be specified/) end it "warns when using debug" do expect(Bolt::Logger).to receive(:deprecation_warning) .with(anything, /Command line option '--debug' is deprecated/) cli = Bolt::CLI.new(%w[command run uptime --targets foo --debug]) cli.parse end it "log-level sets the log option" do expect(Bolt::Logger).to receive(:configure).with(include('console' => { level: 'debug' }), true) cli = Bolt::CLI.new(%w[command run uptime --targets foo --log-level debug]) cli.parse end it "raises a Bolt error when the level is a stringified integer" do cli = Bolt::CLI.new(%w[command run uptime --targets foo --log-level 42]) expect { cli.parse }.to raise_error(Bolt::ValidationError, /level of log console must be one of/) end end describe "host-key-check" do it "accepts `--host-key-check`" do cli = Bolt::CLI.new(%w[command run uptime --host-key-check --targets foo]) cli.parse expect(cli.config.transports['ssh']['host-key-check']).to eq(true) end it "accepts `--no-host-key-check`" do cli = Bolt::CLI.new(%w[command run uptime --no-host-key-check --targets foo]) cli.parse expect(cli.config.transports['ssh']['host-key-check']).to eq(false) end it "defaults to nil" do cli = Bolt::CLI.new(%w[command run uptime --targets foo]) cli.parse expect(cli.config.transports['ssh']['host-key-check']).to eq(nil) end end describe "connect-timeout" do it "accepts a specific timeout" do cli = Bolt::CLI.new(%w[command run uptime --connect-timeout 123 --targets foo]) expect(cli.parse).to include('connect-timeout': 123) end it "generates an error message if no timeout value is given" do cli = Bolt::CLI.new(%w[command run uptime --targets foo --connect-timeout]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Option '--connect-timeout' needs a parameter/) end end describe "modulepath" do it "treats relative modulepath as relative to pwd" do site = File.expand_path('site') modulepath = [site, 'modules'].join(File::PATH_SEPARATOR) cli = Bolt::CLI.new(%W[command run uptime --modulepath #{modulepath} --targets foo]) expect(cli.parse).to include(modulepath: [site, File.expand_path('modules')]) end it "accepts shorthand -m" do site = File.expand_path('site') 
modulepath = [site, 'modules'].join(File::PATH_SEPARATOR) cli = Bolt::CLI.new(%W[command run uptime -m #{modulepath} --targets foo]) expect(cli.parse).to include(modulepath: [site, File.expand_path('modules')]) end it "generates an error message if no value is given" do cli = Bolt::CLI.new(%w[command run uptime --targets foo --modulepath]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Option '--modulepath' needs a parameter/) end end describe "puppetfile" do let(:puppetfile) { File.expand_path('/path/to/Puppetfile') } let(:cli) { Bolt::CLI.new(%W[puppetfile install --puppetfile #{puppetfile}]) } it 'uses a specified Puppetfile' do cli.parse expect(cli.config.puppetfile.to_s).to eq(puppetfile) end end describe "modules" do let(:modules) { 'puppetlabs-apt,puppetlabs-stdlib' } let(:cli) { Bolt::CLI.new(%W[project init --modules #{modules}]) } it 'accepts a comma-separated list of modules' do options = cli.parse expect(options[:modules]).to match([ { 'name' => 'puppetlabs-apt' }, { 'name' => 'puppetlabs-stdlib' } ]) end end describe "sudo" do it "supports running as a user" do cli = Bolt::CLI.new(%w[command run --targets foo whoami --run-as root]) expect(cli.parse[:'run-as']).to eq('root') end end describe "sudo-password" do it "accepts a password" do cli = Bolt::CLI.new(%w[command run uptime --sudo-password opensez --run-as alibaba --targets foo]) expect(cli.parse).to include('sudo-password': 'opensez') end end describe "sudo password-prompt" do it "prompts the user for escalation password" do allow($stdin).to receive(:noecho).and_return('opensesame') allow($stderr).to receive(:print).with('Please enter your privilege escalation password: ') allow($stderr).to receive(:puts) cli = Bolt::CLI.new(%w[command run uptime --targets foo --sudo-password-prompt]) expect(cli.parse).to include('sudo-password': 'opensesame') end end describe "filter" do it "raises an error when a filter has illegal characters" do cli = Bolt::CLI.new(%w[plan show --filter JSON]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Illegal characters in filter string/) end end describe "transport" do it "defaults to 'ssh'" do cli = Bolt::CLI.new(%w[command run --targets foo whoami]) cli.parse expect(cli.config.transport).to eq('ssh') end it "accepts ssh" do cli = Bolt::CLI.new(%w[command run --transport ssh --targets foo id]) expect(cli.parse[:transport]).to eq('ssh') end it "accepts winrm" do cli = Bolt::CLI.new(%w[command run --transport winrm --targets foo id]) expect(cli.parse[:transport]).to eq('winrm') end it "accepts pcp" do cli = Bolt::CLI.new(%w[command run --transport pcp --targets foo id]) expect(cli.parse[:transport]).to eq('pcp') end it "rejects invalid transports" do cli = Bolt::CLI.new(%w[command run --transport holodeck --targets foo id]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Invalid parameter specified for option '--transport': holodeck/) end end describe "command" do it "interprets whoami as the command" do cli = Bolt::CLI.new(%w[command run --targets foo whoami]) expect(cli.parse[:object]).to eq('whoami') end it "errors when a command is not specified" do cli = Bolt::CLI.new(%w[command run --targets foo]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Must specify a command to run/) end it "errors when a command is empty string" do cli = Bolt::CLI.new(['command', 'run', '', '--targets', 'foo']) expect { cli.parse }.to raise_error(Bolt::CLIError, /Must specify a command to run/) end it "sets specified environment variables" do cli = Bolt::CLI.new(%w[command run --targets foo 
whoami --env-var POP=TARTS]) expect(cli.parse[:env_vars]).to eq({ 'POP' => 'TARTS' }) end it "reads from a file when command starts with @" do command = 'whoami' with_tempfile_containing('command', command) do |file| cli = Bolt::CLI.new(%W[command run @#{file.path}]) options = cli.parse expect(options[:object]).to eq(command) end end it "reads from stdin when command is '-'" do command = 'whoami' cli = Bolt::CLI.new(%w[command run - --targets localhost]) allow($stdin).to receive(:read).and_return(command) options = cli.parse expect(options[:object]).to eq(command) end end it "distinguishes subcommands" do cli = Bolt::CLI.new(%w[script run --targets foo]) expect(cli.parse).to include(subcommand: 'script') end describe "file" do describe "upload" do it "uploads a file" do cli = Bolt::CLI.new(%w[file upload ./src /path/dest --targets foo]) result = cli.parse expect(result[:object]).to eq('./src') expect(result[:leftovers].first).to eq('/path/dest') end it "fails with --env-var" do cli = Bolt::CLI.new(%w[file upload -t foo --env-var POP=ROCKS]) expect { cli.parse } .to raise_error(Bolt::CLIError, /Option '--env-var' may only be specified when running a command or script/) end end describe "download" do it "downloads a file" do cli = Bolt::CLI.new(%w[file download /etc/ssh downloads --targets foo]) result = cli.parse expect(result[:object]).to eq('/etc/ssh') expect(result[:leftovers].first).to eq('downloads') end end end describe "handling parameters" do it "returns {} if none are specified" do cli = Bolt::CLI.new(%w[plan run my::plan --modulepath .]) result = cli.parse expect(result[:task_options]).to eq({}) end it "reads params on the command line" do cli = Bolt::CLI.new(%w[plan run my::plan kj=2hv iuhg=iube 2whf=lcv --modulepath .]) result = cli.parse expect(result[:params_parsed]).to eq(false) expect(result[:task_options]).to eq('kj' => '2hv', 'iuhg' => 'iube', '2whf' => 'lcv') end it "reads params in json with the params flag" do json_args = '{"kj":"2hv","iuhg":"iube","2whf":"lcv"}' cli = Bolt::CLI.new(['plan', 'run', 'my::plan', '--params', json_args, '--modulepath', '.']) result = cli.parse expect(result[:params_parsed]).to eq(true) expect(result[:task_options]).to eq('kj' => '2hv', 'iuhg' => 'iube', '2whf' => 'lcv') end it "raises a cli error when json parsing fails" do json_args = '{"k' cli = Bolt::CLI.new(['plan', 'run', 'my::plan', '--params', json_args]) expect { cli.parse }.to raise_error(Bolt::CLIError, /unexpected token/) end it "raises a cli error when specifying params both ways" do cli = Bolt::CLI.new(%w[plan run my::plan --params {"a":"b"} c=d --modulepath .]) expect { cli.parse }.to raise_error(Bolt::CLIError, /not both/) end it "reads json from a file when --params starts with @" do json_args = '{"kj":"2hv","iuhg":"iube","2whf":"lcv"}' with_tempfile_containing('json-args', json_args) do |file| cli = Bolt::CLI.new(%W[plan run my::plan --params @#{file.path} --modulepath .]) result = cli.parse expect(result[:task_options]).to eq('kj' => '2hv', 'iuhg' => 'iube', '2whf' => 'lcv') end end it "raises a cli error when reading the params file fails" do Dir.mktmpdir do |dir| cli = Bolt::CLI.new(%W[plan run my::plan --params @#{dir}/nope --modulepath .]) expect { cli.parse }.to raise_error(Bolt::FileError, /No such file/) end end it "reads json from stdin when --params is just '-'" do json_args = '{"kj":"2hv","iuhg":"iube","2whf":"lcv"}' cli = Bolt::CLI.new(%w[plan run my::plan --params - --modulepath .]) allow($stdin).to receive(:read).and_return(json_args) result = cli.parse 
expect(result[:task_options]).to eq('kj' => '2hv', 'iuhg' => 'iube', '2whf' => 'lcv') end end describe 'task' do it "errors without a task" do cli = Bolt::CLI.new(%w[task run --targets example.com --modulepath .]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Must specify/) end it "errors if task is a parameter" do cli = Bolt::CLI.new(%w[task run --targets example.com --modulepath . p1=v1]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Invalid task/) end it "fails show with --noop" do expected = "Option '--noop' may only be specified when running a task or applying manifest code" expect { cli = Bolt::CLI.new(%w[task show foo --targets bar --noop]) cli.parse }.to raise_error(Bolt::CLIError, expected) end end describe 'plan' do it "errors without a plan" do cli = Bolt::CLI.new(%w[plan run --modulepath . nodes=example.com]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Invalid plan/) end it "errors if plan is a parameter" do cli = Bolt::CLI.new(%w[plan run nodes=example.com --modulepath . p1=v1]) expect { cli.parse }.to raise_error(Bolt::CLIError, /Invalid plan/) end it "accepts targets resulting from --query from puppetdb" do cli = Bolt::CLI.new(%w[command run foo --query nodes{}]) allow(cli).to receive(:query_puppetdb_nodes).once.and_return(%w[foo bar]) targets = [Bolt::Target.new('foo'), Bolt::Target.new('bar')] result = cli.parse cli.validate(result) cli.execute(result) expect(result[:targets]).to eq(targets) expect(result[:target_args]).to eq(%w[foo bar]) end it "fails when --targets AND --query provided" do expect { cli = Bolt::CLI.new(%w[plan run foo --query nodes{} --targets bar]) cli.update_targets(cli.parse) }.to raise_error(Bolt::CLIError, /Only one targeting option/) end it "fails with --noop" do expected = "Option '--noop' may only be specified when running a task or applying manifest code" expect { cli = Bolt::CLI.new(%w[plan run foo --targets bar --noop]) cli.parse }.to raise_error(Bolt::CLIError, expected) end end describe 'apply' do it "errors without an object or inline code" do expect { cli = Bolt::CLI.new(%w[apply --targets bar]) cli.parse }.to raise_error(Bolt::CLIError, 'a manifest file or --execute is required') end it "errors with both an object and inline code" do expect { cli = Bolt::CLI.new(%w[apply foo.pp --execute hello --targets bar]) cli.parse }.to raise_error(Bolt::CLIError, '--execute is unsupported when specifying a manifest file') end end describe "bundled_content" do let(:empty_content) { { "Plan" => [], "Plugin" => Bolt::Plugin::BUILTIN_PLUGINS, "Task" => [] } } it "does not calculate bundled content for a command" do cli = Bolt::CLI.new(%w[command run foo --targets bar]) cli.parse expect(cli.bundled_content).to eq(empty_content) end it "does not calculate bundled content for a script" do cli = Bolt::CLI.new(%w[script run foo --targets bar]) cli.parse expect(cli.bundled_content).to eq(empty_content) end it "does not calculate bundled content for a file" do cli = Bolt::CLI.new(%w[file upload /tmp /var foo --targets bar]) cli.parse expect(cli.bundled_content).to eq(empty_content) end it "calculates bundled content for a task" do cli = Bolt::CLI.new(%w[task run foo --targets bar]) cli.parse expect(cli.bundled_content['Task']).not_to be_empty end it "calculates bundled content for a plan" do cli = Bolt::CLI.new(%w[plan run foo --targets bar]) cli.parse expect(cli.bundled_content['Plan']).not_to be_empty expect(cli.bundled_content['Task']).not_to be_empty end end describe "execute" do let(:executor) { double('executor', noop: false, 
subscribe: nil, shutdown: nil) } let(:cli) { Bolt::CLI.new({}) } let(:targets) { [target] } let(:output) { StringIO.new } let(:result_vals) { [{}] } let(:fail_vals) { [{ '_error' => {} }] } let(:result_set) do results = targets.zip(result_vals).map do |t, r| Bolt::Result.new(t, value: r) end Bolt::ResultSet.new(results) end let(:fail_set) do results = targets.zip(fail_vals).map do |t, r| Bolt::Result.new(t, value: r) end Bolt::ResultSet.new(results) end before :each do allow(cli).to receive(:config).and_return(Bolt::Config.default) allow(Bolt::Executor).to receive(:new).and_return(executor) allow(executor).to receive(:log_plan) { |_plan_name, &block| block.call } allow(executor).to receive(:run_plan) do |scope, plan, params| plan.call_by_name_with_scope(scope, params, true) end outputter = Bolt::Outputter::JSON.new(false, false, false, output) allow(cli).to receive(:outputter).and_return(outputter) end context 'when running a command' do let(:options) { { targets: targets, subcommand: 'command', action: 'run', object: 'whoami' } } it "executes the 'whoami' command" do expect(executor) .to receive(:run_command) .with(targets, 'whoami', kind_of(Hash)) .and_return(Bolt::ResultSet.new([])) expect(cli.execute(options)).to eq(0) expect(JSON.parse(output.string)).to be end it "returns 2 if any node fails" do expect(executor) .to receive(:run_command) .with(targets, 'whoami', kind_of(Hash)) .and_return(fail_set) expect(cli.execute(options)).to eq(2) end end context "when running a script" do let(:script) { 'bar.sh' } let(:options) { { targets: targets, subcommand: 'script', action: 'run', object: script, leftovers: [] } } it "runs a script" do stub_file(script) expect(executor) .to receive(:run_script) .with(targets, script, [], kind_of(Hash)) .and_return(Bolt::ResultSet.new([])) expect(cli.execute(options)).to eq(0) expect(JSON.parse(output.string)).to be end it "errors for non-existent scripts" do stub_non_existent_file(script) expect { cli.execute(options) }.to raise_error( Bolt::FileError, /The script '#{script}' does not exist/ ) expect(JSON.parse(output.string)).to be end it "errors for unreadable scripts" do stub_unreadable_file(script) expect { cli.execute(options) }.to raise_error( Bolt::FileError, /The script '#{script}' is unreadable/ ) expect(JSON.parse(output.string)).to be end it "errors for scripts that aren't files" do stub_directory(script) expect { cli.execute(options) }.to raise_error( Bolt::FileError, /The script '#{script}' is not a file/ ) expect(JSON.parse(output.string)).to be end it "returns 2 if any node fails" do stub_file(script) expect(executor).to receive(:run_script) .with(targets, script, [], kind_of(Hash)) .and_return(fail_set) expect(cli.execute(options)).to eq(2) end end context "when showing available tasks", :reset_puppet_settings do before :each do cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')] cli.config.format = 'json' end it "lists tasks with description" do options = { subcommand: 'task', action: 'show' } cli.execute(options) tasks = JSON.parse(output.string)['tasks'] [ ['sample', nil], ['sample::echo', nil], ['sample::no_noop', 'Task with no noop'], ['sample::noop', 'Task with noop'], ['sample::notice', nil], ['sample::params', 'Task with parameters'], ['sample::ps_noop', 'Powershell task with noop'], ['sample::stdin', nil], ['sample::winstdin', nil] ].each do |taskdoc| expect(tasks).to include(taskdoc) end end it "only includes tasks set in bolt-project.yaml" do mocks = { type: '', resource_types: '', tasks: ['facts'], 
project_file?: true, load_as_module?: true, name: nil, to_h: {} }
        proj = double('project', mocks)
        allow(cli.config).to receive(:project).and_return(proj)
        options = { subcommand: 'task', action: 'show' }
        cli.execute(options)
        tasks = JSON.parse(output.string)['tasks']
        expect(tasks).to eq([['facts', "Gather system facts"]])
      end

      it "lists modulepath" do
        options = { subcommand: 'task', action: 'show' }
        cli.execute(options)
        modulepath = JSON.parse(output.string)['modulepath']
        expect(modulepath).to include(File.join(__FILE__, '../../fixtures/modules').to_s)
      end

      it "does not list a private task" do
        options = { subcommand: 'task', action: 'show' }
        cli.execute(options)
        tasks = JSON.parse(output.string)['tasks']
        expect(tasks).not_to include(['sample::private', 'Do not list this task'])
      end

      it "shows an individual private task" do
        task_name = 'sample::private'
        options = { subcommand: 'task', action: 'show', object: task_name }
        cli.execute(options)
        json = JSON.parse(output.string)
        json.delete("files")
        expect(json).to eq(
          "name" => "sample::private",
          "metadata" => {
            "name" => "Private Task",
            "description" => "Do not list this task",
            "private" => true
          },
          "module_dir" => File.absolute_path(File.join(__dir__, "..", "fixtures", "modules", "sample"))
        )
      end

      it "shows an individual task data" do
        task_name = 'sample::params'
        options = { subcommand: 'task', action: 'show', object: task_name }
        cli.execute(options)
        json = JSON.parse(output.string)
        json.delete("files")
        expect(json).to eq(
          "name" => "sample::params",
          "module_dir" => File.absolute_path(File.join(__dir__, "..", "fixtures", "modules", "sample")),
          "metadata" => {
            "anything" => true,
            "description" => "Task with parameters",
            "extensions" => {},
            "input_method" => 'stdin',
            "parameters" => {
              "mandatory_string" => {
                "description" => "Mandatory string parameter",
                "type" => "String[1, 10]"
              },
              "mandatory_integer" => {
                "description" => "Mandatory integer parameter",
                "type" => "Integer"
              },
              "mandatory_boolean" => {
                "description" => "Mandatory boolean parameter",
                "type" => "Boolean"
              },
              "non_empty_string" => {
                "type" => "String[1]"
              },
              "optional_string" => {
                "description" => "Optional string parameter",
                "type" => "Optional[String]"
              },
              "optional_integer" => {
                "description" => "Optional integer parameter",
                "type" => "Optional[Integer[-5,5]]"
              },
              "no_type" => {
                "description" => "A parameter without a type"
              }
            },
            "supports_noop" => true
          }
        )
      end

      it "does not load inventory" do
        options = { subcommand: 'task', action: 'show' }
        expect(cli).not_to receive(:inventory)
        cli.execute(options)
      end
    end

    context "when available tasks include an error", :reset_puppet_settings do
      before :each do
        cli.config.modulepath = [File.join(__FILE__, '../../fixtures/invalid_mods')]
        cli.config.format = 'json'
      end

      it "task show prints a warning but shows other valid tasks" do
        options = { subcommand: 'task', action: 'show' }
        cli.execute(options)
        json = JSON.parse(output.string)['tasks']
        tasks = [
          ["package", "Manage and inspect the state of packages"],
          ["service", "Manage and inspect the state of services"]
        ]
        tasks.each do |task|
          expect(json).to include(task)
        end
        output = @log_output.readlines.join
        expect(output).to match(/unexpected token/)
      end
    end

    context "when the task is not in the modulepath", :reset_puppet_settings do
      before :each do
        cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')]
      end

      it "task show displays an error" do
        options = { subcommand: 'task', action: 'show', object: 'abcdefg' }
        expect { cli.execute(options) }.to raise_error(
          Bolt::Error,
          'Could not find a task named
"abcdefg". For a list of available tasks, run "bolt task show"' ) end end context "when showing available plans", :reset_puppet_settings do before :each do cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')] cli.config.format = 'json' end it "lists plans" do options = { subcommand: 'plan', action: 'show' } cli.execute(options) plan_list = JSON.parse(output.string)['plans'] [ ['sample'], ['sample::single_task'], ['sample::three_tasks'], ['sample::two_tasks'], ['sample::yaml'] ].each do |plan| expect(plan_list).to include(plan) end end it "lists modulepath" do options = { subcommand: 'plan', action: 'show' } cli.execute(options) modulepath = JSON.parse(output.string)['modulepath'] expect(modulepath).to include(File.join(__FILE__, '../../fixtures/modules').to_s) end it "shows an individual plan data" do plan_name = 'sample::optional_params_task' options = { subcommand: 'plan', action: 'show', object: plan_name } cli.execute(options) json = JSON.parse(output.string) expect(json).to eq( "name" => "sample::optional_params_task", "description" => "Demonstrates plans with optional parameters", "module_dir" => File.absolute_path(File.join(__dir__, "..", "fixtures", "modules", "sample")), "parameters" => { "param_mandatory" => { "type" => "String", "description" => "A mandatory parameter", "sensitive" => false }, "param_optional" => { "type" => "Optional[String]", "description" => "An optional parameter", "sensitive" => false }, "param_with_default_value" => { "type" => "String", "description" => "A parameter with a default value", "default_value" => "'foo'", "sensitive" => false } } ) end it "warns when yard doc parameters do not match the plan signature parameters" do plan_name = 'sample::documented_param_typo' options = { subcommand: 'plan', action: 'show', object: plan_name } cli.execute(options) json = JSON.parse(output.string) expect(json).to eq( "name" => plan_name, "module_dir" => File.absolute_path(File.join(__dir__, "..", "fixtures", "modules", "sample")), "description" => nil, "parameters" => { "oops" => { "type" => "String", "default_value" => "typo", "sensitive" => false } } ) expected_log = /The documented parameter 'not_oops' does not exist in plan signature/m expect(@log_output.readlines.join).to match(expected_log) end it "shows an individual yaml plan data" do plan_name = 'sample::yaml' options = { subcommand: 'plan', action: 'show', object: plan_name } cli.execute(options) json = JSON.parse(output.string) expect(json).to eq( "name" => "sample::yaml", "description" => nil, "module_dir" => File.absolute_path(File.join(__dir__, "..", "fixtures", "modules", "sample")), "parameters" => { "nodes" => { "type" => "TargetSpec", "sensitive" => false }, "param_optional" => { "type" => "Optional[String]", "default_value" => 'undef', "sensitive" => false }, "param_with_default_value" => { "type" => "String", "default_value" => 'hello', "sensitive" => false } } ) end it "does not load inventory" do options = { subcommand: 'plan', action: 'show' } expect(cli).not_to receive(:inventory) cli.execute(options) end end context "when available plans include an error", :reset_puppet_settings do before :each do cli.config.modulepath = [File.join(__FILE__, '../../fixtures/invalid_mods')] cli.config.format = 'json' end it "plan show prints a warning but shows other valid plans" do options = { subcommand: 'plan', action: 'show' } cli.execute(options) json = JSON.parse(output.string)['plans'] expect(json).to include(["aggregate::count"], ["aggregate::targets"], ["canary"], ["facts"], 
["facts::info"], ["puppetdb_fact"], ["sample::ok"]) expect(@log_output.readlines.join).to match(/Syntax error at.*single_task.pp/m) end it "plan run displays an error" do plan_name = 'sample::single_task' plan_params = { 'nodes' => targets.map(&:host).join(',') } options = { nodes: [], subcommand: 'plan', action: 'run', object: plan_name, task_options: plan_params } expect { cli.execute(options) }.to raise_error(/^Syntax error at/) end end context "when the plan is not in the modulepath", :reset_puppet_settings do before :each do cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')] end it "plan show displays an error" do options = { subcommand: 'plan', action: 'show', object: 'abcdefg' } expect { cli.execute(options) }.to raise_error( Bolt::Error, 'Could not find a plan named "abcdefg". For a list of available plans, run "bolt plan show"' ) end end context "when running a task", :reset_puppet_settings do let(:task_name) { +'sample::echo' } let(:task_params) { { 'message' => 'hi' } } let(:options) { { targets: targets, subcommand: 'task', action: 'run', object: task_name, task_options: task_params, params_parsed: true } } let(:input_method) { nil } let(:task_path) { +'modules/sample/tasks/echo.sh$' } let(:task_t) { task_type(task_name, Regexp.new(task_path), input_method) } before :each do allow(executor).to receive(:report_bundled_content) cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')] end it "runs a task given a name" do expect(executor) .to receive(:run_task) .with(targets, task_t, task_params, kind_of(Hash)) .and_return(Bolt::ResultSet.new([])) expect(cli.execute(options)).to eq(0) expect(JSON.parse(output.string)).to be end it "returns 2 if any node fails" do expect(executor) .to receive(:run_task) .with(targets, task_t, task_params, kind_of(Hash)) .and_return(fail_set) expect(cli.execute(options)).to eq(2) end it "errors for non-existent modules" do task_name.replace 'dne::task1' expect { cli.execute(options) }.to raise_error( Bolt::Error, /Could not find a task named "dne::task1"/ ) expect(JSON.parse(output.string)).to be end it "errors for non-existent tasks" do task_name.replace 'sample::dne' expect { cli.execute(options) }.to raise_error( Bolt::Error, /Could not find a task named "sample::dne"/ ) expect(JSON.parse(output.string)).to be end it "raises errors from the executor" do task_params.clear expect(executor) .to receive(:run_task) .with(targets, task_t, {}, kind_of(Hash)) .and_raise("Could not connect to target") expect { cli.execute(options) }.to raise_error(/Could not connect to target/) end it "runs an init task given a module name" do task_name.replace 'sample' task_path.replace 'modules/sample/tasks/init.sh$' expect(executor) .to receive(:run_task) .with(targets, task_t, task_params, kind_of(Hash)) .and_return(Bolt::ResultSet.new([])) cli.execute(options) expect(JSON.parse(output.string)).to be end context "input_method stdin" do let(:input_method) { 'stdin' } it "runs a task passing input on stdin" do task_name.replace 'sample::stdin' task_path.replace 'modules/sample/tasks/stdin.sh$' expect(executor) .to receive(:run_task) .with(targets, task_t, task_params, kind_of(Hash)) .and_return(Bolt::ResultSet.new([])) cli.execute(options) expect(JSON.parse(output.string)).to be end it "runs a powershell task passing input on stdin" do task_name.replace 'sample::winstdin' task_path.replace 'modules/sample/tasks/winstdin.ps1$' expect(executor) .to receive(:run_task) .with(targets, task_t, task_params, kind_of(Hash)) 
.and_return(Bolt::ResultSet.new([])) cli.execute(options) expect(JSON.parse(output.string)).to be end end describe 'task parameters validation' do let(:task_name) { +'sample::params' } let(:task_params) { {} } let(:input_method) { +'stdin' } let(:task_path) { %r{modules/sample/tasks/params.sh$} } it "errors when unknown parameters are specified" do task_params.merge!( 'foo' => 'one', 'bar' => 'two' ) expect { cli.execute(options) }.to raise_error( Bolt::PAL::PALError, /Task sample::params:\n(?x: )\s*has no parameter named 'foo'\n(?x: )\s*has no parameter named 'bar'/ ) expect(JSON.parse(output.string)).to be end it "errors when required parameters are not specified" do task_params['mandatory_string'] = 'str' expect { cli.execute(options) }.to raise_error( Bolt::PAL::PALError, /Task sample::params:\n(?x: )\s*expects a value for parameter 'mandatory_integer'\n(?x: )\s*expects a value for parameter 'mandatory_boolean'/ ) expect(JSON.parse(output.string)).to be end it "errors when the specified parameter values don't match the expected data types" do task_params.merge!( 'mandatory_string' => 'str', 'mandatory_integer' => 10, 'mandatory_boolean' => 'str', 'non_empty_string' => 'foo', 'optional_string' => 10 ) expect { cli.execute(options) }.to raise_error( Bolt::PAL::PALError, /Task sample::params:\n(?x: )\s*parameter 'mandatory_boolean' expects a Boolean value, got String\n(?x: )\s*parameter 'optional_string' expects a value of type Undef or String,(?x: ) got Integer/ ) expect(JSON.parse(output.string)).to be end it "errors when the specified parameter values are outside of the expected ranges" do task_params.merge!( 'mandatory_string' => '0123456789a', 'mandatory_integer' => 10, 'mandatory_boolean' => true, 'non_empty_string' => 'foo', 'optional_integer' => 10 ) expect { cli.execute(options) }.to raise_error( Bolt::PAL::PALError, /Task sample::params:\n(?x: )\s*parameter 'mandatory_string' expects a String\[1, 10\] value, got String\n(?x: )\s*parameter 'optional_integer' expects a value of type Undef or Integer\[-5, 5\],(?x: ) got Integer\[10, 10\]/ ) expect(JSON.parse(output.string)).to be end it "runs the task when the specified parameters are successfully validated" do expect(executor) .to receive(:run_task) .with(targets, task_t, task_params, kind_of(Hash)) .and_return(Bolt::ResultSet.new([])) task_params.merge!( 'mandatory_string' => ' ', 'mandatory_integer' => 0, 'mandatory_boolean' => false, 'non_empty_string' => 'foo' ) cli.execute(options) expect(JSON.parse(output.string)).to be end context "using the pcp transport with invalid tasks" do let(:task_params) { # these are not legal parameters for the 'sample::params' task # according to the local task definition { 'foo' => 'foo', 'bar' => 'bar' } } context "when some targets don't use the PCP transport" do it "errors as usual if the task is not available locally" do task_name.replace 'unknown::task' expect { cli.execute(options) }.to raise_error( Bolt::Error, /Could not find a task named "unknown::task"/ ) expect(JSON.parse(output.string)).to be end it "errors as usual if invalid (according to the local task definition) parameters are specified" do expect { cli.execute(options) }.to raise_error( Bolt::PAL::PALError, /Task sample::params:\n(?x: )\s*has no parameter named 'foo'\n(?x: )\s*has no parameter named 'bar'/ ) expect(JSON.parse(output.string)).to be end end context "when all targets use the PCP transport" do let(:target) { inventory.get_target('pcp://foo') } let(:task_t) { task_type(task_name, /\A\z/, nil) } it "runs the task 
even when it is not installed locally" do
              task_name.replace 'unknown::task'
              expect(executor)
                .to receive(:run_task)
                .with(targets, task_t, task_params, kind_of(Hash))
                .and_return(Bolt::ResultSet.new([]))
              cli.execute(options)
              expect(JSON.parse(output.string)).to be
            end

            it "runs the task even when invalid (according to the local task definition) parameters are specified" do
              expect(executor)
                .to receive(:run_task)
                .with(targets, task_t, task_params, kind_of(Hash))
                .and_return(Bolt::ResultSet.new([]))
              cli.execute(options)
              expect(JSON.parse(output.string)).to be
            end
          end
        end
      end
    end

    context "when running a plan", :reset_puppet_settings do
      let(:plan_name) { +'sample::single_task' }
      let(:plan_params) { { 'nodes' => targets.map(&:host).join(',') } }
      let(:options) { { targets: [], subcommand: 'plan', action: 'run', object: plan_name, task_options: plan_params } }
      let(:task_t) { task_type('sample::echo', %r{modules/sample/tasks/echo.sh$}, nil) }

      before :each do
        allow(executor).to receive(:report_function_call)
        allow(executor).to receive(:report_bundled_content)
        cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')]
      end

      context 'with TargetSpec $nodes plan param' do
        it "uses the nodes passed using the --targets option(s) as the 'nodes' plan parameter" do
          plan_params.clear
          options[:targets] = targets.map(&:host)
          expect(executor)
            .to receive(:run_task)
            .with(targets, task_t, { 'message' => 'hi there' }, kind_of(Hash))
            .and_return(Bolt::ResultSet.new([Bolt::Result.for_task(target, 'yes', '', 0, 'some_task')]))
          expect(executor).to receive(:start_plan)
          expect(executor).to receive(:log_plan)
          expect(executor).to receive(:run_plan)
          expect(executor).to receive(:finish_plan)
          cli.execute(options)
          expect(JSON.parse(output.string)).to eq(
            [{ 'target' => 'foo', 'status' => 'success', 'action' => 'task', 'object' => 'some_task', 'value' => { '_output' => 'yes' } }]
          )
        end
      end

      context 'with TargetSpec $targets plan param' do
        let(:plan_name) { 'sample::single_task_targets' }

        it "uses the nodes passed using the --targets option(s) as the 'targets' plan parameter" do
          plan_params.clear
          options[:targets] = targets.map(&:host)
          expect(executor)
            .to receive(:run_task)
            .with(targets, task_t, { 'message' => 'hi there' }, kind_of(Hash))
            .and_return(Bolt::ResultSet.new([Bolt::Result.for_task(target, 'yes', '', 0, 'some_task')]))
          expect(executor).to receive(:start_plan)
          expect(executor).to receive(:log_plan)
          expect(executor).to receive(:run_plan)
          expect(executor).to receive(:finish_plan)
          cli.execute(options)
          expect(JSON.parse(output.string)).to eq(
            [{ 'target' => 'foo', 'status' => 'success', 'action' => 'task', 'object' => 'some_task', 'value' => { '_output' => 'yes' } }]
          )
        end
      end

      it "errors when the --targets option(s) and the 'targets' plan parameter are both specified" do
        options[:targets] = targets.map(&:host)
        options[:task_options] = { 'targets' => targets.map(&:host).join(',') }
        regex = /A plan's 'targets' parameter may be specified using the --targets option/
        expect { cli.execute(options) }.to raise_error(regex)
      end

      context "when a plan has both $targets and $nodes, neither is populated with --targets" do
        let(:plan_name) { 'sample::targets_nodes' }

        it "warns when
--targets does not populate both $targets and $nodes" do plan_params.clear options[:targets] = targets.map(&:host) expect(executor).to receive(:start_plan) expect(executor).to receive(:log_plan) expect(executor).to receive(:run_plan) expect(executor).to receive(:finish_plan) cli.execute(options) regex = /Plan parameters include both 'nodes' and 'targets' with type 'TargetSpec'/ expect(@log_output.readlines.join).to match(regex) end end it "formats results of a passing task" do expect(executor) .to receive(:run_task) .with(targets, task_t, { 'message' => 'hi there' }, kind_of(Hash)) .and_return(Bolt::ResultSet.new([Bolt::Result.for_task(target, 'yes', '', 0, 'some_task')])) expect(executor).to receive(:start_plan) expect(executor).to receive(:log_plan) expect(executor).to receive(:run_plan) expect(executor).to receive(:finish_plan) cli.execute(options) expect(JSON.parse(output.string)).to eq( [{ 'target' => 'foo', 'status' => 'success', 'action' => 'task', 'object' => 'some_task', 'value' => { '_output' => 'yes' } }] ) end it "raises errors from the executor" do expect(executor) .to receive(:run_task) .with(targets, task_t, { 'message' => 'hi there' }, kind_of(Hash)) .and_raise("Could not connect to target") expect(executor).to receive(:start_plan) expect(executor).to receive(:log_plan) expect(executor).to receive(:run_plan) expect(executor).to receive(:finish_plan) expect(cli.execute(options)).to eq(1) expect(JSON.parse(output.string)['msg']).to match(/Could not connect to target/) end it "formats results of a failing task" do expect(executor) .to receive(:run_task) .with(targets, task_t, { 'message' => 'hi there' }, kind_of(Hash)) .and_return(Bolt::ResultSet.new([Bolt::Result.for_task(target, 'no', '', 1, 'some_task')])) expect(executor).to receive(:start_plan) expect(executor).to receive(:log_plan) expect(executor).to receive(:run_plan) expect(executor).to receive(:finish_plan) cli.execute(options) expect(JSON.parse(output.string)).to eq( [ { 'target' => 'foo', 'status' => 'failure', 'action' => 'task', 'object' => 'some_task', 'value' => { "_output" => "no", "_error" => { "msg" => "The task failed with exit code 1", "kind" => "puppetlabs.tasks/task-error", "details" => { "exit_code" => 1 }, "issue_code" => "TASK_ERROR" } } } ] ) end it "errors for non-existent plans" do plan_name.replace 'sample::dne' expect(executor).to receive(:start_plan) expect(executor).to receive(:finish_plan) expect(cli.execute(options)).to eq(1) expect(JSON.parse(output.string)['msg']).to match(/Could not find a plan named "sample::dne"/) end end describe "file uploading" do let(:source) { '/path/to/local' } let(:dest) { '/path/to/remote' } let(:options) { { targets: targets, subcommand: 'file', action: 'upload', object: source, leftovers: [dest] } } it "uploads a file via scp" do stub_file(source) expect(executor) .to receive(:upload_file) .with(targets, source, dest, kind_of(Hash)) .and_return(Bolt::ResultSet.new([])) cli.execute(options) expect(JSON.parse(output.string)).to be end it "uploads a directory via scp" do stub_directory(source) allow(Dir).to receive(:foreach).with(source) expect(executor) .to receive(:upload_file) .with(targets, source, dest, kind_of(Hash)) .and_return(Bolt::ResultSet.new([])) cli.execute(options) expect(JSON.parse(output.string)).to be end it "returns 2 if any node fails" do stub_file(source) expect(executor) .to receive(:upload_file) .with(targets, source, dest, kind_of(Hash)) .and_return(fail_set) expect(cli.execute(options)).to eq(2) end it "raises if the local file doesn't 
exist" do stub_non_existent_file(source) expect { cli.execute(options) }.to raise_error( Bolt::FileError, /The source file '#{source}' does not exist/ ) expect(JSON.parse(output.string)).to be end it "errors if the local file is unreadable" do stub_unreadable_file(source) expect { cli.execute(options) }.to raise_error( Bolt::FileError, /The source file '#{source}' is unreadable/ ) expect(JSON.parse(output.string)).to be end it "errors if a file in a subdirectory is unreadable" do child_file = File.join(source, 'afile') stub_directory(source) stub_unreadable_file(child_file) allow(Dir).to receive(:foreach).with(source).and_yield('afile') expect { cli.execute(options) }.to raise_error( Bolt::FileError, /The source file '#{child_file}' is unreadable/ ) expect(JSON.parse(output.string)).to be end end end describe "execute with noop" do let(:executor) { double('executor', noop: true, subscribe: nil, shutdown: nil) } let(:cli) { Bolt::CLI.new({}) } let(:targets) { [target] } let(:output) { StringIO.new } let(:bundled_content) { ['test'] } before :each do allow(cli).to receive(:bundled_content).and_return(bundled_content) expect(Bolt::Executor).to receive(:new).with(Bolt::Config.default.concurrency, anything, true, anything).and_return(executor) plugins = Bolt::Plugin.setup(Bolt::Config.default, nil) allow(cli).to receive(:plugins).and_return(plugins) outputter = Bolt::Outputter::JSON.new(false, false, false, output) allow(cli).to receive(:outputter).and_return(outputter) allow(executor).to receive(:report_bundled_content) allow(cli).to receive(:config).and_return(Bolt::Config.default) end context "when running a task", :reset_puppet_settings do let(:task_name) { +'sample::noop' } let(:task_params) { { 'message' => 'hi' } } let(:options) { { targets: targets, subcommand: 'task', action: 'run', object: task_name, task_options: task_params, noop: true } } let(:task_t) { task_type(task_name, %r{modules/sample/tasks/noop.sh$}, nil) } before :each do cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')] end it "runs a task that supports noop" do expect(executor) .to receive(:run_task) .with(targets, task_t, task_params.merge('_noop' => true), kind_of(Hash)) .and_return(Bolt::ResultSet.new([])) cli.execute(options) expect(JSON.parse(output.string)).to be end it "errors on a task that doesn't support noop" do task_name.replace 'sample::no_noop' expect(executor).not_to receive(:run_task) expect { cli.execute(options) }.to raise_error('Task does not support noop') end it "errors on a task without metadata" do task_name.replace 'sample::echo' expect(executor).not_to receive(:run_task) expect { cli.execute(options) }.to raise_error('Task does not support noop') end end end describe "installing a Puppetfile" do include BoltSpec::Project let(:output) { StringIO.new } let(:puppetfile) { @puppetfile } let(:modulepath) { (project_path + 'modules').to_s } let(:action_stub) { double('r10k_action_puppetfile_install') } let(:project_config) { { 'modules' => [] } } let(:cli) { Bolt::CLI.new(%W[puppetfile install --project #{project_path} -m #{modulepath}]) } before :each do allow(cli).to receive(:outputter).and_return(Bolt::Outputter::JSON.new(false, false, false, output)) allow_any_instance_of(Bolt::PAL).to receive(:generate_types) allow(R10K::Action::Puppetfile::Install).to receive(:new).and_return(action_stub) end around :each do |example| with_project do @puppetfile = File.expand_path('Puppetfile', project_path) File.write(@puppetfile, "mod 'puppetlabs-yaml'") example.run end end it 'fails if 
the Puppetfile does not exist' do FileUtils.rm(puppetfile) expect do cli.execute(cli.parse) end.to raise_error(Bolt::FileError, /Could not find a Puppetfile/) end it 'installs to the first directory of the modulepath' do expect(R10K::Action::Puppetfile::Install).to receive(:new) .with({ root: File.dirname(puppetfile), puppetfile: puppetfile.to_s, moduledir: modulepath }, nil) allow(action_stub).to receive(:call).and_return(true) cli.execute(cli.parse) end it 'returns 0 and prints a result if successful' do allow(action_stub).to receive(:call).and_return(true) expect(cli.execute(cli.parse)).to eq(0) result = JSON.parse(output.string) expect(result['success']).to eq(true) expect(result['puppetfile']).to eq(puppetfile.to_s) expect(result['moduledir']).to eq(modulepath.to_s) end it 'returns 1 and prints a result if unsuccessful' do allow(action_stub).to receive(:call).and_return(false) expect(cli.execute(cli.parse)).to eq(1) result = JSON.parse(output.string) expect(result['success']).to eq(false) expect(result['puppetfile']).to eq(puppetfile.to_s) expect(result['moduledir']).to eq(modulepath.to_s) end it 'propagates any r10k errors' do allow(action_stub).to receive(:call).and_raise(R10K::Error.new('everything is terrible')) expect do cli.execute(cli.parse) end.to raise_error(Bolt::PuppetfileError, /everything is terrible/) end it 'lists modules in the puppetfile' do allow(cli).to receive(:outputter).and_return(Bolt::Outputter::Human.new(false, false, false, output)) cli.parse modules = cli.list_modules expect(modules.keys.first).to match(/bolt-modules/) expect(modules.values.first.map { |h| h[:name] }).to eq(%w[boltlib ctrl dir file out prompt system]) expect(modules.values[1].map { |h| h[:name] }) .to include("aggregate", "canary", "puppetdb_fact", "puppetlabs/yaml") end it 'errors when modules is configured' do allow(Bolt::Util).to receive(:read_yaml_hash).and_call_original allow(Bolt::Util).to receive(:read_optional_yaml_hash).and_call_original expect { cli.execute(cli.parse) }.to raise_error( Bolt::CLIError, /Unable to use command/ ) end end describe "applying Puppet code" do let(:options) { { subcommand: 'apply', targets: 'foo' } } let(:output) { StringIO.new } let(:cli) { Bolt::CLI.new([]) } before :each do allow(cli).to receive(:outputter).and_return(Bolt::Outputter::JSON.new(false, false, false, output)) allow(cli).to receive(:config).and_return(Bolt::Config.default) end it 'fails if the code file does not exist' do manifest = Tempfile.new options[:object] = manifest.path manifest.close manifest.delete expect(cli).not_to receive(:apply_manifest) expect { cli.execute(options) }.to raise_error(Bolt::FileError) end end end describe 'BOLT_PROJECT' do let(:bolt_project) { '/bolt/project' } let(:pathname) { Pathname.new(bolt_project).expand_path } around(:each) do |example| original = ENV['BOLT_PROJECT'] ENV['BOLT_PROJECT'] = bolt_project example.run ensure ENV['BOLT_PROJECT'] = original end before(:each) do allow(Bolt::Util).to receive(:validate_file).and_return(true) end it 'loads from BOLT_PROJECT environment variable over --configfile' do cli = Bolt::CLI.new(%w[command run uptime --configfile /foo/bar --targets foo]) cli.parse expect(cli.config.project.path).to eq(pathname) end end describe 'configfile' do let(:configdir) { File.join(__dir__, '..', 'fixtures', 'configs') } let(:modulepath) { [File.expand_path('/foo/bar'), File.expand_path('/baz/qux')] } let(:complete_config) do { 'modulepath' => modulepath.join(File::PATH_SEPARATOR), 'inventoryfile' => File.join(__dir__, '..', 
'fixtures', 'inventory', 'empty.yml'), 'concurrency' => 14, 'compile-concurrency' => 2, 'format' => 'json', 'log' => { 'console' => { 'level' => 'warn' }, File.join(configdir, 'debug.log') => { 'level' => 'debug', 'append' => false } }, 'ssh' => { 'private-key' => '/bar/foo', 'host-key-check' => false, 'connect-timeout' => 4, 'run-as' => 'Fakey McFakerson' }, 'winrm' => { 'connect-timeout' => 7, 'cacert' => '/path/to/winrm-cacert', 'extensions' => ['.py', '.bat'], 'ssl' => false, 'ssl-verify' => false }, 'pcp' => { 'task-environment' => 'testenv', 'service-url' => 'http://foo.org', 'token-file' => '/path/to/token', 'cacert' => '/path/to/cacert' } } end before(:each) do allow(Bolt::Util).to receive(:validate_file).and_return(true) end it 'reads modulepath' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo --no-host-key-check]) cli.parse expect(cli.config.modulepath).to eq(modulepath) end end it 'reads concurrency' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo --no-host-key-check]) cli.parse expect(cli.config.concurrency).to eq(14) end end it 'reads compile-concurrency' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo --no-host-key-check]) cli.parse expect(cli.config.compile_concurrency).to eq(2) end end it 'reads format' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo --no-host-key-check]) cli.parse expect(cli.config.format).to eq('json') end end it 'reads log file' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo --no-host-key-check]) cli.parse normalized_path = File.expand_path(File.join(configdir, 'debug.log')) expect(cli.config.log).to include('console' => { level: 'warn' }) expect(cli.config.log).to include("file:#{normalized_path}" => { level: 'debug', append: false }) end end it 'reads private-key for ssh' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo --no-host-key-check]) cli.parse expect(cli.config.transports['ssh']['private-key']).to match(%r{/bar/foo\z}) end end it 'reads host-key-check for ssh' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo]) cli.parse expect(cli.config.transports['ssh']['host-key-check']).to eq(false) end end it 'reads run-as for ssh' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new( %W[command run r --configfile #{conf.path} --targets foo --password bar --no-host-key-check] ) cli.parse expect(cli.config.transports['ssh']['run-as']).to eq('Fakey McFakerson') end end it 'reads separate connect-timeout for ssh and winrm' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo --no-host-key-check --no-ssl]) cli.parse expect(cli.config.transports['ssh']['connect-timeout']).to eq(4) expect(cli.config.transports['winrm']['connect-timeout']).to eq(7) end end it 'reads ssl for winrm' 
do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo]) cli.parse expect(cli.config.transports['winrm']['ssl']).to eq(false) end end it 'reads ssl-verify for winrm' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo]) cli.parse expect(cli.config.transports['winrm']['ssl-verify']).to eq(false) end end it 'reads extensions for winrm' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo --no-ssl]) cli.parse expect(cli.config.transports['winrm']['extensions']).to eq(['.py', '.bat']) end end it 'reads task environment for pcp' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo]) cli.parse expect(cli.config.transports['pcp']['task-environment']).to eq('testenv') end end it 'reads service url for pcp' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo]) cli.parse expect(cli.config.transports['pcp']['service-url']).to eql('http://foo.org') end end it 'reads token file for pcp' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo]) cli.parse expect(cli.config.transports['pcp']['token-file']).to match(%r{/path/to/token\z}) end end it 'reads separate cacert file for pcp and winrm' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo --no-host-key-check --no-ssl]) cli.parse expect(cli.config.transports['pcp']['cacert']).to match(%r{/path/to/cacert\z}) expect(cli.config.transports['winrm']['cacert']).to match(%r{/path/to/winrm-cacert\z}) end end it 'CLI flags override config' do with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf| cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --targets foo --concurrency 12]) cli.parse expect(cli.config.concurrency).to eq(12) end end it 'raises an error if a config file is specified and invalid' do cli = Bolt::CLI.new(%W[command run uptime --configfile #{File.join(configdir, 'invalid.yml')} --targets foo]) expect { cli.parse }.to raise_error(Bolt::FileError, /Could not parse/) end end describe 'inventoryfile' do let(:inventorydir) { File.join(__dir__, '..', 'fixtures', 'configs') } it 'raises an error if an inventory file is specified and invalid' do cli = Bolt::CLI.new( %W[command run uptime --inventoryfile #{File.join(inventorydir, 'invalid.yml')} --targets foo] ) expect { cli.update_targets(cli.parse) }.to raise_error(Bolt::Error, /Could not parse/) end it 'lists targets the action would run on' do cli = Bolt::CLI.new(%w[inventory show -t localhost]) expect_any_instance_of(Bolt::Outputter::Human).to receive(:print_targets) cli.execute(cli.parse) end it 'lists targets with resolved configuration' do cli = Bolt::CLI.new(%w[inventory show -t localhost --detail]) expect_any_instance_of(Bolt::Outputter::Human).to receive(:print_target_info) cli.execute(cli.parse) end it 'lists groups in the inventory file' do cli = Bolt::CLI.new(%w[group show]) expect_any_instance_of(Bolt::Outputter::Human).to receive(:print_groups) 
cli.execute(cli.parse) end context 'with BOLT_INVENTORY set' do before(:each) { ENV['BOLT_INVENTORY'] = '---' } after(:each) { ENV.delete('BOLT_INVENTORY') } it 'errors when BOLT_INVENTORY is set' do cli = Bolt::CLI.new(%W[command run id --inventoryfile #{File.join(inventorydir, 'invalid.yml')} --targets foo]) expect { cli.parse }.to raise_error(Bolt::Error, /BOLT_INVENTORY is set/) end end end describe 'project' do context 'init' do it 'creates a new project in the current directory' do Dir.mktmpdir do |tmpdir| # The tmpdir doesn't have a valid modulename dir = File.join(tmpdir, 'valid') Dir.mkdir(dir) allow(Dir).to receive(:pwd).and_return(dir) file = File.join(dir, 'bolt-project.yaml') cli = Bolt::CLI.new(%w[project init]) cli.execute(cli.parse) expect(File.file?(file)).to be expect(File.read(file)).to eq("---\nname: valid\n") end end it 'creates a new project with a specified name' do Dir.mktmpdir do |tmpdir| dir = File.join(tmpdir, 'valid') Dir.mkdir(dir) allow(Dir).to receive(:pwd).and_return(dir) file = File.join(dir, 'bolt-project.yaml') cli = Bolt::CLI.new(%w[project init myproject]) cli.execute(cli.parse) expect(File.file?(file)).to be expect(File.read(file)).to eq("---\nname: myproject\n") end end it 'errors if the directory name is invalid' do Dir.mktmpdir do |dir| allow(Dir).to receive(:pwd).and_return(dir) cli = Bolt::CLI.new(%w[project init]) expect { cli.execute(cli.parse) } .to raise_error(Bolt::ValidationError, /name '#{File.basename(dir)}' is an invalid project name/) end end it 'errors if you pass in an invalid name' do cli = Bolt::CLI.new(%w[project init 123]) expect { cli.execute(cli.parse) } .to raise_error(Bolt::ValidationError, /The provided project name '123' is invalid;/) end it 'warns when a bolt-project.yaml already exists' do Dir.mktmpdir do |tmpdir| dir = File.join(tmpdir, 'valid') Dir.mkdir(dir) allow(Dir).to receive(:pwd).and_return(dir) config = File.join(dir, 'bolt-project.yaml') FileUtils.touch(config) cli = Bolt::CLI.new(%w[project init]) cli.execute(cli.parse) expect(@log_output.readlines).to include(/Found existing project directory at #{dir}/) end end context 'with modules' do it 'creates a Puppetfile and installs modules with dependencies' do # Create the tmpdir relative to the current dir to handle issues with tempfiles on Windows CI Dir.mktmpdir(nil, Dir.pwd) do |tmpdir| allow(Dir).to receive(:pwd).and_return(tmpdir) puppetfile = File.join(tmpdir, 'Puppetfile') modulepath = File.join(tmpdir, 'modules') cli = Bolt::CLI.new(%w[project init valid --modules puppetlabs-yaml]) cli.execute(cli.parse) expect(File.file?(puppetfile)).to be content = File.read(puppetfile) expect(content).to match(/mod "puppetlabs-yaml"/) expect(content).to match(/mod "puppetlabs-ruby_task_helper"/) expect(content).not_to match(/moduledir/) expect(Dir.exist?(modulepath)).to be expect(Dir.children(modulepath)).to match_array(%w[yaml ruby_task_helper]) end end it 'errors when there is an existing Puppetfile' do Dir.mktmpdir do |tmpdir| puppetfile = File.join(tmpdir, 'Puppetfile') config = File.join(tmpdir, 'bolt.yaml') FileUtils.touch(puppetfile) cli = Bolt::CLI.new(%w[project init valid --modules puppetlabs-stdlib]) expect { cli.execute(cli.parse) }.to raise_error(Bolt::CLIError) expect(File.file?(config)).not_to be end end it 'errors with unknown module names' do Dir.mktmpdir do |dir| puppetfile = File.join(dir, 'Puppetfile') config = File.join(dir, 'bolt.yaml') cli = Bolt::CLI.new(%W[project init #{dir} --modules puppetlabs-fakemodule]) expect { cli.execute(cli.parse) }.to 
raise_error(Bolt::ValidationError) expect(File.file?(config)).not_to be expect(File.file?(puppetfile)).not_to be end end end end end context 'when warning about CLI flags being overridden by inventory' do it "does not warn when no inventory is detected" do cli = Bolt::CLI.new(%w[command run whoami -t foo --password bar]) cli.parse expect(@log_output.readlines.join) .not_to match(/CLI arguments ["password"] may be overridden by Inventory/) end context 'when BOLT_INVENTORY is set' do before(:each) { ENV['BOLT_INVENTORY'] = JSON.dump(version: 2) } after(:each) { ENV.delete('BOLT_INVENTORY') } it "warns when BOLT_INVENTORY data is detected and CLI option could be overridden" do cli = Bolt::CLI.new(%w[command run whoami -t foo --password bar]) cli.parse expect(@log_output.readlines.join) .to match(/CLI arguments \["password"\] may be overridden by Inventory: BOLT_INVENTORY/) end end context 'when inventory file is set' do let(:inventoryfile) { File.join(__dir__, '..', 'fixtures', 'configs', 'empty.yml') } it "warns when BOLT_INVENTORY data is detected and CLI option could be overridden" do cli = Bolt::CLI.new(%W[command run whoami -t foo --password bar --inventoryfile #{inventoryfile}]) cli.parse expect(@log_output.readlines.join) .to match(/CLI arguments \["password"\] may be overridden by Inventory:/) end end end it 'with bolt-project with config, warns and ignores bolt.yaml' do Dir.mktmpdir do |dir| pwd = File.join(dir, 'validname') FileUtils.mkdir_p(pwd) FileUtils.touch(File.join(pwd, 'bolt.yaml')) File.write(File.join(pwd, 'bolt-project.yaml'), { 'format' => 'json' }.to_yaml) cli = Bolt::CLI.new(%W[command run whoami -t foo --boltdir #{pwd}]) cli.parse output = @log_output.readlines expect(output).to include(/Project-level configuration in bolt.yaml is deprecated/) expect(output).to include(/bolt-project.yaml contains valid config keys/) end end end
1
16,545
I think a test in `spec/integration` might help keep this file a bit more manageable, especially since we're just checking file contents and errors. But it's basically the same idea! (A rough sketch follows this row.)
puppetlabs-bolt
rb
@@ -54,7 +54,6 @@ module Selenium
 
         it 'does not set the chrome.detach capability by default' do
           Driver.new(http_client: http)
 
-          expect(caps['goog:chromeOptions']).to eq({})
           expect(caps['chrome.detach']).to be nil
         end
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. require File.expand_path('../../spec_helper', __FILE__) module Selenium module WebDriver module Chrome describe Driver do let(:resp) { {'sessionId' => 'foo', 'value' => Remote::Capabilities.chrome.as_json} } let(:service) { instance_double(Service, start: true, uri: 'http://example.com') } let(:caps) { Remote::Capabilities.new } let(:http) { instance_double(Remote::Http::Default, call: resp).as_null_object } before do allow(Remote::Capabilities).to receive(:chrome).and_return(caps) allow(Service).to receive(:binary_path).and_return('/foo') allow(Service).to receive(:new).and_return(service) end it 'sets the args capability' do Driver.new(http_client: http, args: %w[--foo=bar]) expect(caps['goog:chromeOptions'][:args]).to eq(%w[--foo=bar]) end it 'sets the args capability from switches' do Driver.new(http_client: http, switches: %w[--foo=bar]) expect(caps['goog:chromeOptions'][:args]).to eq(%w[--foo=bar]) end it 'sets the proxy capabilitiy' do proxy = Proxy.new(http: 'localhost:1234') Driver.new(http_client: http, proxy: proxy) expect(caps[:proxy]).to eq(proxy) end it 'does not set the chrome.detach capability by default' do Driver.new(http_client: http) expect(caps['goog:chromeOptions']).to eq({}) expect(caps['chrome.detach']).to be nil end it 'sets the prefs capability' do Driver.new(http_client: http, prefs: {foo: 'bar'}) expect(caps['goog:chromeOptions'][:prefs]).to eq(foo: 'bar') end it 'lets the user override chrome.detach' do Driver.new(http_client: http, detach: true) expect(caps['goog:chromeOptions'][:detach]).to be true end it 'raises an ArgumentError if args is not an Array' do expect { Driver.new(args: '--foo=bar') }.to raise_error(ArgumentError) end it 'uses the given profile' do profile = Profile.new profile['some_pref'] = true profile.add_extension(__FILE__) Driver.new(http_client: http, profile: profile) profile_data = profile.as_json expect(caps['goog:chromeOptions'][:args].first).to include(profile_data[:directory]) expect(caps['goog:chromeOptions'][:extensions]).to eq(profile_data[:extensions]) end it 'takes desired capabilities' do custom_caps = Remote::Capabilities.new custom_caps[:chrome_options] = {'foo' => 'bar'} expect(http).to receive(:call) do |_, _, payload| expect(payload[:desiredCapabilities][:chrome_options]).to include('foo' => 'bar') resp end Driver.new(http_client: http, desired_capabilities: custom_caps) end it 'lets direct arguments take presedence over capabilities' do custom_caps = Remote::Capabilities.new custom_caps['goog:chromeOptions'] = {'args' => %w[foo bar]} expect(http).to receive(:call) do |_, _, payload| expect(payload[:desiredCapabilities]['goog:chromeOptions'][:args]).to eq(['baz']) resp end Driver.new(http_client: http, desired_capabilities: custom_caps, args: %w[baz]) end 
it 'handshakes protocol' do expect(Remote::Bridge).to receive(:handshake) Driver.new(http_client: http) end end end # Chrome end # WebDriver end # Selenium
1
15,563
This spec can be modified instead, which gives it extra strength: check that fetching this key no longer works and therefore returns `nil`. (A sketch follows this row.)
SeleniumHQ-selenium
java
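Aside on the review comment above (illustrative, not part of the dataset row): applied to the hunk in this row's patch, the suggestion keeps the expectation but asserts the key is simply absent. Whether the capability lookup really returns nil here depends on Remote::Capabilities, which is only partially visible, so treat this as a sketch of the reviewer's intent.

        it 'does not set the chrome.detach capability by default' do
          Driver.new(http_client: http)

          # Reviewer's suggestion: instead of deleting the expectation, assert that
          # fetching the key no longer works and therefore returns nil.
          expect(caps['goog:chromeOptions']).to be_nil
          expect(caps['chrome.detach']).to be nil
        end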
@@ -1105,7 +1105,8 @@ instr_set_label_callback(instr_t *instr, instr_label_callback_t cb)
 {
     CLIENT_ASSERT(instr_is_label(instr),
                   "only set callback functions for label instructions");
-    CLIENT_ASSERT(instr->label_cb == NULL, "label callback function is already set");
+    CLIENT_ASSERT(instr->label_cb == NULL || cb == NULL,
+                  "label callback function is already set");
     CLIENT_ASSERT(!TEST(INSTR_RAW_BITS_ALLOCATED, instr->flags),
                   "instruction's raw bits occupying label callback memory");
     instr->label_cb = cb;
1
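Aside on the hunk above (an illustrative sketch, not DynamoRIO source): the relaxed assertion lets a caller clear a previously-set label callback by passing NULL, which is what instr_clone() does further down in this row's file when it drops a callback it cannot safely duplicate. The client-style scaffolding below is an assumption; only the IR calls themselves appear in the file.

#include "dr_api.h" /* assumption: built as a DynamoRIO client */

/* Assumed to match instr_label_callback_t's public signature. */
static void
my_label_cb(void *drcontext, instr_t *label)
{
    /* free any per-label client data here */
}

static void
clone_label_example(void *drcontext)
{
    instr_t *label = INSTR_CREATE_label(drcontext);
    /* Allowed before and after the patch: label_cb starts out NULL. */
    instr_set_label_callback(label, my_label_cb);

    /* instr_clone() memcpy's the struct, so the copy inherits label_cb, and then
     * calls instr_set_label_callback(copy, NULL) to drop the callback it cannot
     * safely duplicate.  With the old assert (label_cb == NULL) that clearing call
     * failed in debug builds; the patched assert also accepts cb == NULL, so the
     * callback can be cleared. */
    instr_t *copy = instr_clone(drcontext, label);

    instr_destroy(drcontext, copy);
    instr_destroy(drcontext, label);
}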
/* ********************************************************** * Copyright (c) 2011-2021 Google, Inc. All rights reserved. * Copyright (c) 2000-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2000-2001 Hewlett-Packard Company */ /* file "instr_shared.c" -- IR instr_t utilities */ /* We need to provide at least one out-of-line definition for our inline * functions in instr_inline.h in case they are all inlined away within DR. * * For gcc, we use -std=gnu99, which uses the C99 inlining model. Using "extern * inline" will provide a definition, but we can only do this in one C file. * Elsewhere we use plain "inline", which will not emit an out of line * definition if inlining fails. * * MSVC always emits link_once definitions for dllexported inline functions, so * this macro magic is unnecessary. 
* http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx */ #define INSTR_INLINE extern inline #include "../globals.h" #include "instr.h" #include "arch.h" #include "../link.h" #include "decode.h" #include "decode_fast.h" #include "instr_create_shared.h" /* FIXME i#1551: refactor this file and avoid this x86-specific include in base arch/ */ #include "x86/decode_private.h" #ifdef DEBUG # include "disassemble.h" #endif #ifdef VMX86_SERVER # include "vmkuw.h" /* VMKUW_SYSCALL_GATEWAY */ #endif #if defined(DEBUG) && !defined(STANDALONE_DECODER) /* case 10450: give messages to clients */ /* we can't undef ASSERT b/c of DYNAMO_OPTION */ # undef ASSERT_TRUNCATE # undef ASSERT_BITFIELD_TRUNCATE # undef ASSERT_NOT_REACHED # define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD # define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD # define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD #endif /* returns an empty instr_t object */ instr_t * instr_create(void *drcontext) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *instr = (instr_t *)heap_alloc(dcontext, sizeof(instr_t) HEAPACCT(ACCT_IR)); /* everything initializes to 0, even flags, to indicate * an uninitialized instruction */ memset((void *)instr, 0, sizeof(instr_t)); #if defined(X86) && defined(X64) instr_set_isa_mode(instr, X64_CACHE_MODE_DC(dcontext) ? DR_ISA_AMD64 : DR_ISA_IA32); #elif defined(ARM) instr_set_isa_mode(instr, dr_get_isa_mode(dcontext)); #endif return instr; } /* deletes the instr_t object with handle "instr" and frees its storage */ void instr_destroy(void *drcontext, instr_t *instr) { dcontext_t *dcontext = (dcontext_t *)drcontext; #ifdef ARM /* i#4680: Reset encode state to avoid dangling pointers. This doesn't cover * auto-scope instr_t vars so the whole IT tracking is still fragile. */ if (instr_get_isa_mode(instr) == DR_ISA_ARM_THUMB) encode_instr_freed_event(dcontext, instr); #endif instr_free(dcontext, instr); /* CAUTION: assumes that instr is not part of any instrlist */ heap_free(dcontext, instr, sizeof(instr_t) HEAPACCT(ACCT_IR)); } /* returns a clone of orig, but with next and prev fields set to NULL */ instr_t * instr_clone(void *drcontext, instr_t *orig) { dcontext_t *dcontext = (dcontext_t *)drcontext; /* We could heap-allocate an instr_noalloc_t but it's intended for use in a * signal handler or other places where we don't want any heap allocation. */ CLIENT_ASSERT(!TEST(INSTR_IS_NOALLOC_STRUCT, orig->flags), "Cloning an instr_noalloc_t is not supported."); instr_t *instr = (instr_t *)heap_alloc(dcontext, sizeof(instr_t) HEAPACCT(ACCT_IR)); memcpy((void *)instr, (void *)orig, sizeof(instr_t)); instr->next = NULL; instr->prev = NULL; /* PR 214962: clients can see some of our mangling * (dr_insert_mbr_instrumentation(), traces), but don't let the flag * mark other client instrs, which could mess up state translation */ instr_set_our_mangling(instr, false); if ((orig->flags & INSTR_RAW_BITS_ALLOCATED) != 0) { /* instr length already set from memcpy */ instr->bytes = (byte *)heap_reachable_alloc(dcontext, instr->length HEAPACCT(ACCT_IR)); memcpy((void *)instr->bytes, (void *)orig->bytes, instr->length); } else if (instr_is_label(orig)) { /* We don't know what this callback does, we can't copy this. The caller that * makes the clone needs to take care of this, xref i#3926. 
*/ instr_set_label_callback(instr, NULL); } if (orig->num_dsts > 0) { /* checking num_dsts, not dsts, b/c of label data */ instr->dsts = (opnd_t *)heap_alloc( dcontext, instr->num_dsts * sizeof(opnd_t) HEAPACCT(ACCT_IR)); memcpy((void *)instr->dsts, (void *)orig->dsts, instr->num_dsts * sizeof(opnd_t)); } if (orig->num_srcs > 1) { /* checking num_src, not srcs, b/c of label data */ instr->srcs = (opnd_t *)heap_alloc( dcontext, (instr->num_srcs - 1) * sizeof(opnd_t) HEAPACCT(ACCT_IR)); memcpy((void *)instr->srcs, (void *)orig->srcs, (instr->num_srcs - 1) * sizeof(opnd_t)); } /* copy note (we make no guarantee, and have no way, to do a deep clone) */ instr->note = orig->note; if (instr_is_label(orig)) memcpy(&instr->label_data, &orig->label_data, sizeof(instr->label_data)); return instr; } /* zeroes out the fields of instr */ void instr_init(void *drcontext, instr_t *instr) { dcontext_t *dcontext = (dcontext_t *)drcontext; /* everything initializes to 0, even flags, to indicate * an uninitialized instruction */ memset((void *)instr, 0, sizeof(instr_t)); instr_set_isa_mode(instr, dr_get_isa_mode(dcontext)); } /* zeroes out the fields of instr */ void instr_noalloc_init(void *drcontext, instr_noalloc_t *instr) { dcontext_t *dcontext = (dcontext_t *)drcontext; memset(instr, 0, sizeof(*instr)); instr->instr.flags |= INSTR_IS_NOALLOC_STRUCT; instr_set_isa_mode(&instr->instr, dr_get_isa_mode(dcontext)); } /* Frees all dynamically allocated storage that was allocated by instr */ void instr_free(void *drcontext, instr_t *instr) { dcontext_t *dcontext = (dcontext_t *)drcontext; if (instr_is_label(instr) && instr_get_label_callback(instr) != NULL) (*instr->label_cb)(dcontext, instr); if (TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags)) return; if (TEST(INSTR_RAW_BITS_ALLOCATED, instr->flags)) { instr_free_raw_bits(dcontext, instr); } if (instr->num_dsts > 0) { /* checking num_dsts, not dsts, b/c of label data */ heap_free(dcontext, instr->dsts, instr->num_dsts * sizeof(opnd_t) HEAPACCT(ACCT_IR)); instr->dsts = NULL; instr->num_dsts = 0; } if (instr->num_srcs > 1) { /* checking num_src, not src, b/c of label data */ /* remember one src is static, rest are dynamic */ heap_free(dcontext, instr->srcs, (instr->num_srcs - 1) * sizeof(opnd_t) HEAPACCT(ACCT_IR)); instr->srcs = NULL; instr->num_srcs = 0; } } int instr_mem_usage(instr_t *instr) { if (TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags)) return sizeof(instr_noalloc_t); int usage = 0; if ((instr->flags & INSTR_RAW_BITS_ALLOCATED) != 0) { usage += instr->length; } if (instr->dsts != NULL) { usage += instr->num_dsts * sizeof(opnd_t); } if (instr->srcs != NULL) { /* remember one src is static, rest are dynamic */ usage += (instr->num_srcs - 1) * sizeof(opnd_t); } usage += sizeof(instr_t); return usage; } /* Frees all dynamically allocated storage that was allocated by instr * Also zeroes out instr's fields * This instr must have been initialized before! */ void instr_reset(void *drcontext, instr_t *instr) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_free(dcontext, instr); if (TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags)) { instr_init(dcontext, instr); instr->flags |= INSTR_IS_NOALLOC_STRUCT; } else { instr_init(dcontext, instr); } } /* Frees all dynamically allocated storage that was allocated by instr, * except for allocated raw bits. * Also zeroes out instr's fields, except for raw bit fields and next and prev * fields, whether instr is ok to mangle, and instr's x86 mode. 
* Use this routine when you want to decode more information into the * same instr_t structure. * This instr must have been initialized before! */ void instr_reuse(void *drcontext, instr_t *instr) { dcontext_t *dcontext = (dcontext_t *)drcontext; byte *bits = NULL; uint len = 0; bool alloc = false; bool mangle = instr_is_app(instr); dr_isa_mode_t isa_mode = instr_get_isa_mode(instr); #ifdef X86 uint rip_rel_pos = instr_rip_rel_valid(instr) ? instr->rip_rel_pos : 0; #endif instr_t *next = instr->next; instr_t *prev = instr->prev; if (instr_raw_bits_valid(instr)) { if (instr_has_allocated_bits(instr)) { /* pretend has no allocated bits to prevent freeing of them */ instr->flags &= ~INSTR_RAW_BITS_ALLOCATED; alloc = true; } bits = instr->bytes; len = instr->length; } instr_free(dcontext, instr); instr_init(dcontext, instr); /* now re-add them */ instr->next = next; instr->prev = prev; if (bits != NULL) { instr->bytes = bits; instr->length = len; /* assume that the bits are now valid and the operands are not * (operand and eflags flags are already unset from init) */ instr->flags |= INSTR_RAW_BITS_VALID; if (alloc) instr->flags |= INSTR_RAW_BITS_ALLOCATED; } /* preserve across the up-decode */ instr_set_isa_mode(instr, isa_mode); #ifdef X86 if (rip_rel_pos > 0) instr_set_rip_rel_pos(instr, rip_rel_pos); #endif if (!mangle) instr->flags |= INSTR_DO_NOT_MANGLE; } instr_t * instr_build(void *drcontext, int opcode, int instr_num_dsts, int instr_num_srcs) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *instr = instr_create(dcontext); instr_set_opcode(instr, opcode); instr_set_num_opnds(dcontext, instr, instr_num_dsts, instr_num_srcs); return instr; } instr_t * instr_build_bits(void *drcontext, int opcode, uint num_bytes) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *instr = instr_create(dcontext); instr_set_opcode(instr, opcode); instr_allocate_raw_bits(dcontext, instr, num_bytes); return instr; } /* encodes to buffer, then returns length. * needed for things we must have encoding for: length and eflags. * if !always_cache, only caches the encoding if instr_is_app(); * if always_cache, the caller should invalidate the cache when done. */ static int private_instr_encode(dcontext_t *dcontext, instr_t *instr, bool always_cache) { byte *buf; byte stack_buf[MAX_INSTR_LENGTH]; if (TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags)) { /* We have no choice: we live with no persistent caching if the stack is * too far away, because the instr's raw bits will be on the stack. * (We can't use encode_buf here bc the re-rel below does not support * the same buffer; maybe it could w/ a memmove in the encode code?) */ buf = stack_buf; } else { /* We cannot efficiently use a stack buffer for encoding since our stack on x64 * linux can be too far to reach from our heap. We need reachable heap. * Otherwise we can't keep the encoding around since re-relativization won't * work. */ buf = heap_reachable_alloc(dcontext, MAX_INSTR_LENGTH HEAPACCT(ACCT_IR)); } uint len; /* Do not cache instr opnds as they are pc-relative to final encoding location. * Rather than us walking all of the operands separately here, we have * instr_encode_check_reachability tell us while it does its normal walk. * Xref i#731. 
*/ bool has_instr_opnds; byte *nxt = instr_encode_check_reachability(dcontext, instr, buf, &has_instr_opnds); bool valid_to_cache = !has_instr_opnds; if (nxt == NULL) { nxt = instr_encode_ignore_reachability(dcontext, instr, buf); if (nxt == NULL) { SYSLOG_INTERNAL_WARNING("cannot encode %s", opcode_to_encoding_info(instr->opcode, instr_get_isa_mode(instr) _IF_ARM(false)) ->name); if (!TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags)) heap_reachable_free(dcontext, buf, MAX_INSTR_LENGTH HEAPACCT(ACCT_IR)); return 0; } /* if unreachable, we can't cache, since re-relativization won't work */ valid_to_cache = false; } len = (int)(nxt - buf); CLIENT_ASSERT(len > 0 || instr_is_label(instr), "encode instr for length/eflags error: zero length"); CLIENT_ASSERT(len <= MAX_INSTR_LENGTH, "encode instr for length/eflags error: instr too long"); /* do not cache encoding if mangle is false, that way we can have * non-cti-instructions that are pc-relative. * we also cannot cache if a rip-relative operand is unreachable. * we can cache if a rip-relative operand is present b/c instr_encode() * sets instr_set_rip_rel_pos() for us. */ if (len > 0 && ((valid_to_cache && instr_is_app(instr)) || always_cache /*caller will use then invalidate*/)) { bool valid = instr_operands_valid(instr); #ifdef X86 /* we can't call instr_rip_rel_valid() b/c the raw bytes are not yet * set up: we rely on instr_encode() setting instr->rip_rel_pos and * the valid flag, even though raw bytes weren't there at the time. * we rely on the INSTR_RIP_REL_VALID flag being invalidated whenever * the raw bits are. */ bool rip_rel_valid = TEST(INSTR_RIP_REL_VALID, instr->flags); #endif byte *tmp; CLIENT_ASSERT(!instr_raw_bits_valid(instr), "encode instr: bit validity error"); /* else shouldn't get here */ instr_allocate_raw_bits(dcontext, instr, len); /* we use a hack in order to take advantage of * copy_and_re_relativize_raw_instr(), which copies from instr->bytes * using rip-rel-calculating routines that also use instr->bytes. */ tmp = instr->bytes; instr->bytes = buf; #ifdef X86 instr_set_rip_rel_valid(instr, rip_rel_valid); #endif copy_and_re_relativize_raw_instr(dcontext, instr, tmp, tmp); instr->bytes = tmp; instr_set_operands_valid(instr, valid); } if (!TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags)) heap_reachable_free(dcontext, buf, MAX_INSTR_LENGTH HEAPACCT(ACCT_IR)); return len; } #define inlined_instr_get_opcode(instr) \ (IF_DEBUG_(CLIENT_ASSERT(sizeof(*instr) == sizeof(instr_t), "invalid type"))( \ ((instr)->opcode == OP_UNDECODED) \ ? (instr_decode_with_current_dcontext(instr), (instr)->opcode) \ : (instr)->opcode)) int instr_get_opcode(instr_t *instr) { return inlined_instr_get_opcode(instr); } /* in rest of file, directly de-reference for performance (PR 622253) */ #define instr_get_opcode inlined_instr_get_opcode static inline void instr_being_modified(instr_t *instr, bool raw_bits_valid) { if (!raw_bits_valid) { /* if we're modifying the instr, don't use original bits to encode! */ instr_set_raw_bits_valid(instr, false); } /* PR 214962: if client changes our mangling, un-mark to avoid bad translation */ instr_set_our_mangling(instr, false); } void instr_set_opcode(instr_t *instr, int opcode) { instr->opcode = opcode; /* if we're modifying opcode, don't use original bits to encode! 
*/ instr_being_modified(instr, false /*raw bits invalid*/); /* do not assume operands are valid, they are separate from opcode, * but if opcode is invalid operands shouldn't be valid */ CLIENT_ASSERT((opcode != OP_INVALID && opcode != OP_UNDECODED) || !instr_operands_valid(instr), "instr_set_opcode: operand-opcode validity mismatch"); } /* Returns true iff instr's opcode is NOT OP_INVALID. * Not to be confused with an invalid opcode, which can be OP_INVALID or * OP_UNDECODED. OP_INVALID means an instruction with no valid fields: * raw bits (may exist but do not correspond to a valid instr), opcode, * eflags, or operands. It could be an uninitialized * instruction or the result of decoding an invalid sequence of bytes. */ bool instr_valid(instr_t *instr) { return (instr->opcode != OP_INVALID); } DR_API /* Get the original application PC of the instruction if it exists. */ app_pc instr_get_app_pc(instr_t *instr) { return instr_get_translation(instr); } /* Returns true iff instr's opcode is valid. If the opcode is not * OP_INVALID or OP_UNDECODED it is assumed to be valid. However, calling * instr_get_opcode() will attempt to decode an OP_UNDECODED opcode, hence the * purpose of this routine. */ bool instr_opcode_valid(instr_t *instr) { return (instr->opcode != OP_INVALID && instr->opcode != OP_UNDECODED); } const instr_info_t * instr_get_instr_info(instr_t *instr) { dr_isa_mode_t isa_mode; #ifdef ARM bool in_it_block = false; #endif if (instr == NULL) return NULL; isa_mode = instr_get_isa_mode(instr); #ifdef ARM if (isa_mode == DR_ISA_ARM_THUMB) { /* A predicated OP_b_short could be either in an IT block or not, * we assume it is not in an IT block in the case of OP_b_short. */ if (instr_get_opcode(instr) != OP_b_short && instr_get_predicate(instr) != DR_PRED_NONE) in_it_block = true; } #endif return opcode_to_encoding_info(instr_get_opcode(instr), isa_mode _IF_ARM(in_it_block)); } const instr_info_t * get_instr_info(int opcode) { /* Assuming the use case of this function is to get the opcode related info, *e.g., eflags in instr_get_opcode_eflags for OP_adds vs OP_add, so it does * not matter whether it is in an IT block or not. */ return opcode_to_encoding_info( opcode, dr_get_isa_mode(get_thread_private_dcontext()) _IF_ARM(false)); } #undef instr_get_src opnd_t instr_get_src(instr_t *instr, uint pos) { return INSTR_GET_SRC(instr, pos); } #define instr_get_src INSTR_GET_SRC #undef instr_get_dst opnd_t instr_get_dst(instr_t *instr, uint pos) { return INSTR_GET_DST(instr, pos); } #define instr_get_dst INSTR_GET_DST /* allocates storage for instr_num_srcs src operands and instr_num_dsts dst operands * assumes that instr is currently all zeroed out! 
*/ void instr_set_num_opnds(void *drcontext, instr_t *instr, int instr_num_dsts, int instr_num_srcs) { dcontext_t *dcontext = (dcontext_t *)drcontext; if (instr_num_dsts > 0) { CLIENT_ASSERT(instr->num_dsts == 0 && instr->dsts == NULL, "instr_set_num_opnds: dsts are already set"); CLIENT_ASSERT_TRUNCATE(instr->num_dsts, byte, instr_num_dsts, "instr_set_num_opnds: too many dsts"); instr->num_dsts = (byte)instr_num_dsts; if (TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags)) { instr_noalloc_t *noalloc = (instr_noalloc_t *)instr; noalloc->instr.dsts = noalloc->dsts; } else { instr->dsts = (opnd_t *)heap_alloc( dcontext, instr_num_dsts * sizeof(opnd_t) HEAPACCT(ACCT_IR)); } } if (instr_num_srcs > 0) { /* remember that src0 is static, rest are dynamic */ if (instr_num_srcs > 1) { CLIENT_ASSERT(instr->num_srcs <= 1 && instr->srcs == NULL, "instr_set_num_opnds: srcs are already set"); if (TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags)) { instr_noalloc_t *noalloc = (instr_noalloc_t *)instr; noalloc->instr.srcs = noalloc->srcs; } else { instr->srcs = (opnd_t *)heap_alloc( dcontext, (instr_num_srcs - 1) * sizeof(opnd_t) HEAPACCT(ACCT_IR)); } } CLIENT_ASSERT_TRUNCATE(instr->num_srcs, byte, instr_num_srcs, "instr_set_num_opnds: too many srcs"); instr->num_srcs = (byte)instr_num_srcs; } instr_being_modified(instr, false /*raw bits invalid*/); /* assume all operands are valid */ instr_set_operands_valid(instr, true); } /* sets the src opnd at position pos in instr */ void instr_set_src(instr_t *instr, uint pos, opnd_t opnd) { CLIENT_ASSERT(pos >= 0 && pos < instr->num_srcs, "instr_set_src: ordinal invalid"); /* remember that src0 is static, rest are dynamic */ if (pos == 0) instr->src0 = opnd; else instr->srcs[pos - 1] = opnd; /* if we're modifying operands, don't use original bits to encode! */ instr_being_modified(instr, false /*raw bits invalid*/); /* assume all operands are valid */ instr_set_operands_valid(instr, true); } /* sets the dst opnd at position pos in instr */ void instr_set_dst(instr_t *instr, uint pos, opnd_t opnd) { CLIENT_ASSERT(pos >= 0 && pos < instr->num_dsts, "instr_set_dst: ordinal invalid"); instr->dsts[pos] = opnd; /* if we're modifying operands, don't use original bits to encode! */ instr_being_modified(instr, false /*raw bits invalid*/); /* assume all operands are valid */ instr_set_operands_valid(instr, true); } /* end is open-ended (so pass pos,pos+1 to remove just the pos-th src) */ void instr_remove_srcs(void *drcontext, instr_t *instr, uint start, uint end) { dcontext_t *dcontext = (dcontext_t *)drcontext; opnd_t *new_srcs; CLIENT_ASSERT(!TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags), /* We could implement, but it does not seem an important use case. */ "instr_remove_srcs not supported for instr_noalloc_t"); CLIENT_ASSERT(start >= 0 && end <= instr->num_srcs && start < end, "instr_remove_srcs: ordinals invalid"); if (instr->num_srcs - 1 > (byte)(end - start)) { new_srcs = (opnd_t *)heap_alloc(dcontext, (instr->num_srcs - 1 - (end - start)) * sizeof(opnd_t) HEAPACCT(ACCT_IR)); if (start > 1) memcpy(new_srcs, instr->srcs, (start - 1) * sizeof(opnd_t)); if ((byte)end < instr->num_srcs - 1) { memcpy(new_srcs + (start == 0 ? 
0 : (start - 1)), instr->srcs + end, (instr->num_srcs - 1 - end) * sizeof(opnd_t)); } } else new_srcs = NULL; if (start == 0 && end < instr->num_srcs) instr->src0 = instr->srcs[end - 1]; heap_free(dcontext, instr->srcs, (instr->num_srcs - 1) * sizeof(opnd_t) HEAPACCT(ACCT_IR)); instr->num_srcs -= (byte)(end - start); instr->srcs = new_srcs; instr_being_modified(instr, false /*raw bits invalid*/); instr_set_operands_valid(instr, true); } /* end is open-ended (so pass pos,pos+1 to remove just the pos-th dst) */ void instr_remove_dsts(void *drcontext, instr_t *instr, uint start, uint end) { dcontext_t *dcontext = (dcontext_t *)drcontext; opnd_t *new_dsts; CLIENT_ASSERT(!TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags), /* We could implement, but it does not seem an important use case. */ "instr_remove_srcs not supported for instr_noalloc_t"); CLIENT_ASSERT(start >= 0 && end <= instr->num_dsts && start < end, "instr_remove_dsts: ordinals invalid"); if (instr->num_dsts > (byte)(end - start)) { new_dsts = (opnd_t *)heap_alloc(dcontext, (instr->num_dsts - (end - start)) * sizeof(opnd_t) HEAPACCT(ACCT_IR)); if (start > 0) memcpy(new_dsts, instr->dsts, start * sizeof(opnd_t)); if (end < instr->num_dsts) { memcpy(new_dsts + start, instr->dsts + end, (instr->num_dsts - end) * sizeof(opnd_t)); } } else new_dsts = NULL; heap_free(dcontext, instr->dsts, instr->num_dsts * sizeof(opnd_t) HEAPACCT(ACCT_IR)); instr->num_dsts -= (byte)(end - start); instr->dsts = new_dsts; instr_being_modified(instr, false /*raw bits invalid*/); instr_set_operands_valid(instr, true); } #undef instr_get_target opnd_t instr_get_target(instr_t *instr) { return INSTR_GET_TARGET(instr); } #define instr_get_target INSTR_GET_TARGET /* Assumes that if an instr has a jump target, it's stored in the 0th src * location. */ void instr_set_target(instr_t *instr, opnd_t target) { CLIENT_ASSERT(instr->num_srcs >= 1, "instr_set_target: instr has no sources"); instr->src0 = target; /* if we're modifying operands, don't use original bits to encode, * except for jecxz/loop* */ instr_being_modified(instr, instr_is_cti_short_rewrite(instr, NULL)); /* assume all operands are valid */ instr_set_operands_valid(instr, true); } instr_t * instr_set_prefix_flag(instr_t *instr, uint prefix) { instr->prefixes |= prefix; instr_being_modified(instr, false /*raw bits invalid*/); return instr; } bool instr_get_prefix_flag(instr_t *instr, uint prefix) { return ((instr->prefixes & prefix) != 0); } void instr_set_prefixes(instr_t *instr, uint prefixes) { instr->prefixes = prefixes; instr_being_modified(instr, false /*raw bits invalid*/); } uint instr_get_prefixes(instr_t *instr) { return instr->prefixes; } bool instr_is_predicated(instr_t *instr) { /* XXX i#1556: we should also mark conditional branches and string loops * as predicated! 
*/ dr_pred_type_t pred = instr_get_predicate(instr); return instr_predicate_is_cond(pred); } dr_pred_type_t instr_get_predicate(instr_t *instr) { /* Optimization: we assume prefixes are the high bits to avoid an & */ return instr->prefixes >> PREFIX_PRED_BITPOS; } instr_t * instr_set_predicate(instr_t *instr, dr_pred_type_t pred) { instr->prefixes |= ((pred << PREFIX_PRED_BITPOS) & PREFIX_PRED_MASK); return instr; } bool instr_branch_is_padded(instr_t *instr) { return TEST(INSTR_BRANCH_PADDED, instr->flags); } void instr_branch_set_padded(instr_t *instr, bool val) { if (val) instr->flags |= INSTR_BRANCH_PADDED; else instr->flags &= ~INSTR_BRANCH_PADDED; } /* Returns true iff instr has been marked as a special exit cti */ bool instr_branch_special_exit(instr_t *instr) { return TEST(INSTR_BRANCH_SPECIAL_EXIT, instr->flags); } /* If val is true, indicates that instr is a special exit cti. * If val is false, indicates otherwise */ void instr_branch_set_special_exit(instr_t *instr, bool val) { if (val) instr->flags |= INSTR_BRANCH_SPECIAL_EXIT; else instr->flags &= ~INSTR_BRANCH_SPECIAL_EXIT; } /* Returns the type of the original indirect branch of an exit */ int instr_exit_branch_type(instr_t *instr) { return instr->flags & EXIT_CTI_TYPES; } /* Set type of indirect branch exit */ void instr_exit_branch_set_type(instr_t *instr, uint type) { /* set only expected flags */ type &= EXIT_CTI_TYPES; instr->flags &= ~EXIT_CTI_TYPES; instr->flags |= type; } void instr_set_ok_to_mangle(instr_t *instr, bool val) { if (val) instr_set_app(instr); else instr_set_meta(instr); } void instr_set_app(instr_t *instr) { instr->flags &= ~INSTR_DO_NOT_MANGLE; } void instr_set_meta(instr_t *instr) { instr->flags |= INSTR_DO_NOT_MANGLE; } bool instr_is_meta_may_fault(instr_t *instr) { /* no longer using a special flag (i#496) */ return instr_is_meta(instr) && instr_get_translation(instr) != NULL; } void instr_set_meta_may_fault(instr_t *instr, bool val) { /* no longer using a special flag (i#496) */ instr_set_meta(instr); CLIENT_ASSERT(instr_get_translation(instr) != NULL, "meta_may_fault instr must have translation"); } /* convenience routine */ void instr_set_meta_no_translation(instr_t *instr) { instr_set_meta(instr); instr_set_translation(instr, NULL); } void instr_set_ok_to_emit(instr_t *instr, bool val) { CLIENT_ASSERT(instr != NULL, "instr_set_ok_to_emit: passed NULL"); if (val) instr->flags &= ~INSTR_DO_NOT_EMIT; else instr->flags |= INSTR_DO_NOT_EMIT; } uint instr_eflags_conditionally(uint full_eflags, dr_pred_type_t pred, dr_opnd_query_flags_t flags) { if (!TEST(DR_QUERY_INCLUDE_COND_SRCS, flags) && instr_predicate_is_cond(pred) && !instr_predicate_reads_srcs(pred)) { /* i#1836: the predicate itself reads some flags */ full_eflags &= ~EFLAGS_READ_NON_PRED; } if (!TEST(DR_QUERY_INCLUDE_COND_DSTS, flags) && instr_predicate_is_cond(pred) && !instr_predicate_writes_eflags(pred)) full_eflags &= ~EFLAGS_WRITE_ALL; return full_eflags; } uint instr_get_eflags(instr_t *instr, dr_opnd_query_flags_t flags) { if ((instr->flags & INSTR_EFLAGS_VALID) == 0) { bool encoded = false; dcontext_t *dcontext = get_thread_private_dcontext(); dr_isa_mode_t old_mode; /* we assume we cannot trust the opcode independently of operands */ if (instr_needs_encoding(instr)) { int len; encoded = true; len = private_instr_encode(dcontext, instr, true /*cache*/); if (len == 0) { if (!instr_is_label(instr)) CLIENT_ASSERT(false, "instr_get_eflags: invalid instr"); return 0; } } dr_set_isa_mode(dcontext, instr_get_isa_mode(instr), &old_mode); 
decode_eflags_usage(dcontext, instr_get_raw_bits(instr), &instr->eflags, DR_QUERY_INCLUDE_ALL); dr_set_isa_mode(dcontext, old_mode, NULL); if (encoded) { /* if private_instr_encode passed us back whether it's valid * to cache (i.e., non-meta instr that can reach) we could skip * this invalidation for such cases */ instr_free_raw_bits(dcontext, instr); CLIENT_ASSERT(!instr_raw_bits_valid(instr), "internal encoding buf error"); } /* even if decode fails, set valid to true -- ok? FIXME */ instr_set_eflags_valid(instr, true); } return instr_eflags_conditionally(instr->eflags, instr_get_predicate(instr), flags); } DR_API /* Returns the eflags usage of instructions with opcode "opcode", * as EFLAGS_ constants or'ed together */ uint instr_get_opcode_eflags(int opcode) { /* assumption: all encoding of an opcode have same eflags behavior! */ const instr_info_t *info = get_instr_info(opcode); return info->eflags; } uint instr_get_arith_flags(instr_t *instr, dr_opnd_query_flags_t flags) { if ((instr->flags & INSTR_EFLAGS_6_VALID) == 0) { /* just get info on all the flags */ return instr_get_eflags(instr, flags); } return instr_eflags_conditionally(instr->eflags, instr_get_predicate(instr), flags); } bool instr_eflags_valid(instr_t *instr) { return ((instr->flags & INSTR_EFLAGS_VALID) != 0); } void instr_set_eflags_valid(instr_t *instr, bool valid) { if (valid) { instr->flags |= INSTR_EFLAGS_VALID; instr->flags |= INSTR_EFLAGS_6_VALID; } else { /* assume that arith flags are also invalid */ instr->flags &= ~INSTR_EFLAGS_VALID; instr->flags &= ~INSTR_EFLAGS_6_VALID; } } /* Returns true iff instr's arithmetic flags (the 6 bottom eflags) are * up to date */ bool instr_arith_flags_valid(instr_t *instr) { return ((instr->flags & INSTR_EFLAGS_6_VALID) != 0); } /* Sets instr's arithmetic flags (the 6 bottom eflags) to be valid if * valid is true, invalid otherwise */ void instr_set_arith_flags_valid(instr_t *instr, bool valid) { if (valid) instr->flags |= INSTR_EFLAGS_6_VALID; else { instr->flags &= ~INSTR_EFLAGS_VALID; instr->flags &= ~INSTR_EFLAGS_6_VALID; } } void instr_set_operands_valid(instr_t *instr, bool valid) { if (valid) instr->flags |= INSTR_OPERANDS_VALID; else instr->flags &= ~INSTR_OPERANDS_VALID; } /* N.B.: this routine sets the "raw bits are valid" flag */ void instr_set_raw_bits(instr_t *instr, byte *addr, uint length) { if ((instr->flags & INSTR_RAW_BITS_ALLOCATED) != 0) { /* this does happen, when up-decoding an instr using its * own raw bits, so let it happen, but make sure allocated * bits aren't being lost */ CLIENT_ASSERT(addr == instr->bytes && length == instr->length, "instr_set_raw_bits: bits already there, but different"); } if (!instr_valid(instr)) instr_set_opcode(instr, OP_UNDECODED); instr->flags |= INSTR_RAW_BITS_VALID; instr->bytes = addr; instr->length = length; #ifdef X86 instr_set_rip_rel_valid(instr, false); /* relies on original raw bits */ #endif } /* this is sort of a hack, used to allow dynamic reallocation of * the trace buffer, which requires shifting the addresses of all * the trace Instrs since they point into the old buffer */ void instr_shift_raw_bits(instr_t *instr, ssize_t offs) { if ((instr->flags & INSTR_RAW_BITS_VALID) != 0) instr->bytes += offs; #ifdef X86 instr_set_rip_rel_valid(instr, false); /* relies on original raw bits */ #endif } /* moves the instruction from USE_ORIGINAL_BITS state to a * needs-full-encoding state */ void instr_set_raw_bits_valid(instr_t *instr, bool valid) { if (valid) instr->flags |= INSTR_RAW_BITS_VALID; else { instr->flags &= 
~INSTR_RAW_BITS_VALID; /* DO NOT set bytes to NULL or length to 0, we still want to be * able to point at the original instruction for use in translating * addresses for exception/signal handlers * Also do not de-allocate allocated bits */ #ifdef X86 instr_set_rip_rel_valid(instr, false); #endif } } void instr_free_raw_bits(void *drcontext, instr_t *instr) { dcontext_t *dcontext = (dcontext_t *)drcontext; if ((instr->flags & INSTR_RAW_BITS_ALLOCATED) == 0) return; if (!TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags)) heap_reachable_free(dcontext, instr->bytes, instr->length HEAPACCT(ACCT_IR)); instr->bytes = NULL; instr->flags &= ~INSTR_RAW_BITS_VALID; instr->flags &= ~INSTR_RAW_BITS_ALLOCATED; } /* creates array of bytes to store raw bytes of an instr into * (original bits are read-only) * initializes array to the original bits! */ void instr_allocate_raw_bits(void *drcontext, instr_t *instr, uint num_bytes) { dcontext_t *dcontext = (dcontext_t *)drcontext; byte *original_bits = NULL; if (TEST(INSTR_RAW_BITS_VALID, instr->flags)) original_bits = instr->bytes; if (!TEST(INSTR_RAW_BITS_ALLOCATED, instr->flags) || instr->length != num_bytes) { byte *new_bits; if (TEST(INSTR_IS_NOALLOC_STRUCT, instr->flags)) { /* This may not be reachable, so re-relativization is limited. */ instr_noalloc_t *noalloc = (instr_noalloc_t *)instr; CLIENT_ASSERT(num_bytes <= sizeof(noalloc->encode_buf), "instr_allocate_raw_bits exceeds instr_noalloc_t capacity"); new_bits = noalloc->encode_buf; } else { /* We need reachable heap for rip-rel re-relativization. */ new_bits = (byte *)heap_reachable_alloc(dcontext, num_bytes HEAPACCT(ACCT_IR)); } if (original_bits != NULL) { /* copy original bits into modified bits so can just modify * a few and still have all info in one place */ memcpy(new_bits, original_bits, (num_bytes < instr->length) ? num_bytes : instr->length); } if ((instr->flags & INSTR_RAW_BITS_ALLOCATED) != 0) instr_free_raw_bits(dcontext, instr); instr->bytes = new_bits; instr->length = num_bytes; } /* assume that the bits are now valid and the operands are not */ instr->flags |= INSTR_RAW_BITS_VALID; instr->flags |= INSTR_RAW_BITS_ALLOCATED; instr->flags &= ~INSTR_OPERANDS_VALID; instr->flags &= ~INSTR_EFLAGS_VALID; #ifdef X86 instr_set_rip_rel_valid(instr, false); /* relies on original raw bits */ #endif } void instr_set_label_callback(instr_t *instr, instr_label_callback_t cb) { CLIENT_ASSERT(instr_is_label(instr), "only set callback functions for label instructions"); CLIENT_ASSERT(instr->label_cb == NULL, "label callback function is already set"); CLIENT_ASSERT(!TEST(INSTR_RAW_BITS_ALLOCATED, instr->flags), "instruction's raw bits occupying label callback memory"); instr->label_cb = cb; } instr_label_callback_t instr_get_label_callback(instr_t *instr) { CLIENT_ASSERT(instr_is_label(instr), "only label instructions have a callback function"); CLIENT_ASSERT(!TEST(INSTR_RAW_BITS_ALLOCATED, instr->flags), "instruction's raw bits occupying label callback memory"); return instr->label_cb; } instr_t * instr_set_translation(instr_t *instr, app_pc addr) { #if defined(WINDOWS) && !defined(STANDALONE_DECODER) addr = get_app_pc_from_intercept_pc_if_necessary(addr); #endif instr->translation = addr; return instr; } app_pc instr_get_translation(instr_t *instr) { return instr->translation; } /* This makes it safe to keep an instr around indefinitely when an instrs raw * bits point into the cache. It allocates memory local to the instr to hold * a copy of the raw bits. 
If this was not done the original raw bits could * be deleted at some point. This is necessary if you want to keep an instr * around for a long time (for clients, beyond returning from the call that * gave you the instr) */ void instr_make_persistent(void *drcontext, instr_t *instr) { dcontext_t *dcontext = (dcontext_t *)drcontext; if ((instr->flags & INSTR_RAW_BITS_VALID) != 0 && (instr->flags & INSTR_RAW_BITS_ALLOCATED) == 0) { instr_allocate_raw_bits(dcontext, instr, instr->length); } } byte * instr_get_raw_bits(instr_t *instr) { return instr->bytes; } /* returns the pos-th instr byte */ byte instr_get_raw_byte(instr_t *instr, uint pos) { CLIENT_ASSERT(pos >= 0 && pos < instr->length && instr->bytes != NULL, "instr_get_raw_byte: ordinal invalid, or no raw bits"); return instr->bytes[pos]; } /* returns the 4 bytes starting at position pos */ uint instr_get_raw_word(instr_t *instr, uint pos) { CLIENT_ASSERT(pos >= 0 && pos + 3 < instr->length && instr->bytes != NULL, "instr_get_raw_word: ordinal invalid, or no raw bits"); return *((uint *)(instr->bytes + pos)); } /* Sets the pos-th instr byte by storing the unsigned * character value in the pos-th slot * Must call instr_allocate_raw_bits before calling this function * (original bits are read-only!) */ void instr_set_raw_byte(instr_t *instr, uint pos, byte val) { CLIENT_ASSERT((instr->flags & INSTR_RAW_BITS_ALLOCATED) != 0, "instr_set_raw_byte: no raw bits"); CLIENT_ASSERT(pos >= 0 && pos < instr->length && instr->bytes != NULL, "instr_set_raw_byte: ordinal invalid, or no raw bits"); instr->bytes[pos] = (byte)val; #ifdef X86 instr_set_rip_rel_valid(instr, false); /* relies on original raw bits */ #endif } /* Copies num_bytes bytes from start into the mangled bytes * array of instr. * Must call instr_allocate_raw_bits before calling this function. */ void instr_set_raw_bytes(instr_t *instr, byte *start, uint num_bytes) { CLIENT_ASSERT((instr->flags & INSTR_RAW_BITS_ALLOCATED) != 0, "instr_set_raw_bytes: no raw bits"); CLIENT_ASSERT(num_bytes <= instr->length && instr->bytes != NULL, "instr_set_raw_bytes: ordinal invalid, or no raw bits"); memcpy(instr->bytes, start, num_bytes); #ifdef X86 instr_set_rip_rel_valid(instr, false); /* relies on original raw bits */ #endif } /* Stores 32-bit value word in positions pos through pos+3 in * modified_bits. * Must call instr_allocate_raw_bits before calling this function. */ void instr_set_raw_word(instr_t *instr, uint pos, uint word) { CLIENT_ASSERT((instr->flags & INSTR_RAW_BITS_ALLOCATED) != 0, "instr_set_raw_word: no raw bits"); CLIENT_ASSERT(pos >= 0 && pos + 3 < instr->length && instr->bytes != NULL, "instr_set_raw_word: ordinal invalid, or no raw bits"); *((uint *)(instr->bytes + pos)) = word; #ifdef X86 instr_set_rip_rel_valid(instr, false); /* relies on original raw bits */ #endif } int instr_length(void *drcontext, instr_t *instr) { dcontext_t *dcontext = (dcontext_t *)drcontext; int res; #ifdef ARM /* We can't handle IT blocks if we only track state on some instrs that * we have to encode for length, so unfortunately we must pay the cost * of tracking for every length call. 
*/ encode_track_it_block(dcontext, instr); #endif if (!instr_needs_encoding(instr)) return instr->length; res = instr_length_arch(dcontext, instr); if (res != -1) return res; /* else, encode to find length */ return private_instr_encode(dcontext, instr, false /*don't need to cache*/); } instr_t * instr_set_encoding_hint(instr_t *instr, dr_encoding_hint_type_t hint) { instr->encoding_hints |= hint; return instr; } bool instr_has_encoding_hint(instr_t *instr, dr_encoding_hint_type_t hint) { return TEST(hint, instr->encoding_hints); } /***********************************************************************/ /* decoding routines */ /* If instr is at Level 0 (i.e., a bundled group of instrs as raw bits), * expands instr into a sequence of Level 1 instrs using decode_raw() which * are added in place to ilist. * Returns the replacement of instr, if any expansion is performed * (in which case the old instr is destroyed); otherwise returns * instr unchanged. * If encounters an invalid instr, stops expanding at that instr, and keeps * instr in the ilist pointing to the invalid bits as an invalid instr. */ instr_t * instr_expand(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr) { /* Sometimes deleting instr but sometimes not (when return early) * is painful -- so we go to the trouble of re-using instr * for the first expanded instr */ instr_t *newinstr, *firstinstr = NULL; int remaining_bytes, cur_inst_len; byte *curbytes, *newbytes; dr_isa_mode_t old_mode; /* make it easy for iterators: handle NULL * assume that if opcode is valid, is at Level 2, so not a bundle * do not expand meta-instrs -- FIXME: is that the right thing to do? */ if (instr == NULL || instr_opcode_valid(instr) || instr_is_meta(instr) || /* if an invalid instr (not just undecoded) do not try to expand */ !instr_valid(instr)) return instr; DOLOG(5, LOG_ALL, { /* disassembling might change the instruction object, we're cloning it * for the logger */ instr_t *log_instr = instr_clone(dcontext, instr); d_r_loginst(dcontext, 4, log_instr, "instr_expand"); instr_destroy(dcontext, log_instr); }); /* decode routines use dcontext mode, but we want instr mode */ dr_set_isa_mode(dcontext, instr_get_isa_mode(instr), &old_mode); /* never have opnds but not opcode */ CLIENT_ASSERT(!instr_operands_valid(instr), "instr_expand: opnds are already valid"); CLIENT_ASSERT(instr_raw_bits_valid(instr), "instr_expand: raw bits are invalid"); curbytes = instr->bytes; if ((uint)decode_sizeof(dcontext, curbytes, NULL _IF_X86_64(NULL)) == instr->length) { dr_set_isa_mode(dcontext, old_mode, NULL); return instr; /* Level 1 */ } remaining_bytes = instr->length; while (remaining_bytes > 0) { /* insert every separated instr into list */ newinstr = instr_create(dcontext); newbytes = decode_raw(dcontext, curbytes, newinstr); #ifndef NOT_DYNAMORIO_CORE_PROPER if (expand_should_set_translation(dcontext)) instr_set_translation(newinstr, curbytes); #endif if (newbytes == NULL) { /* invalid instr -- stop expanding, point instr at remaining bytes */ instr_set_raw_bits(instr, curbytes, remaining_bytes); instr_set_opcode(instr, OP_INVALID); if (firstinstr == NULL) firstinstr = instr; instr_destroy(dcontext, newinstr); dr_set_isa_mode(dcontext, old_mode, NULL); return firstinstr; } DOLOG(5, LOG_ALL, { d_r_loginst(dcontext, 4, newinstr, "\tjust expanded into"); }); /* CAREFUL of what you call here -- don't call anything that * auto-upgrades instr to Level 2, it will fail on Level 0 bundles! 
*/ if (instr_has_allocated_bits(instr) && !instr_is_cti_short_rewrite(newinstr, curbytes)) { /* make sure to have our own copy of any allocated bits * before we destroy the original instr */ IF_X64(CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(newbytes - curbytes), "instr_expand: internal truncation error")); instr_allocate_raw_bits(dcontext, newinstr, (uint)(newbytes - curbytes)); } /* special case: for cti_short, do not fully decode the * constituent instructions, leave as a bundle. * the instr will still have operands valid. */ if (instr_is_cti_short_rewrite(newinstr, curbytes)) { newbytes = remangle_short_rewrite(dcontext, newinstr, curbytes, 0); } else if (instr_is_cti_short(newinstr)) { /* make sure non-mangled short ctis, which are generated by * us and never left there from app's, are not marked as exit ctis */ instr_set_meta(newinstr); } IF_X64(CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_int(newbytes - curbytes), "instr_expand: internal truncation error")); cur_inst_len = (int)(newbytes - curbytes); remaining_bytes -= cur_inst_len; curbytes = newbytes; instrlist_preinsert(ilist, instr, newinstr); if (firstinstr == NULL) firstinstr = newinstr; } /* delete original instr from list */ instrlist_remove(ilist, instr); instr_destroy(dcontext, instr); CLIENT_ASSERT(firstinstr != NULL, "instr_expand failure"); dr_set_isa_mode(dcontext, old_mode, NULL); return firstinstr; } bool instr_is_level_0(instr_t *instr) { dcontext_t *dcontext = get_thread_private_dcontext(); dr_isa_mode_t old_mode; /* assume that if opcode is valid, is at Level 2, so not a bundle * do not expand meta-instrs -- FIXME: is that the right to do? */ if (instr == NULL || instr_opcode_valid(instr) || instr_is_meta(instr) || /* if an invalid instr (not just undecoded) do not try to expand */ !instr_valid(instr)) return false; /* never have opnds but not opcode */ CLIENT_ASSERT(!instr_operands_valid(instr), "instr_is_level_0: opnds are already valid"); CLIENT_ASSERT(instr_raw_bits_valid(instr), "instr_is_level_0: raw bits are invalid"); dr_set_isa_mode(dcontext, instr_get_isa_mode(instr), &old_mode); if ((uint)decode_sizeof(dcontext, instr->bytes, NULL _IF_X86_64(NULL)) == instr->length) { dr_set_isa_mode(dcontext, old_mode, NULL); return false; /* Level 1 */ } dr_set_isa_mode(dcontext, old_mode, NULL); return true; } /* If the next instr is at Level 0 (i.e., a bundled group of instrs as raw bits), * expands it into a sequence of Level 1 instrs using decode_raw() which * are added in place to ilist. Then returns the new next instr. */ instr_t * instr_get_next_expanded(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr) { instr_expand(dcontext, ilist, instr_get_next(instr)); return instr_get_next(instr); } /* If the prev instr is at Level 0 (i.e., a bundled group of instrs as raw bits), * expands it into a sequence of Level 1 instrs using decode_raw() which * are added in place to ilist. Then returns the new prev instr. */ instr_t * instr_get_prev_expanded(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr) { instr_expand(dcontext, ilist, instr_get_prev(instr)); return instr_get_prev(instr); } /* If the first instr is at Level 0 (i.e., a bundled group of instrs as raw bits), * expands it into a sequence of Level 1 instrs using decode_raw() which * are added in place to ilist. Then returns the new first instr. 
*/ instr_t * instrlist_first_expanded(dcontext_t *dcontext, instrlist_t *ilist) { instr_expand(dcontext, ilist, instrlist_first(ilist)); return instrlist_first(ilist); } /* If the last instr is at Level 0 (i.e., a bundled group of instrs as raw bits), * expands it into a sequence of Level 1 instrs using decode_raw() which * are added in place to ilist. Then returns the new last instr. */ instr_t * instrlist_last_expanded(dcontext_t *dcontext, instrlist_t *ilist) { instr_expand(dcontext, ilist, instrlist_last(ilist)); return instrlist_last(ilist); } /* If instr is not already at the level of decode_cti, decodes enough * from the raw bits pointed to by instr to bring it to that level. * Assumes that instr is a single instr (i.e., NOT Level 0). * * decode_cti decodes only enough of instr to determine * its size, its effects on the 6 arithmetic eflags, and whether it is * a control-transfer instruction. If it is, the operands fields of * instr are filled in. If not, only the raw bits fields of instr are * filled in. This corresponds to a Level 3 decoding for control * transfer instructions but a Level 1 decoding plus arithmetic eflags * information for all other instructions. */ void instr_decode_cti(dcontext_t *dcontext, instr_t *instr) { /* if arith flags are missing but otherwise decoded, who cares, * next get_arith_flags() will fill it in */ if (!instr_opcode_valid(instr) || (instr_is_cti(instr) && !instr_operands_valid(instr))) { byte *next_pc; DEBUG_EXT_DECLARE(int old_len = instr->length;) /* decode_cti() will use the dcontext mode, but we want the instr mode */ dr_isa_mode_t old_mode; dr_set_isa_mode(dcontext, instr_get_isa_mode(instr), &old_mode); CLIENT_ASSERT(instr_raw_bits_valid(instr), "instr_decode_cti: raw bits are invalid"); instr_reuse(dcontext, instr); next_pc = decode_cti(dcontext, instr->bytes, instr); dr_set_isa_mode(dcontext, old_mode, NULL); /* ok to be invalid, let caller deal with it */ CLIENT_ASSERT(next_pc == NULL || (next_pc - instr->bytes == old_len), "instr_decode_cti requires a Level 1 or higher instruction"); } } /* If instr is not already at the level of decode_opcode, decodes enough * from the raw bits pointed to by instr to bring it to that level. * Assumes that instr is a single instr (i.e., NOT Level 0). * * decode_opcode decodes the opcode and eflags usage of the instruction. * This corresponds to a Level 2 decoding. */ void instr_decode_opcode(dcontext_t *dcontext, instr_t *instr) { if (!instr_opcode_valid(instr)) { byte *next_pc; DEBUG_EXT_DECLARE(int old_len = instr->length;) #ifdef X86 bool rip_rel_valid = instr_rip_rel_valid(instr); #endif /* decode_opcode() will use the dcontext mode, but we want the instr mode */ dr_isa_mode_t old_mode; dr_set_isa_mode(dcontext, instr_get_isa_mode(instr), &old_mode); CLIENT_ASSERT(instr_raw_bits_valid(instr), "instr_decode_opcode: raw bits are invalid"); instr_reuse(dcontext, instr); next_pc = decode_opcode(dcontext, instr->bytes, instr); dr_set_isa_mode(dcontext, old_mode, NULL); #ifdef X86 /* decode_opcode sets raw bits which invalidates rip_rel, but * it should still be valid on an up-decode of the opcode */ if (rip_rel_valid) instr_set_rip_rel_pos(instr, instr->rip_rel_pos); #endif /* ok to be invalid, let caller deal with it */ CLIENT_ASSERT(next_pc == NULL || (next_pc - instr->bytes == old_len), "instr_decode_opcode requires a Level 1 or higher instruction"); } } /* If instr is not already fully decoded, decodes enough * from the raw bits pointed to by instr to bring it Level 3. 
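 * A minimal sketch of the effect (hypothetical caller; assumes the raw bits
 * are valid and decoding succeeds):
 *
 *     instr_decode(dcontext, instr);
 *     // operands are now valid, so e.g. instr_num_srcs()/instr_get_src()
 *     // can be used without triggering any further decoding.
 *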
* Assumes that instr is a single instr (i.e., NOT Level 0). */ void instr_decode(dcontext_t *dcontext, instr_t *instr) { if (!instr_operands_valid(instr)) { byte *next_pc; DEBUG_EXT_DECLARE(int old_len = instr->length;) #ifdef X86 bool rip_rel_valid = instr_rip_rel_valid(instr); #endif /* decode() will use the current dcontext mode, but we want the instr mode */ dr_isa_mode_t old_mode; dr_set_isa_mode(dcontext, instr_get_isa_mode(instr), &old_mode); CLIENT_ASSERT(instr_raw_bits_valid(instr), "instr_decode: raw bits are invalid"); instr_reuse(dcontext, instr); next_pc = decode(dcontext, instr_get_raw_bits(instr), instr); #ifndef NOT_DYNAMORIO_CORE_PROPER if (expand_should_set_translation(dcontext)) instr_set_translation(instr, instr_get_raw_bits(instr)); #endif dr_set_isa_mode(dcontext, old_mode, NULL); #ifdef X86 /* decode sets raw bits which invalidates rip_rel, but * it should still be valid on an up-decode */ if (rip_rel_valid) instr_set_rip_rel_pos(instr, instr->rip_rel_pos); #endif /* ok to be invalid, let caller deal with it */ CLIENT_ASSERT(next_pc == NULL || (next_pc - instr->bytes == old_len), "instr_decode requires a Level 1 or higher instruction"); } } /* Calls instr_decode() with the current dcontext. Mostly useful as the slow * path for IR routines that get inlined. */ NOINLINE /* rarely called */ instr_t * instr_decode_with_current_dcontext(instr_t *instr) { instr_decode(get_thread_private_dcontext(), instr); return instr; } /* Brings all instrs in ilist up to the decode_cti level, and * hooks up intra-ilist cti targets to use instr_t targets, by * matching pc targets to each instruction's raw bits. * * decode_cti decodes only enough of instr to determine * its size, its effects on the 6 arithmetic eflags, and whether it is * a control-transfer instruction. If it is, the operands fields of * instr are filled in. If not, only the raw bits fields of instr are * filled in. This corresponds to a Level 3 decoding for control * transfer instructions but a Level 1 decoding plus arithmetic eflags * information for all other instructions. */ void instrlist_decode_cti(dcontext_t *dcontext, instrlist_t *ilist) { instr_t *instr; LOG(THREAD, LOG_ALL, 3, "\ninstrlist_decode_cti\n"); DOLOG(4, LOG_ALL, { LOG(THREAD, LOG_ALL, 4, "beforehand:\n"); instrlist_disassemble(dcontext, 0, ilist, THREAD); }); /* just use the expanding iterator to get to Level 1, then decode cti */ for (instr = instrlist_first_expanded(dcontext, ilist); instr != NULL; instr = instr_get_next_expanded(dcontext, ilist, instr)) { /* if arith flags are missing but otherwise decoded, who cares, * next get_arith_flags() will fill it in */ if (!instr_opcode_valid(instr) || (instr_is_cti(instr) && !instr_operands_valid(instr))) { DOLOG(4, LOG_ALL, { d_r_loginst(dcontext, 4, instr, "instrlist_decode_cti: about to decode"); }); instr_decode_cti(dcontext, instr); DOLOG(4, LOG_ALL, { d_r_loginst(dcontext, 4, instr, "\tjust decoded"); }); } } /* must fix up intra-ilist cti's to have instr_t targets * assumption: all intra-ilist cti's have been marked as do-not-mangle, * plus all targets have their raw bits already set */ for (instr = instrlist_first(ilist); instr != NULL; instr = instr_get_next(instr)) { /* N.B.: if we change exit cti's to have instr_t targets, we have to * change other modules like emit to handle that! 
* FIXME */ if (!instr_is_exit_cti(instr) && instr_opcode_valid(instr) && /* decode_cti only filled in cti opcodes */ instr_is_cti(instr) && instr_num_srcs(instr) > 0 && opnd_is_near_pc(instr_get_src(instr, 0))) { instr_t *tgt; DOLOG(4, LOG_ALL, { d_r_loginst(dcontext, 4, instr, "instrlist_decode_cti: found cti w/ pc target"); }); for (tgt = instrlist_first(ilist); tgt != NULL; tgt = instr_get_next(tgt)) { DOLOG(4, LOG_ALL, { d_r_loginst(dcontext, 4, tgt, "\tchecking"); }); LOG(THREAD, LOG_INTERP | LOG_OPTS, 4, "\t\taddress is " PFX "\n", instr_get_raw_bits(tgt)); if (opnd_get_pc(instr_get_target(instr)) == instr_get_raw_bits(tgt)) { /* cti targets this instr */ app_pc bits = 0; int len = 0; if (instr_raw_bits_valid(instr)) { bits = instr_get_raw_bits(instr); len = instr_length(dcontext, instr); } instr_set_target(instr, opnd_create_instr(tgt)); if (bits != 0) instr_set_raw_bits(instr, bits, len); DOLOG(4, LOG_ALL, { d_r_loginst(dcontext, 4, tgt, "\tcti targets this"); }); break; } } } } DOLOG(4, LOG_ALL, { LOG(THREAD, LOG_ALL, 4, "afterward:\n"); instrlist_disassemble(dcontext, 0, ilist, THREAD); }); LOG(THREAD, LOG_ALL, 4, "done with instrlist_decode_cti\n"); } /****************************************************************************/ /* utility routines */ void d_r_loginst(dcontext_t *dcontext, uint level, instr_t *instr, const char *string) { DOLOG(level, LOG_ALL, { LOG(THREAD, LOG_ALL, level, "%s: ", string); instr_disassemble(dcontext, instr, THREAD); LOG(THREAD, LOG_ALL, level, "\n"); }); } void d_r_logopnd(dcontext_t *dcontext, uint level, opnd_t opnd, const char *string) { DOLOG(level, LOG_ALL, { LOG(THREAD, LOG_ALL, level, "%s: ", string); opnd_disassemble(dcontext, opnd, THREAD); LOG(THREAD, LOG_ALL, level, "\n"); }); } void d_r_logtrace(dcontext_t *dcontext, uint level, instrlist_t *trace, const char *string) { DOLOG(level, LOG_ALL, { instr_t *inst; instr_t *next_inst; LOG(THREAD, LOG_ALL, level, "%s:\n", string); for (inst = instrlist_first(trace); inst != NULL; inst = next_inst) { next_inst = instr_get_next(inst); instr_disassemble(dcontext, inst, THREAD); LOG(THREAD, LOG_ALL, level, "\n"); } LOG(THREAD, LOG_ALL, level, "\n"); }); } /* Shrinks all registers not used as addresses, and all immed int and * address sizes, to 16 bits */ void instr_shrink_to_16_bits(instr_t *instr) { int i; opnd_t opnd; const instr_info_t *info; byte optype; CLIENT_ASSERT(instr_operands_valid(instr), "instr_shrink_to_16_bits: invalid opnds"); info = get_encoding_info(instr); for (i = 0; i < instr_num_dsts(instr); i++) { opnd = instr_get_dst(instr, i); /* some non-memory references vary in size by addr16, not data16: * e.g., the edi/esi inc/dec of string instrs */ optype = instr_info_opnd_type(info, false /*dst*/, i); if (!opnd_is_memory_reference(opnd) && !optype_is_indir_reg(optype)) { instr_set_dst(instr, i, opnd_shrink_to_16_bits(opnd)); } } for (i = 0; i < instr_num_srcs(instr); i++) { opnd = instr_get_src(instr, i); optype = instr_info_opnd_type(info, true /*dst*/, i); if (!opnd_is_memory_reference(opnd) && !optype_is_indir_reg(optype)) { instr_set_src(instr, i, opnd_shrink_to_16_bits(opnd)); } } } #ifdef X64 /* Shrinks all registers, including addresses, and all immed int and * address sizes, to 32 bits */ void instr_shrink_to_32_bits(instr_t *instr) { int i; opnd_t opnd; CLIENT_ASSERT(instr_operands_valid(instr), "instr_shrink_to_32_bits: invalid opnds"); for (i = 0; i < instr_num_dsts(instr); i++) { opnd = instr_get_dst(instr, i); instr_set_dst(instr, i, opnd_shrink_to_32_bits(opnd)); } 
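    /* Now shrink the source operands as well; an immediate integer that no
     * longer fits in 32 bits is flagged by the CLIENT_ASSERT below before it
     * is truncated. */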
for (i = 0; i < instr_num_srcs(instr); i++) { opnd = instr_get_src(instr, i); if (opnd_is_immed_int(opnd)) { CLIENT_ASSERT(opnd_get_immed_int(opnd) <= INT_MAX, "instr_shrink_to_32_bits: immed int will be truncated"); } instr_set_src(instr, i, opnd_shrink_to_32_bits(opnd)); } } #endif bool instr_uses_reg(instr_t *instr, reg_id_t reg) { return (instr_reg_in_dst(instr, reg) || instr_reg_in_src(instr, reg)); } bool instr_reg_in_dst(instr_t *instr, reg_id_t reg) { int i; for (i = 0; i < instr_num_dsts(instr); i++) { if (opnd_uses_reg(instr_get_dst(instr, i), reg)) return true; } return false; } bool instr_reg_in_src(instr_t *instr, reg_id_t reg) { int i; #ifdef X86 /* special case (we don't want all of instr_is_nop() special-cased: just this one) */ if (instr_get_opcode(instr) == OP_nop_modrm) return false; #endif for (i = 0; i < instr_num_srcs(instr); i++) { if (opnd_uses_reg(instr_get_src(instr, i), reg)) return true; } return false; } /* checks regs in dest base-disp but not dest reg */ bool instr_reads_from_reg(instr_t *instr, reg_id_t reg, dr_opnd_query_flags_t flags) { int i; opnd_t opnd; if (!TEST(DR_QUERY_INCLUDE_COND_SRCS, flags) && instr_is_predicated(instr) && !instr_predicate_reads_srcs(instr_get_predicate(instr))) return false; if (instr_reg_in_src(instr, reg)) return true; /* As a special case, the addressing registers inside a destination memory * operand are covered by DR_QUERY_INCLUDE_COND_SRCS rather than * DR_QUERY_INCLUDE_COND_DSTS (i#1849). */ for (i = 0; i < instr_num_dsts(instr); i++) { opnd = instr_get_dst(instr, i); if (!opnd_is_reg(opnd) && opnd_uses_reg(opnd, reg)) return true; } return false; } /* In this func, it must be the exact same register, not a sub reg. ie. eax!=ax */ bool instr_reads_from_exact_reg(instr_t *instr, reg_id_t reg, dr_opnd_query_flags_t flags) { int i; opnd_t opnd; if (!TEST(DR_QUERY_INCLUDE_COND_SRCS, flags) && instr_is_predicated(instr) && !instr_predicate_reads_srcs(instr_get_predicate(instr))) return false; #ifdef X86 /* special case */ if (instr_get_opcode(instr) == OP_nop_modrm) return false; #endif for (i = 0; i < instr_num_srcs(instr); i++) { opnd = instr_get_src(instr, i); if (opnd_is_reg(opnd) && opnd_get_reg(opnd) == reg && opnd_get_size(opnd) == reg_get_size(reg)) return true; else if (opnd_is_base_disp(opnd) && (opnd_get_base(opnd) == reg || opnd_get_index(opnd) == reg || opnd_get_segment(opnd) == reg)) return true; } for (i = 0; i < instr_num_dsts(instr); i++) { opnd = instr_get_dst(instr, i); if (opnd_is_base_disp(opnd) && (opnd_get_base(opnd) == reg || opnd_get_index(opnd) == reg || opnd_get_segment(opnd) == reg)) return true; } return false; } /* this checks sub-registers */ bool instr_writes_to_reg(instr_t *instr, reg_id_t reg, dr_opnd_query_flags_t flags) { int i; opnd_t opnd; if (!TEST(DR_QUERY_INCLUDE_COND_DSTS, flags) && instr_is_predicated(instr)) return false; for (i = 0; i < instr_num_dsts(instr); i++) { opnd = instr_get_dst(instr, i); if (opnd_is_reg(opnd) && (dr_reg_fixer[opnd_get_reg(opnd)] == dr_reg_fixer[reg])) return true; } return false; } /* In this func, it must be the exact same register, not a sub reg. ie. 
eax!=ax */ bool instr_writes_to_exact_reg(instr_t *instr, reg_id_t reg, dr_opnd_query_flags_t flags) { int i; opnd_t opnd; if (!TEST(DR_QUERY_INCLUDE_COND_DSTS, flags) && instr_is_predicated(instr)) return false; for (i = 0; i < instr_num_dsts(instr); i++) { opnd = instr_get_dst(instr, i); if (opnd_is_reg(opnd) && (opnd_get_reg(opnd) == reg) /* for case like OP_movt on ARM and SIMD regs on X86, * partial reg written with full reg name in opnd */ && opnd_get_size(opnd) == reg_get_size(reg)) return true; } return false; } bool instr_replace_src_opnd(instr_t *instr, opnd_t old_opnd, opnd_t new_opnd) { int srcs, a; srcs = instr_num_srcs(instr); for (a = 0; a < srcs; a++) { if (opnd_same(instr_get_src(instr, a), old_opnd) || opnd_same_address(instr_get_src(instr, a), old_opnd)) { instr_set_src(instr, a, new_opnd); return true; } } return false; } bool instr_replace_reg_resize(instr_t *instr, reg_id_t old_reg, reg_id_t new_reg) { int i; bool found = false; for (i = 0; i < instr_num_srcs(instr); i++) { opnd_t opnd = instr_get_src(instr, i); if (opnd_uses_reg(opnd, old_reg)) { found = true; opnd_replace_reg_resize(&opnd, old_reg, new_reg); instr_set_src(instr, i, opnd); } } for (i = 0; i < instr_num_dsts(instr); i++) { opnd_t opnd = instr_get_dst(instr, i); if (opnd_uses_reg(opnd, old_reg)) { found = true; opnd_replace_reg_resize(&opnd, old_reg, new_reg); instr_set_dst(instr, i, opnd); } } return found; } bool instr_same(instr_t *inst1, instr_t *inst2) { int dsts, srcs, a; if (instr_get_opcode(inst1) != instr_get_opcode(inst2)) return false; if ((srcs = instr_num_srcs(inst1)) != instr_num_srcs(inst2)) return false; for (a = 0; a < srcs; a++) { if (!opnd_same(instr_get_src(inst1, a), instr_get_src(inst2, a))) return false; } if ((dsts = instr_num_dsts(inst1)) != instr_num_dsts(inst2)) return false; for (a = 0; a < dsts; a++) { if (!opnd_same(instr_get_dst(inst1, a), instr_get_dst(inst2, a))) return false; } /* We encode some prefixes in the operands themselves, such that * we shouldn't consider the whole-instr_t flags when considering * equality of Instrs */ if ((instr_get_prefixes(inst1) & PREFIX_SIGNIFICANT) != (instr_get_prefixes(inst2) & PREFIX_SIGNIFICANT)) return false; if (instr_get_isa_mode(inst1) != instr_get_isa_mode(inst2)) return false; if (instr_get_predicate(inst1) != instr_get_predicate(inst2)) return false; return true; } bool instr_reads_memory(instr_t *instr) { int a; opnd_t curop; int opc = instr_get_opcode(instr); if (opc_is_not_a_real_memory_load(opc)) return false; for (a = 0; a < instr_num_srcs(instr); a++) { curop = instr_get_src(instr, a); if (opnd_is_memory_reference(curop)) { return true; } } return false; } bool instr_writes_memory(instr_t *instr) { int a; opnd_t curop; for (a = 0; a < instr_num_dsts(instr); a++) { curop = instr_get_dst(instr, a); if (opnd_is_memory_reference(curop)) { return true; } } return false; } #ifdef X86 bool instr_zeroes_ymmh(instr_t *instr) { int i; const instr_info_t *info = get_encoding_info(instr); if (info == NULL) return false; /* legacy instrs always preserve top half of ymm */ if (!TEST(REQUIRES_VEX, info->flags)) return false; for (i = 0; i < instr_num_dsts(instr); i++) { opnd_t opnd = instr_get_dst(instr, i); if (opnd_is_reg(opnd) && reg_is_xmm(opnd_get_reg(opnd)) && !reg_is_ymm(opnd_get_reg(opnd))) return true; } return false; } bool instr_is_xsave(instr_t *instr) { int opcode = instr_get_opcode(instr); /* force decode */ if (opcode == OP_xsave32 || opcode == OP_xsaveopt32 || opcode == OP_xsave64 || opcode == OP_xsaveopt64 || opcode 
== OP_xsavec32 || opcode == OP_xsavec64) return true; return false; } #endif /* X86 */ /* PR 251479: support general re-relativization. If INSTR_RIP_REL_VALID is set and * the raw bits are valid, instr->rip_rel_pos is assumed to hold the offset into the * instr of a 32-bit rip-relative displacement, which is used to re-relativize during * encoding. We only use this for level 1-3 instrs, and we invalidate it if the raw * bits are modified at all. * For caching the encoded bytes of a Level 4 instr, instr_encode() sets * the rip_rel_pos field and flag without setting the raw bits valid: * private_instr_encode() then sets the raw bits, after examing the rip rel flag * by itself. Thus, we must invalidate the rip rel flag when we invalidate * raw bits: we can't rely just on the raw bits invalidation. * There can only be one rip-relative operand per instruction. */ /* TODO i#4016: for AArchXX we don't have a large displacement on every reference. * Some have no disp at all, others have just 12 bits or smaller. * We need to come up with a strategy for handling encode-time re-relativization. * Xref copy_and_re_relativize_raw_instr(). * For now, we do use some of these routines, but none that use the rip_rel_pos. */ #ifdef X86 bool instr_rip_rel_valid(instr_t *instr) { return instr_raw_bits_valid(instr) && TEST(INSTR_RIP_REL_VALID, instr->flags); } void instr_set_rip_rel_valid(instr_t *instr, bool valid) { if (valid) instr->flags |= INSTR_RIP_REL_VALID; else instr->flags &= ~INSTR_RIP_REL_VALID; } uint instr_get_rip_rel_pos(instr_t *instr) { return instr->rip_rel_pos; } void instr_set_rip_rel_pos(instr_t *instr, uint pos) { CLIENT_ASSERT_TRUNCATE(instr->rip_rel_pos, byte, pos, "instr_set_rip_rel_pos: offs must be <= 256"); instr->rip_rel_pos = (byte)pos; instr_set_rip_rel_valid(instr, true); } #endif /* X86 */ #ifdef X86 static bool instr_has_rip_rel_instr_operand(instr_t *instr) { /* XXX: See comment in instr_get_rel_target() about distinguishing data from * instr rip-rel operands. We don't want to go so far as adding yet more * data plumbed through the decode_fast tables. * Perhaps we should instead break compatibility and have all these relative * target and operand index routines include instr operands, and update * mangle_rel_addr() to somehow distinguish instr on its own? * For now we get by with the simple check for a cti or xbegin. * No instruction has 2 rip-rel immeds so a direct cti must be instr. */ return (instr_is_cti(instr) && !instr_is_mbr(instr)) || instr_get_opcode(instr) == OP_xbegin; } #endif bool instr_get_rel_target(instr_t *instr, /*OUT*/ app_pc *target, bool data_only) { if (!instr_valid(instr)) return false; /* For PC operands we have to look at the high-level *before* rip_rel_pos, to * support decode_from_copy(). As documented, we ignore instr_t targets. */ if (!data_only && instr_operands_valid(instr) && instr_num_srcs(instr) > 0 && opnd_is_pc(instr_get_src(instr, 0))) { if (target != NULL) *target = opnd_get_pc(instr_get_src(instr, 0)); return true; } #ifdef X86 /* PR 251479: we support rip-rel info in level 1 instrs */ if (instr_rip_rel_valid(instr)) { int rip_rel_pos = instr_get_rip_rel_pos(instr); if (rip_rel_pos > 0) { if (data_only) { /* XXX: Distinguishing data from instr is a pain here b/c it might be * during init (e.g., callback.c's copy_app_code()) and we can't * easily do an up-decode (hence the separate "local" instr_t below). * We do it partly for backward compatibility for external callers, * but also for our own mangle_rel_addr(). 
Would it be cleaner some * other way: breaking compat and not supporting data-only here and * having mangle call instr_set_rip_rel_valid() for all cti's (and * xbegin)? */ bool not_data = false; if (!instr_opcode_valid(instr) && get_thread_private_dcontext() == NULL) { instr_t local; instr_init(GLOBAL_DCONTEXT, &local); if (decode_opcode(GLOBAL_DCONTEXT, instr_get_raw_bits(instr), &local) != NULL) { not_data = instr_has_rip_rel_instr_operand(&local); } instr_free(GLOBAL_DCONTEXT, &local); } else not_data = instr_has_rip_rel_instr_operand(instr); if (not_data) return false; } if (target != NULL) { /* We only support non-4-byte rip-rel disps for 1-byte instr-final * (jcc_short). */ if (rip_rel_pos + 1 == (int)instr->length) { *target = instr->bytes + instr->length + *((char *)(instr->bytes + rip_rel_pos)); } else { ASSERT(rip_rel_pos + 4 <= (int)instr->length); *target = instr->bytes + instr->length + *((int *)(instr->bytes + rip_rel_pos)); } } return true; } else return false; } #endif #if defined(X64) || defined(ARM) int i; opnd_t curop; /* else go to level 3 operands */ for (i = 0; i < instr_num_dsts(instr); i++) { curop = instr_get_dst(instr, i); IF_ARM_ELSE( { /* DR_REG_PC as an index register is not allowed */ if (opnd_is_base_disp(curop) && opnd_get_base(curop) == DR_REG_PC) { if (target != NULL) { *target = opnd_get_disp(curop) + decode_cur_pc(instr_get_app_pc(instr), instr_get_isa_mode(instr), instr_get_opcode(instr), instr); } return true; } }, { if (opnd_is_rel_addr(curop)) { if (target != NULL) *target = opnd_get_addr(curop); return true; } }); } for (i = 0; i < instr_num_srcs(instr); i++) { curop = instr_get_src(instr, i); IF_ARM_ELSE( { /* DR_REG_PC as an index register is not allowed */ if (opnd_is_base_disp(curop) && opnd_get_base(curop) == DR_REG_PC) { if (target != NULL) { *target = opnd_get_disp(curop) + decode_cur_pc(instr_get_app_pc(instr), instr_get_isa_mode(instr), instr_get_opcode(instr), instr); } return true; } }, { if (opnd_is_rel_addr(curop)) { if (target != NULL) *target = opnd_get_addr(curop); return true; } }); } #endif return false; } bool instr_get_rel_data_or_instr_target(instr_t *instr, /*OUT*/ app_pc *target) { return instr_get_rel_target(instr, target, false /*all*/); } #if defined(X64) || defined(ARM) bool instr_get_rel_addr_target(instr_t *instr, /*OUT*/ app_pc *target) { return instr_get_rel_target(instr, target, true /*data-only*/); } bool instr_has_rel_addr_reference(instr_t *instr) { return instr_get_rel_addr_target(instr, NULL); } int instr_get_rel_addr_dst_idx(instr_t *instr) { int i; opnd_t curop; if (!instr_valid(instr)) return -1; /* must go to level 3 operands */ for (i = 0; i < instr_num_dsts(instr); i++) { curop = instr_get_dst(instr, i); IF_ARM_ELSE( { if (opnd_is_base_disp(curop) && opnd_get_base(curop) == DR_REG_PC) return i; }, { if (opnd_is_rel_addr(curop)) return i; }); } return -1; } int instr_get_rel_addr_src_idx(instr_t *instr) { int i; opnd_t curop; if (!instr_valid(instr)) return -1; /* must go to level 3 operands */ for (i = 0; i < instr_num_srcs(instr); i++) { curop = instr_get_src(instr, i); IF_ARM_ELSE( { if (opnd_is_base_disp(curop) && opnd_get_base(curop) == DR_REG_PC) return i; }, { if (opnd_is_rel_addr(curop)) return i; }); } return -1; } #endif /* X64 || ARM */ bool instr_is_our_mangling(instr_t *instr) { return TEST(INSTR_OUR_MANGLING, instr->flags); } void instr_set_our_mangling(instr_t *instr, bool ours) { if (ours) instr->flags |= INSTR_OUR_MANGLING; else instr->flags &= ~INSTR_OUR_MANGLING; } bool 
instr_is_our_mangling_epilogue(instr_t *instr) { ASSERT(!TEST(INSTR_OUR_MANGLING_EPILOGUE, instr->flags) || instr_is_our_mangling(instr)); return TEST(INSTR_OUR_MANGLING_EPILOGUE, instr->flags); } void instr_set_our_mangling_epilogue(instr_t *instr, bool epilogue) { if (epilogue) { instr->flags |= INSTR_OUR_MANGLING_EPILOGUE; } else instr->flags &= ~INSTR_OUR_MANGLING_EPILOGUE; } instr_t * instr_set_translation_mangling_epilogue(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr) { if (instrlist_get_translation_target(ilist) != NULL) { int sz = decode_sizeof(dcontext, instrlist_get_translation_target(ilist), NULL _IF_X86_64(NULL)); instr_set_translation(instr, instrlist_get_translation_target(ilist) + sz); } instr_set_our_mangling_epilogue(instr, true); return instr; } /* Emulates instruction to find the address of the index-th memory operand. * Either or both OUT variables can be NULL. */ static bool instr_compute_address_helper(instr_t *instr, priv_mcontext_t *mc, size_t mc_size, dr_mcontext_flags_t mc_flags, uint index, OUT app_pc *addr, OUT bool *is_write, OUT uint *pos) { /* for string instr, even w/ rep prefix, assume want value at point of * register snapshot passed in */ int i; opnd_t curop = { 0 }; int memcount = -1; bool write = false; bool have_addr = false; /* We allow not selecting xmm fields since clients may legitimately * emulate a memref w/ just GPRs */ CLIENT_ASSERT(TESTALL(DR_MC_CONTROL | DR_MC_INTEGER, mc_flags), "dr_mcontext_t.flags must include DR_MC_CONTROL and DR_MC_INTEGER"); for (i = 0; i < instr_num_dsts(instr); i++) { curop = instr_get_dst(instr, i); if (opnd_is_memory_reference(curop)) { if (opnd_is_vsib(curop)) { #ifdef X86 if (instr_compute_address_VSIB(instr, mc, mc_size, mc_flags, curop, index, &have_addr, addr, &write)) { CLIENT_ASSERT( write, "VSIB found in destination but instruction is not a scatter"); break; } else { return false; } #else CLIENT_ASSERT(false, "VSIB should be x86-only"); #endif } memcount++; if (memcount == (int)index) { write = true; break; } } } if (!write && memcount != (int)index && /* lea has a mem_ref source operand, but doesn't actually read */ !opc_is_not_a_real_memory_load(instr_get_opcode(instr))) { for (i = 0; i < instr_num_srcs(instr); i++) { curop = instr_get_src(instr, i); if (opnd_is_memory_reference(curop)) { if (opnd_is_vsib(curop)) { #ifdef X86 if (instr_compute_address_VSIB(instr, mc, mc_size, mc_flags, curop, index, &have_addr, addr, &write)) break; else return false; #else CLIENT_ASSERT(false, "VSIB should be x86-only"); #endif } memcount++; if (memcount == (int)index) break; } } } if (!have_addr) { if (memcount != (int)index) return false; if (addr != NULL) *addr = opnd_compute_address_priv(curop, mc); } if (is_write != NULL) *is_write = write; if (pos != 0) *pos = i; return true; } bool instr_compute_address_ex_priv(instr_t *instr, priv_mcontext_t *mc, uint index, OUT app_pc *addr, OUT bool *is_write, OUT uint *pos) { return instr_compute_address_helper(instr, mc, sizeof(*mc), DR_MC_ALL, index, addr, is_write, pos); } DR_API bool instr_compute_address_ex(instr_t *instr, dr_mcontext_t *mc, uint index, OUT app_pc *addr, OUT bool *is_write) { return instr_compute_address_helper(instr, dr_mcontext_as_priv_mcontext(mc), mc->size, mc->flags, index, addr, is_write, NULL); } /* i#682: add pos so that the caller knows which opnd is used. 
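 *
 * Illustrative client-side sketch (an assumption about typical usage, with
 * hypothetical variable names; mc must be a dr_mcontext_t whose flags include
 * DR_MC_CONTROL and DR_MC_INTEGER, as asserted in the helper above):
 *
 *     uint index, pos;
 *     bool is_write;
 *     app_pc addr;
 *     for (index = 0;
 *          instr_compute_address_ex_pos(instr, mc, index, &addr, &is_write, &pos);
 *          index++) {
 *         // addr = effective address of the index-th memory operand;
 *         // pos  = which dst/src operand slot it came from (dsts scanned first).
 *     }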
*/ DR_API bool instr_compute_address_ex_pos(instr_t *instr, dr_mcontext_t *mc, uint index, OUT app_pc *addr, OUT bool *is_write, OUT uint *pos) { return instr_compute_address_helper(instr, dr_mcontext_as_priv_mcontext(mc), mc->size, mc->flags, index, addr, is_write, pos); } /* Returns NULL if none of instr's operands is a memory reference. * Otherwise, returns the effective address of the first memory operand * when the operands are considered in this order: destinations and then * sources. The address is computed using the passed-in registers. */ app_pc instr_compute_address_priv(instr_t *instr, priv_mcontext_t *mc) { app_pc addr; if (!instr_compute_address_ex_priv(instr, mc, 0, &addr, NULL, NULL)) return NULL; return addr; } DR_API app_pc instr_compute_address(instr_t *instr, dr_mcontext_t *mc) { app_pc addr; if (!instr_compute_address_ex(instr, mc, 0, &addr, NULL)) return NULL; return addr; } /* Calculates the size, in bytes, of the memory read or write of instr * If instr does not reference memory, or is invalid, returns 0 */ uint instr_memory_reference_size(instr_t *instr) { int i; if (!instr_valid(instr)) return 0; for (i = 0; i < instr_num_dsts(instr); i++) { if (opnd_is_memory_reference(instr_get_dst(instr, i))) { return opnd_size_in_bytes(opnd_get_size(instr_get_dst(instr, i))); } } for (i = 0; i < instr_num_srcs(instr); i++) { if (opnd_is_memory_reference(instr_get_src(instr, i))) { return opnd_size_in_bytes(opnd_get_size(instr_get_src(instr, i))); } } return 0; } /* Calculates the size, in bytes, of the memory read or write of * the instr at pc. * Returns the pc of the following instr. * If the instr at pc does not reference memory, or is invalid, * returns NULL. */ app_pc decode_memory_reference_size(void *drcontext, app_pc pc, uint *size_in_bytes) { dcontext_t *dcontext = (dcontext_t *)drcontext; app_pc next_pc; instr_t instr; instr_init(dcontext, &instr); next_pc = decode(dcontext, pc, &instr); if (!instr_valid(&instr)) return NULL; CLIENT_ASSERT(size_in_bytes != NULL, "decode_memory_reference_size: passed NULL"); *size_in_bytes = instr_memory_reference_size(&instr); instr_free(dcontext, &instr); return next_pc; } DR_API dr_instr_label_data_t * instr_get_label_data_area(instr_t *instr) { CLIENT_ASSERT(instr != NULL, "invalid arg"); if (instr_is_label(instr)) return &instr->label_data; else return NULL; } DR_API /* return the taken target pc of the (direct branch) inst */ app_pc instr_get_branch_target_pc(instr_t *cti_instr) { CLIENT_ASSERT(opnd_is_pc(instr_get_target(cti_instr)), "instr_branch_target_pc: target not pc"); return opnd_get_pc(instr_get_target(cti_instr)); } DR_API /* set the taken target pc of the (direct branch) inst */ void instr_set_branch_target_pc(instr_t *cti_instr, app_pc pc) { opnd_t op = opnd_create_pc(pc); instr_set_target(cti_instr, op); } bool instr_is_call(instr_t *instr) { instr_get_opcode(instr); /* force decode */ return instr_is_call_arch(instr); } bool instr_is_cbr(instr_t *instr) { instr_get_opcode(instr); /* force decode */ return instr_is_cbr_arch(instr); } bool instr_is_mbr(instr_t *instr) { instr_get_opcode(instr); /* force decode */ return instr_is_mbr_arch(instr); } bool instr_is_ubr(instr_t *instr) { instr_get_opcode(instr); /* force decode */ return instr_is_ubr_arch(instr); } /* An exit CTI is a control-transfer instruction whose target * is a pc (and not an instr_t pointer). This routine assumes * that no other input operands exist in a CTI. * An undecoded instr cannot be an exit cti. 
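 * A minimal sketch of the distinction (hypothetical cti and targets):
 *
 *     instr_set_target(cti, opnd_create_pc(tgt_pc));        // may be an exit cti
 *     instr_set_target(cti, opnd_create_instr(tgt_instr));  // never an exit cti
 *
 * and meta instrs are never exit ctis either.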
* This routine does NOT try to decode an opcode in a Level 1 or Level * 0 routine, and can thus be called on Level 0 routines. */ bool instr_is_exit_cti(instr_t *instr) { if (!instr_operands_valid(instr) || /* implies !opcode_valid */ instr_is_meta(instr)) return false; /* The _arch versions assume the opcode is already valid, avoiding * the conditional decode in instr_get_opcode(). */ if (instr_is_ubr_arch(instr) || instr_is_cbr_arch(instr)) { /* far pc should only happen for mangle's call to here */ return opnd_is_pc(instr_get_target(instr)); } return false; } bool instr_is_cti(instr_t *instr) /* any control-transfer instruction */ { instr_get_opcode(instr); /* force opcode decode, just once */ return (instr_is_cbr_arch(instr) || instr_is_ubr_arch(instr) || instr_is_mbr_arch(instr) || instr_is_call_arch(instr)); } int instr_get_interrupt_number(instr_t *instr) { CLIENT_ASSERT(instr_get_opcode(instr) == IF_X86_ELSE(OP_int, OP_svc), "instr_get_interrupt_number: instr not interrupt"); if (instr_operands_valid(instr)) { ptr_int_t val = opnd_get_immed_int(instr_get_src(instr, 0)); /* undo the sign extension. prob return value shouldn't be signed but * too late to bother changing that. */ CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_sbyte(val), "invalid interrupt number"); return (int)(byte)val; } else if (instr_raw_bits_valid(instr)) { /* widen as unsigned */ return (int)(uint)instr_get_raw_byte(instr, 1); } else { CLIENT_ASSERT(false, "instr_get_interrupt_number: invalid instr"); return 0; } } /* Returns true iff instr is a label meta-instruction */ bool instr_is_label(instr_t *instr) { return instr_opcode_valid(instr) && instr_get_opcode(instr) == OP_LABEL; } bool instr_uses_fp_reg(instr_t *instr) { int a; opnd_t curop; for (a = 0; a < instr_num_dsts(instr); a++) { curop = instr_get_dst(instr, a); if (opnd_is_reg(curop) && reg_is_fp(opnd_get_reg(curop))) return true; else if (opnd_is_memory_reference(curop)) { if (reg_is_fp(opnd_get_base(curop))) return true; else if (reg_is_fp(opnd_get_index(curop))) return true; } } for (a = 0; a < instr_num_srcs(instr); a++) { curop = instr_get_src(instr, a); if (opnd_is_reg(curop) && reg_is_fp(opnd_get_reg(curop))) return true; else if (opnd_is_memory_reference(curop)) { if (reg_is_fp(opnd_get_base(curop))) return true; else if (reg_is_fp(opnd_get_index(curop))) return true; } } return false; } /* We place these here rather than in mangle_shared.c to avoid the work of * linking mangle_shared.c into drdecodelib. */ instr_t * convert_to_near_rel_meta(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr) { return convert_to_near_rel_arch(dcontext, ilist, instr); } void convert_to_near_rel(dcontext_t *dcontext, instr_t *instr) { convert_to_near_rel_arch(dcontext, NULL, instr); } instr_t * instr_convert_short_meta_jmp_to_long(void *drcontext, instrlist_t *ilist, instr_t *instr) { dcontext_t *dcontext = (dcontext_t *)drcontext; /* PR 266292: we convert to a sequence of separate meta instrs for jecxz, etc. */ CLIENT_ASSERT(instr_is_meta(instr), "instr_convert_short_meta_jmp_to_long: instr is not meta"); CLIENT_ASSERT(instr_is_cti_short(instr), "instr_convert_short_meta_jmp_to_long: instr is not a short cti"); if (instr_is_app(instr) || !instr_is_cti_short(instr)) return instr; return convert_to_near_rel_meta(dcontext, ilist, instr); } /*********************************************************************** * instr_t creation routines * To use 16-bit data sizes, must call set_prefix after creating instr * To support this, all relevant registers must be of eAX form! 
* FIXME: how do that? * will an all-operand replacement work, or do some instrs have some * var-size regs but some const-size also? * * XXX: what if want eflags or modrm info on constructed instr?!? * * fld pushes onto top of stack, call that writing to ST0 or ST7? * f*p pops the stack -- not modeled at all! * should floating point constants be doubles, not floats?!? * * opcode complaints: * OP_imm vs. OP_st * OP_ret: build routines have to separate ret_imm and ret_far_imm * others, see FIXME's in instr_create_api.h */ instr_t * instr_create_0dst_0src(void *drcontext, int opcode) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 0, 0); return in; } instr_t * instr_create_0dst_1src(void *drcontext, int opcode, opnd_t src) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 0, 1); instr_set_src(in, 0, src); return in; } instr_t * instr_create_0dst_2src(void *drcontext, int opcode, opnd_t src1, opnd_t src2) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 0, 2); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); return in; } instr_t * instr_create_0dst_3src(void *drcontext, int opcode, opnd_t src1, opnd_t src2, opnd_t src3) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 0, 3); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); return in; } instr_t * instr_create_0dst_4src(void *drcontext, int opcode, opnd_t src1, opnd_t src2, opnd_t src3, opnd_t src4) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 0, 4); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); instr_set_src(in, 3, src4); return in; } instr_t * instr_create_1dst_0src(void *drcontext, int opcode, opnd_t dst) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 1, 0); instr_set_dst(in, 0, dst); return in; } instr_t * instr_create_1dst_1src(void *drcontext, int opcode, opnd_t dst, opnd_t src) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 1, 1); instr_set_dst(in, 0, dst); instr_set_src(in, 0, src); return in; } instr_t * instr_create_1dst_2src(void *drcontext, int opcode, opnd_t dst, opnd_t src1, opnd_t src2) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 1, 2); instr_set_dst(in, 0, dst); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); return in; } instr_t * instr_create_1dst_3src(void *drcontext, int opcode, opnd_t dst, opnd_t src1, opnd_t src2, opnd_t src3) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 1, 3); instr_set_dst(in, 0, dst); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); return in; } instr_t * instr_create_1dst_4src(void *drcontext, int opcode, opnd_t dst, opnd_t src1, opnd_t src2, opnd_t src3, opnd_t src4) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 1, 4); instr_set_dst(in, 0, dst); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); instr_set_src(in, 3, src4); return in; } instr_t * instr_create_1dst_5src(void *drcontext, int opcode, opnd_t dst, opnd_t src1, opnd_t src2, opnd_t src3, opnd_t src4, opnd_t src5) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 1, 5); instr_set_dst(in, 
0, dst); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); instr_set_src(in, 3, src4); instr_set_src(in, 4, src5); return in; } instr_t * instr_create_2dst_0src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 2, 0); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); return in; } instr_t * instr_create_2dst_1src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t src) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 2, 1); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_src(in, 0, src); return in; } instr_t * instr_create_2dst_2src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t src1, opnd_t src2) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 2, 2); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); return in; } instr_t * instr_create_2dst_3src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t src1, opnd_t src2, opnd_t src3) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 2, 3); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); return in; } instr_t * instr_create_2dst_4src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t src1, opnd_t src2, opnd_t src3, opnd_t src4) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 2, 4); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); instr_set_src(in, 3, src4); return in; } instr_t * instr_create_2dst_5src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t src1, opnd_t src2, opnd_t src3, opnd_t src4, opnd_t src5) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 2, 5); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); instr_set_src(in, 3, src4); instr_set_src(in, 4, src5); return in; } instr_t * instr_create_3dst_0src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t dst3) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 3, 0); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_dst(in, 2, dst3); return in; } instr_t * instr_create_3dst_2src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t dst3, opnd_t src1, opnd_t src2) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 3, 2); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_dst(in, 2, dst3); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); return in; } instr_t * instr_create_3dst_3src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t dst3, opnd_t src1, opnd_t src2, opnd_t src3) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 3, 3); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_dst(in, 2, dst3); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); return in; } instr_t * instr_create_3dst_4src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t dst3, opnd_t src1, opnd_t src2, 
opnd_t src3, opnd_t src4) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 3, 4); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_dst(in, 2, dst3); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); instr_set_src(in, 3, src4); return in; } instr_t * instr_create_3dst_5src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t dst3, opnd_t src1, opnd_t src2, opnd_t src3, opnd_t src4, opnd_t src5) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 3, 5); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_dst(in, 2, dst3); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); instr_set_src(in, 3, src4); instr_set_src(in, 4, src5); return in; } instr_t * instr_create_4dst_1src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t dst3, opnd_t dst4, opnd_t src) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 4, 1); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_dst(in, 2, dst3); instr_set_dst(in, 3, dst4); instr_set_src(in, 0, src); return in; } instr_t * instr_create_4dst_2src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t dst3, opnd_t dst4, opnd_t src1, opnd_t src2) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 4, 2); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_dst(in, 2, dst3); instr_set_dst(in, 3, dst4); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); return in; } instr_t * instr_create_4dst_4src(void *drcontext, int opcode, opnd_t dst1, opnd_t dst2, opnd_t dst3, opnd_t dst4, opnd_t src1, opnd_t src2, opnd_t src3, opnd_t src4) { dcontext_t *dcontext = (dcontext_t *)drcontext; instr_t *in = instr_build(dcontext, opcode, 4, 4); instr_set_dst(in, 0, dst1); instr_set_dst(in, 1, dst2); instr_set_dst(in, 2, dst3); instr_set_dst(in, 3, dst4); instr_set_src(in, 0, src1); instr_set_src(in, 1, src2); instr_set_src(in, 2, src3); instr_set_src(in, 3, src4); return in; } instr_t * instr_create_Ndst_Msrc_varsrc(void *drcontext, int opcode, uint fixed_dsts, uint fixed_srcs, uint var_srcs, uint var_ord, ...) { dcontext_t *dcontext = (dcontext_t *)drcontext; va_list ap; instr_t *in = instr_build(dcontext, opcode, fixed_dsts, fixed_srcs + var_srcs); uint i; reg_id_t prev_reg = REG_NULL; bool check_order; va_start(ap, var_ord); for (i = 0; i < fixed_dsts; i++) instr_set_dst(in, i, va_arg(ap, opnd_t)); for (i = 0; i < MIN(var_ord, fixed_srcs); i++) instr_set_src(in, i, va_arg(ap, opnd_t)); for (i = var_ord; i < fixed_srcs; i++) instr_set_src(in, var_srcs + i, va_arg(ap, opnd_t)); /* we require regs in reglist are stored in order for easy split if necessary */ check_order = IF_ARM_ELSE(true, false); for (i = 0; i < var_srcs; i++) { opnd_t opnd = va_arg(ap, opnd_t); /* assuming non-reg opnds (if any) are in the fixed positon */ CLIENT_ASSERT(!check_order || (opnd_is_reg(opnd) && opnd_get_reg(opnd) > prev_reg), "instr_create_Ndst_Msrc_varsrc: wrong register order in reglist"); instr_set_src(in, var_ord + i, opnd_add_flags(opnd, DR_OPND_IN_LIST)); if (check_order) prev_reg = opnd_get_reg(opnd); } va_end(ap); return in; } instr_t * instr_create_Ndst_Msrc_vardst(void *drcontext, int opcode, uint fixed_dsts, uint fixed_srcs, uint var_dsts, uint var_ord, ...) 
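/* Like the varsrc version above: the variable (register-list) destination
 * operands occupy dst slots var_ord .. var_ord + var_dsts - 1, fixed dsts at
 * or beyond var_ord are shifted up by var_dsts, and on ARM the list operands
 * must be supplied in increasing register order (asserted below) and are
 * tagged with DR_OPND_IN_LIST. */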
{ dcontext_t *dcontext = (dcontext_t *)drcontext; va_list ap; instr_t *in = instr_build(dcontext, opcode, fixed_dsts + var_dsts, fixed_srcs); uint i; reg_id_t prev_reg = REG_NULL; bool check_order; va_start(ap, var_ord); for (i = 0; i < MIN(var_ord, fixed_dsts); i++) instr_set_dst(in, i, va_arg(ap, opnd_t)); for (i = var_ord; i < fixed_dsts; i++) instr_set_dst(in, var_dsts + i, va_arg(ap, opnd_t)); for (i = 0; i < fixed_srcs; i++) instr_set_src(in, i, va_arg(ap, opnd_t)); /* we require regs in reglist are stored in order for easy split if necessary */ check_order = IF_ARM_ELSE(true, false); for (i = 0; i < var_dsts; i++) { opnd_t opnd = va_arg(ap, opnd_t); /* assuming non-reg opnds (if any) are in the fixed positon */ CLIENT_ASSERT(!check_order || (opnd_is_reg(opnd) && opnd_get_reg(opnd) > prev_reg), "instr_create_Ndst_Msrc_vardst: wrong register order in reglist"); instr_set_dst(in, var_ord + i, opnd_add_flags(opnd, DR_OPND_IN_LIST)); if (check_order) prev_reg = opnd_get_reg(opnd); } va_end(ap); return in; } /****************************************************************************/ /* build instructions from raw bits * convention: give them OP_UNDECODED opcodes */ instr_t * instr_create_raw_1byte(dcontext_t *dcontext, byte byte1) { instr_t *in = instr_build_bits(dcontext, OP_UNDECODED, 1); instr_set_raw_byte(in, 0, byte1); return in; } instr_t * instr_create_raw_2bytes(dcontext_t *dcontext, byte byte1, byte byte2) { instr_t *in = instr_build_bits(dcontext, OP_UNDECODED, 2); instr_set_raw_byte(in, 0, byte1); instr_set_raw_byte(in, 1, byte2); return in; } instr_t * instr_create_raw_3bytes(dcontext_t *dcontext, byte byte1, byte byte2, byte byte3) { instr_t *in = instr_build_bits(dcontext, OP_UNDECODED, 3); instr_set_raw_byte(in, 0, byte1); instr_set_raw_byte(in, 1, byte2); instr_set_raw_byte(in, 2, byte3); return in; } instr_t * instr_create_raw_4bytes(dcontext_t *dcontext, byte byte1, byte byte2, byte byte3, byte byte4) { instr_t *in = instr_build_bits(dcontext, OP_UNDECODED, 4); instr_set_raw_byte(in, 0, byte1); instr_set_raw_byte(in, 1, byte2); instr_set_raw_byte(in, 2, byte3); instr_set_raw_byte(in, 3, byte4); return in; } instr_t * instr_create_raw_5bytes(dcontext_t *dcontext, byte byte1, byte byte2, byte byte3, byte byte4, byte byte5) { instr_t *in = instr_build_bits(dcontext, OP_UNDECODED, 5); instr_set_raw_byte(in, 0, byte1); instr_set_raw_byte(in, 1, byte2); instr_set_raw_byte(in, 2, byte3); instr_set_raw_byte(in, 3, byte4); instr_set_raw_byte(in, 4, byte5); return in; } instr_t * instr_create_raw_6bytes(dcontext_t *dcontext, byte byte1, byte byte2, byte byte3, byte byte4, byte byte5, byte byte6) { instr_t *in = instr_build_bits(dcontext, OP_UNDECODED, 6); instr_set_raw_byte(in, 0, byte1); instr_set_raw_byte(in, 1, byte2); instr_set_raw_byte(in, 2, byte3); instr_set_raw_byte(in, 3, byte4); instr_set_raw_byte(in, 4, byte5); instr_set_raw_byte(in, 5, byte6); return in; } instr_t * instr_create_raw_7bytes(dcontext_t *dcontext, byte byte1, byte byte2, byte byte3, byte byte4, byte byte5, byte byte6, byte byte7) { instr_t *in = instr_build_bits(dcontext, OP_UNDECODED, 7); instr_set_raw_byte(in, 0, byte1); instr_set_raw_byte(in, 1, byte2); instr_set_raw_byte(in, 2, byte3); instr_set_raw_byte(in, 3, byte4); instr_set_raw_byte(in, 4, byte5); instr_set_raw_byte(in, 5, byte6); instr_set_raw_byte(in, 6, byte7); return in; } instr_t * instr_create_raw_8bytes(dcontext_t *dcontext, byte byte1, byte byte2, byte byte3, byte byte4, byte byte5, byte byte6, byte byte7, byte byte8) { instr_t *in = 
instr_build_bits(dcontext, OP_UNDECODED, 8); instr_set_raw_byte(in, 0, byte1); instr_set_raw_byte(in, 1, byte2); instr_set_raw_byte(in, 2, byte3); instr_set_raw_byte(in, 3, byte4); instr_set_raw_byte(in, 4, byte5); instr_set_raw_byte(in, 5, byte6); instr_set_raw_byte(in, 6, byte7); instr_set_raw_byte(in, 7, byte8); return in; } #ifndef STANDALONE_DECODER /****************************************************************************/ /* dcontext convenience routines */ instr_t * instr_create_restore_from_dcontext(dcontext_t *dcontext, reg_id_t reg, int offs) { opnd_t memopnd = opnd_create_dcontext_field(dcontext, offs); /* use movd for xmm/mmx */ if (reg_is_xmm(reg) || reg_is_mmx(reg)) return XINST_CREATE_load_simd(dcontext, opnd_create_reg(reg), memopnd); else return XINST_CREATE_load(dcontext, opnd_create_reg(reg), memopnd); } instr_t * instr_create_save_to_dcontext(dcontext_t *dcontext, reg_id_t reg, int offs) { opnd_t memopnd = opnd_create_dcontext_field(dcontext, offs); CLIENT_ASSERT(dcontext != GLOBAL_DCONTEXT, "instr_create_save_to_dcontext: invalid dcontext"); /* use movd for xmm/mmx */ if (reg_is_xmm(reg) || reg_is_mmx(reg)) return XINST_CREATE_store_simd(dcontext, memopnd, opnd_create_reg(reg)); else return XINST_CREATE_store(dcontext, memopnd, opnd_create_reg(reg)); } /* Use basereg==REG_NULL to get default (xdi, or xsi for upcontext) * Auto-magically picks the mem opnd size to match reg if it's a GPR. */ instr_t * instr_create_restore_from_dc_via_reg(dcontext_t *dcontext, reg_id_t basereg, reg_id_t reg, int offs) { /* use movd for xmm/mmx, and OPSZ_PTR */ if (reg_is_xmm(reg) || reg_is_mmx(reg)) { opnd_t memopnd = opnd_create_dcontext_field_via_reg(dcontext, basereg, offs); return XINST_CREATE_load_simd(dcontext, opnd_create_reg(reg), memopnd); } else { opnd_t memopnd = opnd_create_dcontext_field_via_reg_sz(dcontext, basereg, offs, reg_get_size(reg)); return XINST_CREATE_load(dcontext, opnd_create_reg(reg), memopnd); } } /* Use basereg==REG_NULL to get default (xdi, or xsi for upcontext) * Auto-magically picks the mem opnd size to match reg if it's a GPR. */ instr_t * instr_create_save_to_dc_via_reg(dcontext_t *dcontext, reg_id_t basereg, reg_id_t reg, int offs) { /* use movd for xmm/mmx, and OPSZ_PTR */ if (reg_is_xmm(reg) || reg_is_mmx(reg)) { opnd_t memopnd = opnd_create_dcontext_field_via_reg(dcontext, basereg, offs); return XINST_CREATE_store_simd(dcontext, memopnd, opnd_create_reg(reg)); } else { opnd_t memopnd = opnd_create_dcontext_field_via_reg_sz(dcontext, basereg, offs, reg_get_size(reg)); return XINST_CREATE_store(dcontext, memopnd, opnd_create_reg(reg)); } } static instr_t * instr_create_save_immedN_to_dcontext(dcontext_t *dcontext, opnd_size_t sz, opnd_t immed_op, int offs) { opnd_t memopnd = opnd_create_dcontext_field_sz(dcontext, offs, sz); /* PR 244737: thread-private scratch space needs to fixed for x64 */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* There is no immed to mem instr on ARM/AArch64. 
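 * so this helper is effectively x86-only for now (hence the assert below).
 * For reference, a minimal x86 usage sketch of the public wrappers built on
 * it, with hypothetical ilist/where/offs values:
 *
 *     instrlist_meta_preinsert(ilist, where,
 *         instr_create_save_immed32_to_dcontext(dcontext, 0, offs));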
*/ IF_AARCHXX(ASSERT_NOT_IMPLEMENTED(false)); return XINST_CREATE_store(dcontext, memopnd, immed_op); } instr_t * instr_create_save_immed32_to_dcontext(dcontext_t *dcontext, int immed, int offs) { return instr_create_save_immedN_to_dcontext(dcontext, OPSZ_4, OPND_CREATE_INT32(immed), offs); } instr_t * instr_create_save_immed16_to_dcontext(dcontext_t *dcontext, int immed, int offs) { return instr_create_save_immedN_to_dcontext(dcontext, OPSZ_2, OPND_CREATE_INT16(immed), offs); } instr_t * instr_create_save_immed8_to_dcontext(dcontext_t *dcontext, int immed, int offs) { return instr_create_save_immedN_to_dcontext(dcontext, OPSZ_1, OPND_CREATE_INT8(immed), offs); } instr_t * instr_create_save_immed_to_dc_via_reg(dcontext_t *dcontext, reg_id_t basereg, int offs, ptr_int_t immed, opnd_size_t sz) { opnd_t memopnd = opnd_create_dcontext_field_via_reg_sz(dcontext, basereg, offs, sz); ASSERT(sz == OPSZ_1 || sz == OPSZ_2 || sz == OPSZ_4); /* There is no immed to mem instr on ARM or AArch64. */ IF_NOT_X86(ASSERT_NOT_IMPLEMENTED(false)); return XINST_CREATE_store(dcontext, memopnd, opnd_create_immed_int(immed, sz)); } instr_t * instr_create_jump_via_dcontext(dcontext_t *dcontext, int offs) { # ifdef AARCH64 ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */ return 0; # else opnd_t memopnd = opnd_create_dcontext_field(dcontext, offs); return XINST_CREATE_jump_mem(dcontext, memopnd); # endif } /* there is no corresponding save routine since we no longer support * keeping state on the stack while code other than our own is running * (in the same thread) */ instr_t * instr_create_restore_dynamo_stack(dcontext_t *dcontext) { return instr_create_restore_from_dcontext(dcontext, REG_XSP, DSTACK_OFFSET); } /* make sure to keep in sync w/ emit_utils.c's insert_spill_or_restore() */ bool instr_raw_is_tls_spill(byte *pc, reg_id_t reg, ushort offs) { # ifdef X86 ASSERT_NOT_IMPLEMENTED(reg != REG_XAX); # ifdef X64 /* match insert_jmp_to_ibl */ if (*pc == TLS_SEG_OPCODE && *(pc + 1) == (REX_PREFIX_BASE_OPCODE | REX_PREFIX_W_OPFLAG) && *(pc + 2) == MOV_REG2MEM_OPCODE && /* 0x1c for ebx, 0x0c for ecx, 0x04 for eax */ *(pc + 3) == MODRM_BYTE(0 /*mod*/, reg_get_bits(reg), 4 /*rm*/) && *(pc + 4) == 0x25 && *((uint *)(pc + 5)) == (uint)os_tls_offset(offs)) return true; /* we also check for 32-bit. we could take in flags and only check for one * version, but we're not worried about false positives. 
*/ # endif /* looking for: 67 64 89 1e e4 0e addr16 mov %ebx -> %fs:0xee4 */ /* ASSUMPTION: when addr16 prefix is used, prefix order is fixed */ return (*pc == ADDR_PREFIX_OPCODE && *(pc + 1) == TLS_SEG_OPCODE && *(pc + 2) == MOV_REG2MEM_OPCODE && /* 0x1e for ebx, 0x0e for ecx, 0x06 for eax */ *(pc + 3) == MODRM_BYTE(0 /*mod*/, reg_get_bits(reg), 6 /*rm*/) && *((ushort *)(pc + 4)) == os_tls_offset(offs)) || /* PR 209709: allow for no addr16 prefix */ (*pc == TLS_SEG_OPCODE && *(pc + 1) == MOV_REG2MEM_OPCODE && /* 0x1e for ebx, 0x0e for ecx, 0x06 for eax */ *(pc + 2) == MODRM_BYTE(0 /*mod*/, reg_get_bits(reg), 6 /*rm*/) && *((uint *)(pc + 4)) == os_tls_offset(offs)); # elif defined(AARCHXX) /* FIXME i#1551, i#1569: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(false); return false; # endif /* X86/ARM */ } /* this routine may upgrade a level 1 instr */ static bool instr_check_tls_spill_restore(instr_t *instr, bool *spill, reg_id_t *reg, int *offs) { opnd_t regop, memop; CLIENT_ASSERT(instr != NULL, "internal error: tls spill/restore check: NULL argument"); if (instr_get_opcode(instr) == OP_store) { regop = instr_get_src(instr, 0); memop = instr_get_dst(instr, 0); if (spill != NULL) *spill = true; } else if (instr_get_opcode(instr) == OP_load) { regop = instr_get_dst(instr, 0); memop = instr_get_src(instr, 0); if (spill != NULL) *spill = false; # ifdef X86 } else if (instr_get_opcode(instr) == OP_xchg) { /* we use xchg to restore in dr_insert_mbr_instrumentation */ regop = instr_get_src(instr, 0); memop = instr_get_dst(instr, 0); if (spill != NULL) *spill = false; # endif } else return false; if (opnd_is_reg(regop) && # ifdef X86 opnd_is_far_base_disp(memop) && opnd_get_segment(memop) == SEG_TLS && opnd_is_abs_base_disp(memop) # elif defined(AARCHXX) opnd_is_base_disp(memop) && opnd_get_base(memop) == dr_reg_stolen && opnd_get_index(memop) == DR_REG_NULL # endif ) { if (reg != NULL) *reg = opnd_get_reg(regop); if (offs != NULL) *offs = opnd_get_disp(memop); return true; } return false; } /* if instr is level 1, does not upgrade it and instead looks at raw bits, * to support identification w/o ruining level 0 in decode_fragment, etc. */ bool instr_is_tls_spill(instr_t *instr, reg_id_t reg, ushort offs) { reg_id_t check_reg = REG_NULL; /* init to satisfy some compilers */ int check_disp = 0; /* init to satisfy some compilers */ bool spill; return (instr_check_tls_spill_restore(instr, &spill, &check_reg, &check_disp) && spill && check_reg == reg && check_disp == os_tls_offset(offs)); } /* if instr is level 1, does not upgrade it and instead looks at raw bits, * to support identification w/o ruining level 0 in decode_fragment, etc. */ bool instr_is_tls_restore(instr_t *instr, reg_id_t reg, ushort offs) { reg_id_t check_reg = REG_NULL; /* init to satisfy some compilers */ int check_disp = 0; /* init to satisfy some compilers */ bool spill; return (instr_check_tls_spill_restore(instr, &spill, &check_reg, &check_disp) && !spill && (reg == REG_NULL || check_reg == reg) && check_disp == os_tls_offset(offs)); } /* if instr is level 1, does not upgrade it and instead looks at raw bits, * to support identification w/o ruining level 0 in decode_fragment, etc. 
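 * Illustrative sketch (hypothetical caller):
 *
 *     if (instr_is_tls_xcx_spill(in)) {
 *         // recognized DR's xcx spill without forcing an operand decode on "in"
 *     }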
*/ bool instr_is_tls_xcx_spill(instr_t *instr) { # ifdef X86 if (instr_raw_bits_valid(instr)) { /* avoid upgrading instr */ return instr_raw_is_tls_spill(instr_get_raw_bits(instr), REG_ECX, MANGLE_XCX_SPILL_SLOT); } else return instr_is_tls_spill(instr, REG_ECX, MANGLE_XCX_SPILL_SLOT); # elif defined(AARCHXX) /* FIXME i#1551, i#1569: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(false); return false; # endif } /* this routine may upgrade a level 1 instr */ static bool instr_check_mcontext_spill_restore(dcontext_t *dcontext, instr_t *instr, bool *spill, reg_id_t *reg, int *offs) { # ifdef X64 /* PR 244737: we always use tls for x64 */ return false; # else opnd_t regop, memop; if (instr_get_opcode(instr) == OP_store) { regop = instr_get_src(instr, 0); memop = instr_get_dst(instr, 0); if (spill != NULL) *spill = true; } else if (instr_get_opcode(instr) == OP_load) { regop = instr_get_dst(instr, 0); memop = instr_get_src(instr, 0); if (spill != NULL) *spill = false; # ifdef X86 } else if (instr_get_opcode(instr) == OP_xchg) { /* we use xchg to restore in dr_insert_mbr_instrumentation */ regop = instr_get_src(instr, 0); memop = instr_get_dst(instr, 0); if (spill != NULL) *spill = false; # endif /* X86 */ } else return false; if (opnd_is_near_base_disp(memop) && opnd_is_abs_base_disp(memop) && opnd_is_reg(regop)) { byte *pc = (byte *)opnd_get_disp(memop); byte *mc = (byte *)get_mcontext(dcontext); if (pc >= mc && pc < mc + sizeof(priv_mcontext_t)) { if (reg != NULL) *reg = opnd_get_reg(regop); if (offs != NULL) *offs = pc - (byte *)dcontext; return true; } } return false; # endif } static bool instr_is_reg_spill_or_restore_ex(void *drcontext, instr_t *instr, bool DR_only, bool *tls, bool *spill, reg_id_t *reg, uint *offs_out) { dcontext_t *dcontext = (dcontext_t *)drcontext; int check_disp = 0; /* init to satisfy some compilers */ reg_id_t myreg; CLIENT_ASSERT(instr != NULL, "invalid NULL argument"); if (reg == NULL) reg = &myreg; if (instr_check_tls_spill_restore(instr, spill, reg, &check_disp)) { if (!DR_only || (reg_spill_tls_offs(*reg) != -1 && /* Mangling may choose to spill registers to a not natural tls offset, * e.g. rip-rel mangling will, if rax is used by the instruction. We * allow for all possible internal DR slots to recognize a DR spill. */ (check_disp == os_tls_offset((ushort)TLS_REG0_SLOT) || check_disp == os_tls_offset((ushort)TLS_REG1_SLOT) || check_disp == os_tls_offset((ushort)TLS_REG2_SLOT) || check_disp == os_tls_offset((ushort)TLS_REG3_SLOT) # ifdef AARCHXX || check_disp == os_tls_offset((ushort)TLS_REG4_SLOT) || check_disp == os_tls_offset((ushort)TLS_REG5_SLOT) # endif ))) { if (tls != NULL) *tls = true; if (offs_out != NULL) *offs_out = check_disp; return true; } } if (dcontext != GLOBAL_DCONTEXT && instr_check_mcontext_spill_restore(dcontext, instr, spill, reg, &check_disp)) { int offs = opnd_get_reg_dcontext_offs(dr_reg_fixer[*reg]); if (!DR_only || (offs != -1 && check_disp == offs)) { if (tls != NULL) *tls = false; if (offs_out != NULL) *offs_out = check_disp; return true; } } return false; } DR_API bool instr_is_reg_spill_or_restore(void *drcontext, instr_t *instr, bool *tls, bool *spill, reg_id_t *reg, uint *offs) { return instr_is_reg_spill_or_restore_ex(drcontext, instr, false, tls, spill, reg, offs); } bool instr_is_DR_reg_spill_or_restore(void *drcontext, instr_t *instr, bool *tls, bool *spill, reg_id_t *reg, uint *offs) { return instr_is_reg_spill_or_restore_ex(drcontext, instr, true, tls, spill, reg, offs); } /* N.B. : client meta routines (dr_insert_* etc.) 
should never use anything other * then TLS_XAX_SLOT unless the client has specified a slot to use as we let the * client use the rest. */ instr_t * instr_create_save_to_tls(dcontext_t *dcontext, reg_id_t reg, ushort offs) { return XINST_CREATE_store(dcontext, opnd_create_tls_slot(os_tls_offset(offs)), opnd_create_reg(reg)); } instr_t * instr_create_restore_from_tls(dcontext_t *dcontext, reg_id_t reg, ushort offs) { return XINST_CREATE_load(dcontext, opnd_create_reg(reg), opnd_create_tls_slot(os_tls_offset(offs))); } /* For -x86_to_x64, we can spill to 64-bit extra registers (xref i#751). */ instr_t * instr_create_save_to_reg(dcontext_t *dcontext, reg_id_t reg1, reg_id_t reg2) { return XINST_CREATE_move(dcontext, opnd_create_reg(reg2), opnd_create_reg(reg1)); } instr_t * instr_create_restore_from_reg(dcontext_t *dcontext, reg_id_t reg1, reg_id_t reg2) { return XINST_CREATE_move(dcontext, opnd_create_reg(reg1), opnd_create_reg(reg2)); } # ifdef X86_64 /* Returns NULL if pc is not the start of a rip-rel lea. * If it could be, returns the address it refers to (which we assume is * never NULL). */ byte * instr_raw_is_rip_rel_lea(byte *pc, byte *read_end) { /* PR 215408: look for "lea reg, [rip+disp]" * We assume no extraneous prefixes, and we require rex.w, though not strictly * necessary for say WOW64 or other known-lower-4GB situations */ if (pc + 7 <= read_end) { if (*(pc + 1) == RAW_OPCODE_lea && (TESTALL(REX_PREFIX_BASE_OPCODE | REX_PREFIX_W_OPFLAG, *pc) && !TESTANY(~(REX_PREFIX_BASE_OPCODE | REX_PREFIX_ALL_OPFLAGS), *pc)) && /* does mod==0 and rm==5? */ ((*(pc + 2)) | MODRM_BYTE(0, 7, 0)) == MODRM_BYTE(0, 7, 5)) { return pc + 7 + *(int *)(pc + 3); } } return NULL; } # endif uint move_mm_reg_opcode(bool aligned16, bool aligned32) { # ifdef X86 if (YMM_ENABLED()) { /* must preserve ymm registers */ return (aligned32 ? OP_vmovdqa : OP_vmovdqu); } else if (proc_has_feature(FEATURE_SSE2)) { return (aligned16 ? OP_movdqa : OP_movdqu); } else { CLIENT_ASSERT(proc_has_feature(FEATURE_SSE), "running on unsupported processor"); return (aligned16 ? OP_movaps : OP_movups); } # elif defined(ARM) /* FIXME i#1551: which one we should return, OP_vmov, OP_vldr, or OP_vstr? */ return OP_vmov; # elif defined(AARCH64) ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */ return 0; # endif /* X86/ARM */ } uint move_mm_avx512_reg_opcode(bool aligned64) { # ifdef X86 /* move_mm_avx512_reg_opcode can only be called on processors that support AVX-512. */ ASSERT(ZMM_ENABLED()); return (aligned64 ? OP_vmovaps : OP_vmovups); # else /* move_mm_avx512_reg_opcode not supported on ARM/AArch64. */ ASSERT_NOT_IMPLEMENTED(false); return 0; # endif /* X86 */ } #endif /* !STANDALONE_DECODER */ /****************************************************************************/
1
23,200
So this means that we can remove a callback via instr_set_label_callback(). Maybe consider adding another API function for removal, and add a new assert here such that cb != NULL?
DynamoRIO-dynamorio
c
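The question above is really about API shape: should instr_set_label_callback() double as the removal path by accepting NULL, or should removal get its own entry point while the setter asserts cb != NULL? A minimal Go sketch of the two shapes, purely as an analogy; the type and function names below are hypothetical and not part of DynamoRIO's C API:

```go
package main

import "fmt"

type labelCallback func(id int)

type instr struct {
	cb labelCallback
}

// Shape A: a single setter that also removes the callback when passed nil.
func (in *instr) setLabelCallback(cb labelCallback) {
	in.cb = cb
}

// Shape B: the setter rejects nil, and removal is an explicit, separate call.
func (in *instr) setLabelCallbackStrict(cb labelCallback) {
	if cb == nil {
		panic("setLabelCallbackStrict: cb must not be nil; use clearLabelCallback")
	}
	in.cb = cb
}

func (in *instr) clearLabelCallback() {
	in.cb = nil
}

func main() {
	i := &instr{}
	i.setLabelCallback(func(id int) { fmt.Println("label", id) })
	i.setLabelCallback(nil) // shape A: removal via the setter
	i.setLabelCallbackStrict(func(id int) { fmt.Println("label", id) })
	i.clearLabelCallback() // shape B: explicit removal, setter stays strict
	fmt.Println("callback still set:", i.cb != nil)
}
```

Shape B trades a slightly larger API surface for a setter that can assert its argument, which is what the comment suggests.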
@@ -30,7 +30,9 @@ <p><%= _("Last modified: ") + l(@plan.updated_at.to_date, formats: :short) %></p> <br> - <p><%= _("Copyright information:") %></p> - <div style="margin-left: 15px;"><p><%= _(" The above plan creator(s) have agreed that others may use as much of the text of this plan as they would like in their own plans, and customise it as necessary. You do not need to credit the creator(s) as the source of the language used, but using any of the plan's text does not imply that the creator(s) endorse, or have any relationship to, your project or proposal") %></p></div> + <% if @public_plan %> + <p><%= _("Copyright information:") %></p> + <div style="margin-left: 15px;"><p><%= _(" The above plan creator(s) have agreed that others may use as much of the text of this plan as they would like in their own plans, and customise it as necessary. You do not need to credit the creator(s) as the source of the language used, but using any of the plan's text does not imply that the creator(s) endorse, or have any relationship to, your project or proposal") %></p></div> + <% end %> </div> <hr class="bottom" />
1
<hr /> <h1><%= @plan.title %></h1> <p><em> <%= _("A Data Management Plan created using ") + Rails.configuration.branding[:application][:name] %> </em></p> <br/> <div class="cover-page"> <p><%= @hash[:attribution].length > 1 ? _("Creators: ") : _('Creator:') %> <%= @hash[:attribution].join(', ') %></p><br> <p><%= _("Affiliation: ") + @hash[:affiliation] %></p><br> <% if @hash[:funder].present? %> <p><%= _("Template: ") + @hash[:funder] %></p><br> <% else %> <p><%= _("Template: ") + @hash[:template] + @hash[:customizer] %></p><br> <% end %> <% if @plan.principal_investigator_identifier.present? %> <p><%= _("ORCID iD: ") + @plan.principal_investigator_identifier %></p> <br> <% end %> <% if @plan.grant_number.present? %> <p><%= _("Grant number: ") + @plan.grant_number %></p> <br> <% end %> <% if @plan.description.present? %> <p><%= _("Project abstract: ") %></p> <div style="margin-left: 15px;"><%= raw(@plan.description) %></div><br> <% end %> <p><%= _("Last modified: ") + l(@plan.updated_at.to_date, formats: :short) %></p> <br> <p><%= _("Copyright information:") %></p> <div style="margin-left: 15px;"><p><%= _(" The above plan creator(s) have agreed that others may use as much of the text of this plan as they would like in their own plans, and customise it as necessary. You do not need to credit the creator(s) as the source of the language used, but using any of the plan's text does not imply that the creator(s) endorse, or have any relationship to, your project or proposal") %></p></div> </div> <hr class="bottom" />
1
17,682
why are we using an instance variable instead of accessing the visibility method?
DMPRoadmap-roadmap
rb
@@ -173,6 +173,7 @@ func (h *HTTPTransport) NewTransport(ctx caddy.Context) (*http.Transport, error) dialer.Resolver = &net.Resolver{ PreferGo: true, Dial: func(ctx context.Context, _, _ string) (net.Conn, error) { + //nolint:gosec addr := h.Resolver.netAddrs[weakrand.Intn(len(h.Resolver.netAddrs))] return d.DialContext(ctx, addr.Network, addr.JoinHostPort(0)) },
1
// Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package reverseproxy import ( "context" "crypto/tls" "crypto/x509" "encoding/base64" "fmt" "io/ioutil" weakrand "math/rand" "net" "net/http" "reflect" "time" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddytls" "golang.org/x/net/http2" ) func init() { caddy.RegisterModule(HTTPTransport{}) } // HTTPTransport is essentially a configuration wrapper for http.Transport. // It defines a JSON structure useful when configuring the HTTP transport // for Caddy's reverse proxy. It builds its http.Transport at Provision. type HTTPTransport struct { // TODO: It's possible that other transports (like fastcgi) might be // able to borrow/use at least some of these config fields; if so, // maybe move them into a type called CommonTransport and embed it? // Configures the DNS resolver used to resolve the IP address of upstream hostnames. Resolver *UpstreamResolver `json:"resolver,omitempty"` // Configures TLS to the upstream. Setting this to an empty struct // is sufficient to enable TLS with reasonable defaults. TLS *TLSConfig `json:"tls,omitempty"` // Configures HTTP Keep-Alive (enabled by default). Should only be // necessary if rigorous testing has shown that tuning this helps // improve performance. KeepAlive *KeepAlive `json:"keep_alive,omitempty"` // Whether to enable compression to upstream. Default: true Compression *bool `json:"compression,omitempty"` // Maximum number of connections per host. Default: 0 (no limit) MaxConnsPerHost int `json:"max_conns_per_host,omitempty"` // Maximum number of idle connections per host. Default: 0 (uses Go's default of 2) MaxIdleConnsPerHost int `json:"max_idle_conns_per_host,omitempty"` // How long to wait before timing out trying to connect to // an upstream. DialTimeout caddy.Duration `json:"dial_timeout,omitempty"` // How long to wait before spawning an RFC 6555 Fast Fallback // connection. A negative value disables this. FallbackDelay caddy.Duration `json:"dial_fallback_delay,omitempty"` // How long to wait for reading response headers from server. ResponseHeaderTimeout caddy.Duration `json:"response_header_timeout,omitempty"` // The length of time to wait for a server's first response // headers after fully writing the request headers if the // request has a header "Expect: 100-continue". ExpectContinueTimeout caddy.Duration `json:"expect_continue_timeout,omitempty"` // The maximum bytes to read from response headers. MaxResponseHeaderSize int64 `json:"max_response_header_size,omitempty"` // The size of the write buffer in bytes. WriteBufferSize int `json:"write_buffer_size,omitempty"` // The size of the read buffer in bytes. ReadBufferSize int `json:"read_buffer_size,omitempty"` // The versions of HTTP to support. As a special case, "h2c" // can be specified to use H2C (HTTP/2 over Cleartext) to the // upstream (this feature is experimental and subject to // change or removal). 
Default: ["1.1", "2"] Versions []string `json:"versions,omitempty"` // The pre-configured underlying HTTP transport. Transport *http.Transport `json:"-"` h2cTransport *http2.Transport } // CaddyModule returns the Caddy module information. func (HTTPTransport) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.reverse_proxy.transport.http", New: func() caddy.Module { return new(HTTPTransport) }, } } // Provision sets up h.Transport with a *http.Transport // that is ready to use. func (h *HTTPTransport) Provision(ctx caddy.Context) error { if len(h.Versions) == 0 { h.Versions = []string{"1.1", "2"} } rt, err := h.NewTransport(ctx) if err != nil { return err } h.Transport = rt // if h2c is enabled, configure its transport (std lib http.Transport // does not "HTTP/2 over cleartext TCP") if sliceContains(h.Versions, "h2c") { // crafting our own http2.Transport doesn't allow us to utilize // most of the customizations/preferences on the http.Transport, // because, for some reason, only http2.ConfigureTransport() // is allowed to set the unexported field that refers to a base // http.Transport config; oh well h2t := &http2.Transport{ // kind of a hack, but for plaintext/H2C requests, pretend to dial TLS DialTLS: func(network, addr string, _ *tls.Config) (net.Conn, error) { // TODO: no context, thus potentially wrong dial info return net.Dial(network, addr) }, AllowHTTP: true, } if h.Compression != nil { h2t.DisableCompression = !*h.Compression } h.h2cTransport = h2t } return nil } // NewTransport builds a standard-lib-compatible http.Transport value from h. func (h *HTTPTransport) NewTransport(ctx caddy.Context) (*http.Transport, error) { dialer := &net.Dialer{ Timeout: time.Duration(h.DialTimeout), FallbackDelay: time.Duration(h.FallbackDelay), } if h.Resolver != nil { for _, v := range h.Resolver.Addresses { addr, err := caddy.ParseNetworkAddress(v) if err != nil { return nil, err } if addr.PortRangeSize() != 1 { return nil, fmt.Errorf("resolver address must have exactly one address; cannot call %v", addr) } h.Resolver.netAddrs = append(h.Resolver.netAddrs, addr) } d := &net.Dialer{ Timeout: time.Duration(h.DialTimeout), FallbackDelay: time.Duration(h.FallbackDelay), } dialer.Resolver = &net.Resolver{ PreferGo: true, Dial: func(ctx context.Context, _, _ string) (net.Conn, error) { addr := h.Resolver.netAddrs[weakrand.Intn(len(h.Resolver.netAddrs))] return d.DialContext(ctx, addr.Network, addr.JoinHostPort(0)) }, } } rt := &http.Transport{ DialContext: func(ctx context.Context, network, address string) (net.Conn, error) { // the proper dialing information should be embedded into the request's context if dialInfo, ok := GetDialInfo(ctx); ok { network = dialInfo.Network address = dialInfo.Address } conn, err := dialer.DialContext(ctx, network, address) if err != nil { // identify this error as one that occurred during // dialing, which can be important when trying to // decide whether to retry a request return nil, DialError{err} } return conn, nil }, MaxConnsPerHost: h.MaxConnsPerHost, MaxIdleConnsPerHost: h.MaxIdleConnsPerHost, ResponseHeaderTimeout: time.Duration(h.ResponseHeaderTimeout), ExpectContinueTimeout: time.Duration(h.ExpectContinueTimeout), MaxResponseHeaderBytes: h.MaxResponseHeaderSize, WriteBufferSize: h.WriteBufferSize, ReadBufferSize: h.ReadBufferSize, } if h.TLS != nil { rt.TLSHandshakeTimeout = time.Duration(h.TLS.HandshakeTimeout) var err error rt.TLSClientConfig, err = h.TLS.MakeTLSClientConfig(ctx) if err != nil { return nil, fmt.Errorf("making TLS client 
config: %v", err) } } if h.KeepAlive != nil { dialer.KeepAlive = time.Duration(h.KeepAlive.ProbeInterval) if h.KeepAlive.Enabled != nil { rt.DisableKeepAlives = !*h.KeepAlive.Enabled } rt.MaxIdleConns = h.KeepAlive.MaxIdleConns rt.MaxIdleConnsPerHost = h.KeepAlive.MaxIdleConnsPerHost rt.IdleConnTimeout = time.Duration(h.KeepAlive.IdleConnTimeout) } if h.Compression != nil { rt.DisableCompression = !*h.Compression } if sliceContains(h.Versions, "2") { if err := http2.ConfigureTransport(rt); err != nil { return nil, err } } return rt, nil } // RoundTrip implements http.RoundTripper. func (h *HTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) { h.SetScheme(req) // if H2C ("HTTP/2 over cleartext") is enabled and the upstream request is // HTTP/2 without TLS, use the alternate H2C-capable transport instead if req.ProtoMajor == 2 && req.URL.Scheme == "http" && h.h2cTransport != nil { return h.h2cTransport.RoundTrip(req) } return h.Transport.RoundTrip(req) } // SetScheme ensures that the outbound request req // has the scheme set in its URL; the underlying // http.Transport requires a scheme to be set. func (h *HTTPTransport) SetScheme(req *http.Request) { if req.URL.Scheme == "" { req.URL.Scheme = "http" if h.TLS != nil { req.URL.Scheme = "https" } } } // TLSEnabled returns true if TLS is enabled. func (h HTTPTransport) TLSEnabled() bool { return h.TLS != nil } // EnableTLS enables TLS on the transport. func (h *HTTPTransport) EnableTLS(base *TLSConfig) error { h.TLS = base return nil } // Cleanup implements caddy.CleanerUpper and closes any idle connections. func (h HTTPTransport) Cleanup() error { if h.Transport == nil { return nil } h.Transport.CloseIdleConnections() return nil } // TLSConfig holds configuration related to the TLS configuration for the // transport/client. type TLSConfig struct { // Optional list of base64-encoded DER-encoded CA certificates to trust. RootCAPool []string `json:"root_ca_pool,omitempty"` // List of PEM-encoded CA certificate files to add to the same trust // store as RootCAPool (or root_ca_pool in the JSON). RootCAPEMFiles []string `json:"root_ca_pem_files,omitempty"` // PEM-encoded client certificate filename to present to servers. ClientCertificateFile string `json:"client_certificate_file,omitempty"` // PEM-encoded key to use with the client certificate. ClientCertificateKeyFile string `json:"client_certificate_key_file,omitempty"` // If specified, Caddy will use and automate a client certificate // with this subject name. ClientCertificateAutomate string `json:"client_certificate_automate,omitempty"` // If true, TLS verification of server certificates will be disabled. // This is insecure and may be removed in the future. Do not use this // option except in testing or local development environments. InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"` // The duration to allow a TLS handshake to a server. HandshakeTimeout caddy.Duration `json:"handshake_timeout,omitempty"` // The server name (SNI) to use in TLS handshakes. ServerName string `json:"server_name,omitempty"` } // MakeTLSClientConfig returns a tls.Config usable by a client to a backend. // If there is no custom TLS configuration, a nil config may be returned. 
func (t TLSConfig) MakeTLSClientConfig(ctx caddy.Context) (*tls.Config, error) { cfg := new(tls.Config) // client auth if t.ClientCertificateFile != "" && t.ClientCertificateKeyFile == "" { return nil, fmt.Errorf("client_certificate_file specified without client_certificate_key_file") } if t.ClientCertificateFile == "" && t.ClientCertificateKeyFile != "" { return nil, fmt.Errorf("client_certificate_key_file specified without client_certificate_file") } if t.ClientCertificateFile != "" && t.ClientCertificateKeyFile != "" { cert, err := tls.LoadX509KeyPair(t.ClientCertificateFile, t.ClientCertificateKeyFile) if err != nil { return nil, fmt.Errorf("loading client certificate key pair: %v", err) } cfg.Certificates = []tls.Certificate{cert} } if t.ClientCertificateAutomate != "" { tlsAppIface, err := ctx.App("tls") if err != nil { return nil, fmt.Errorf("getting tls app: %v", err) } tlsApp := tlsAppIface.(*caddytls.TLS) err = tlsApp.Manage([]string{t.ClientCertificateAutomate}) if err != nil { return nil, fmt.Errorf("managing client certificate: %v", err) } cfg.GetClientCertificate = func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { certs := tlsApp.AllMatchingCertificates(t.ClientCertificateAutomate) var err error for _, cert := range certs { err = cri.SupportsCertificate(&cert.Certificate) if err == nil { return &cert.Certificate, nil } } return nil, err } } // trusted root CAs if len(t.RootCAPool) > 0 || len(t.RootCAPEMFiles) > 0 { rootPool := x509.NewCertPool() for _, encodedCACert := range t.RootCAPool { caCert, err := decodeBase64DERCert(encodedCACert) if err != nil { return nil, fmt.Errorf("parsing CA certificate: %v", err) } rootPool.AddCert(caCert) } for _, pemFile := range t.RootCAPEMFiles { pemData, err := ioutil.ReadFile(pemFile) if err != nil { return nil, fmt.Errorf("failed reading ca cert: %v", err) } rootPool.AppendCertsFromPEM(pemData) } cfg.RootCAs = rootPool } // custom SNI cfg.ServerName = t.ServerName // throw all security out the window cfg.InsecureSkipVerify = t.InsecureSkipVerify // only return a config if it's not empty if reflect.DeepEqual(cfg, new(tls.Config)) { return nil, nil } return cfg, nil } // UpstreamResolver holds the set of addresses of DNS resolvers of // upstream addresses type UpstreamResolver struct { // The addresses of DNS resolvers to use when looking up the addresses of proxy upstreams. // It accepts [network addresses](/docs/conventions#network-addresses) // with port range of only 1. If the host is an IP address, it will be dialed directly to resolve the upstream server. // If the host is not an IP address, the addresses are resolved using the [name resolution convention](https://golang.org/pkg/net/#hdr-Name_Resolution) of the Go standard library. // If the array contains more than 1 resolver address, one is chosen at random. Addresses []string `json:"addresses,omitempty"` netAddrs []caddy.NetworkAddress } // KeepAlive holds configuration pertaining to HTTP Keep-Alive. type KeepAlive struct { // Whether HTTP Keep-Alive is enabled. Default: true Enabled *bool `json:"enabled,omitempty"` // How often to probe for liveness. ProbeInterval caddy.Duration `json:"probe_interval,omitempty"` // Maximum number of idle connections. MaxIdleConns int `json:"max_idle_conns,omitempty"` // Maximum number of idle connections per upstream host. MaxIdleConnsPerHost int `json:"max_idle_conns_per_host,omitempty"` // How long connections should be kept alive when idle. 
IdleConnTimeout caddy.Duration `json:"idle_timeout,omitempty"` } // decodeBase64DERCert base64-decodes, then DER-decodes, certStr. func decodeBase64DERCert(certStr string) (*x509.Certificate, error) { // decode base64 derBytes, err := base64.StdEncoding.DecodeString(certStr) if err != nil { return nil, err } // parse the DER-encoded certificate return x509.ParseCertificate(derBytes) } // sliceContains returns true if needle is in haystack. func sliceContains(haystack []string, needle string) bool { for _, s := range haystack { if s == needle { return true } } return false } // Interface guards var ( _ caddy.Provisioner = (*HTTPTransport)(nil) _ http.RoundTripper = (*HTTPTransport)(nil) _ caddy.CleanerUpper = (*HTTPTransport)(nil) _ TLSTransport = (*HTTPTransport)(nil) )
1
15,752
Ok. Can we just disable this linter?
caddyserver-caddy
go
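The `//nolint:gosec` line in the patch and the question above ("Can we just disable this linter?") come down to a trade-off: gosec flags math/rand, but the random pick here is load balancing, not a security decision. A minimal Go sketch of the two options, with hypothetical helper names; neither is Caddy's actual code:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
	weakrand "math/rand"
)

// pickWeak uses math/rand; fine for load balancing, but gosec flags it,
// so the call site carries a targeted nolint annotation.
func pickWeak(addrs []string) string {
	//nolint:gosec
	return addrs[weakrand.Intn(len(addrs))]
}

// pickStrong satisfies gosec by using crypto/rand, at the cost of an
// error path and a heavier dependency for a non-security choice.
func pickStrong(addrs []string) (string, error) {
	n, err := rand.Int(rand.Reader, big.NewInt(int64(len(addrs))))
	if err != nil {
		return "", err
	}
	return addrs[n.Int64()], nil
}

func main() {
	addrs := []string{"8.8.8.8:53", "1.1.1.1:53"}
	fmt.Println(pickWeak(addrs))
	if s, err := pickStrong(addrs); err == nil {
		fmt.Println(s)
	}
}
```

A per-line annotation keeps gosec active everywhere else, which is usually preferable to disabling the linter globally.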
@@ -6,7 +6,7 @@ from listenbrainz import config from listenbrainz import db ADMIN_SQL_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..','admin', 'sql') -TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_data') +TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'testdata') class DatabaseTestCase(unittest.TestCase):
1
import os import unittest from listenbrainz import config from listenbrainz import db ADMIN_SQL_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..','admin', 'sql') TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_data') class DatabaseTestCase(unittest.TestCase): def setUp(self): self.config = config db.init_db_connection(config.SQLALCHEMY_DATABASE_URI) self.reset_db() def tearDown(self): self.drop_tables() def reset_db(self): self.drop_tables() self.init_db() def init_db(self): db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_schema.sql')) db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_tables.sql')) db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_primary_keys.sql')) db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_foreign_keys.sql')) db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_indexes.sql')) def drop_tables(self): self.drop_schema() db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'drop_tables.sql')) def drop_schema(self): db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'drop_schema.sql')) def load_data_files(self): """ Get the data files from the disk """ # return os.path.join(TEST_DATA_PATH, file_name) return
1
14,576
this is `test_data` and all other instances are `testdata` - does this change affect any test files, or are there actually no directories called `test_data` in the repo? (I just checked, it seems like there aren't...)
metabrainz-listenbrainz-server
py
@@ -48,10 +48,6 @@ public class ApiVersionStrings { return getBasePath() + "/chatter/"; } - public static String getBaseConnectPath() { - return getBasePath() + "/connect/"; - } - public static String getBaseSObjectPath() { return getBasePath() + "/sobjects/"; }
1
/* * Copyright (c) 2013-present, salesforce.com, inc. * All rights reserved. * Redistribution and use of this software in source and binary forms, with or * without modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of salesforce.com, inc. nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission of salesforce.com, inc. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.salesforce.androidsdk.rest; import android.content.Context; import com.salesforce.androidsdk.R; import com.salesforce.androidsdk.app.SalesforceSDKManager; /** * This is where all the API version info lives. This allows us to change one * line here and affect all our api calls. */ public class ApiVersionStrings { public static final String VERSION_NUMBER = "v42.0"; public static final String API_PREFIX = "/services/data/"; public static String getBasePath() { return API_PREFIX + getVersionNumber(SalesforceSDKManager.getInstance().getAppContext()); } public static String getBaseChatterPath() { return getBasePath() + "/chatter/"; } public static String getBaseConnectPath() { return getBasePath() + "/connect/"; } public static String getBaseSObjectPath() { return getBasePath() + "/sobjects/"; } /** * Returns the API version number to be used. * * @param context Context. Could be null in some test runs. * @return API version number to be used. */ public static String getVersionNumber(Context context) { String apiVersion = VERSION_NUMBER; if (context != null) { apiVersion = context.getString(R.string.api_version); } return apiVersion; } }
1
16,956
Fixing `lint` warnings that have existed for a while.
forcedotcom-SalesforceMobileSDK-Android
java
@@ -24,4 +24,9 @@ const ( // EnvKeyForInstallConfigName is the environment variable to get the // the install config's name EnvKeyForInstallConfigName InstallENVKey = "OPENEBS_IO_INSTALL_CONFIG_NAME" + // CASDefaultCstorPoolENVK is the ENV key that specifies wether default cstor pool + // should be configured or not + // If value is "true", default cstor pool will be configured else for "false" + // it will not be configured. + CASDefaultCstorPool InstallENVKey = "OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL" )
1
/* Copyright 2018 The OpenEBS Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 // InstallENVKey is a typed string to represent various environment keys // used for install type InstallENVKey string const ( // EnvKeyForInstallConfigName is the environment variable to get the // the install config's name EnvKeyForInstallConfigName InstallENVKey = "OPENEBS_IO_INSTALL_CONFIG_NAME" )
1
9,330
Better to rename this to CASDefaultCstorSparsePool.
openebs-maya
go
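The patch only declares the key, and the comment suggests the tighter name CASDefaultCstorSparsePool. A minimal Go sketch of how a consumer of that key might gate sparse-pool creation on a "true"/"false" value; the helper name is hypothetical and this is not code from maya:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// CASDefaultCstorSparsePool is the ENV key proposed in the patch, with the
// rename suggested in the review.
const CASDefaultCstorSparsePool = "OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL"

// defaultCstorSparsePoolEnabled treats anything other than an explicit
// boolean true as "do not configure the default cstor sparse pool".
func defaultCstorSparsePoolEnabled() bool {
	enabled, err := strconv.ParseBool(os.Getenv(CASDefaultCstorSparsePool))
	if err != nil {
		return false
	}
	return enabled
}

func main() {
	fmt.Println("install default cstor sparse pool:", defaultCstorSparsePoolEnabled())
}
```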
@@ -105,4 +105,9 @@ public class S3FileIO implements FileIO { this.awsClientFactory = AwsClientFactories.from(properties); this.s3 = awsClientFactory::s3; } + + @Override + public void close() { + client().close(); + } }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.aws.s3; import java.util.Map; import org.apache.iceberg.aws.AwsClientFactories; import org.apache.iceberg.aws.AwsClientFactory; import org.apache.iceberg.aws.AwsProperties; import org.apache.iceberg.io.FileIO; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.io.OutputFile; import org.apache.iceberg.util.SerializableSupplier; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; /** * FileIO implementation backed by S3. * <p> * Locations used must follow the conventions for S3 URIs (e.g. s3://bucket/path...). * URIs with schemes s3a, s3n, https are also treated as s3 file paths. * Using this FileIO with other schemes will result in {@link org.apache.iceberg.exceptions.ValidationException}. */ public class S3FileIO implements FileIO { private SerializableSupplier<S3Client> s3; private AwsProperties awsProperties; private AwsClientFactory awsClientFactory; private transient S3Client client; /** * No-arg constructor to load the FileIO dynamically. * <p> * All fields are initialized by calling {@link S3FileIO#initialize(Map)} later. */ public S3FileIO() { } /** * Constructor with custom s3 supplier and default AWS properties. * <p> * Calling {@link S3FileIO#initialize(Map)} will overwrite information set in this constructor. * @param s3 s3 supplier */ public S3FileIO(SerializableSupplier<S3Client> s3) { this(s3, new AwsProperties()); } /** * Constructor with custom s3 supplier and AWS properties. * <p> * Calling {@link S3FileIO#initialize(Map)} will overwrite information set in this constructor. * @param s3 s3 supplier * @param awsProperties aws properties */ public S3FileIO(SerializableSupplier<S3Client> s3, AwsProperties awsProperties) { this.s3 = s3; this.awsProperties = awsProperties; } @Override public InputFile newInputFile(String path) { return S3InputFile.fromLocation(path, client(), awsProperties); } @Override public OutputFile newOutputFile(String path) { return S3OutputFile.fromLocation(path, client(), awsProperties); } @Override public void deleteFile(String path) { S3URI location = new S3URI(path); DeleteObjectRequest deleteRequest = DeleteObjectRequest.builder().bucket(location.bucket()).key(location.key()).build(); client().deleteObject(deleteRequest); } private S3Client client() { if (client == null) { client = s3.get(); } return client; } @Override public void initialize(Map<String, String> properties) { this.awsProperties = new AwsProperties(properties); this.awsClientFactory = AwsClientFactories.from(properties); this.s3 = awsClientFactory::s3; } }
1
39,821
Since we're not 100% sure if `close` will be called more than once, should we set `client` to `null` or add an `AtomicBoolean closed` that will then handle the idempotency issue?
apache-iceberg
java
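The concern above is about making `close()` safe to call more than once. The pattern is language-agnostic; a minimal Go sketch of the once-guarded close it describes, with hypothetical types rather than the Java S3FileIO:

```go
package main

import (
	"fmt"
	"sync"
)

type s3Client struct{}

func (c *s3Client) close() { fmt.Println("client closed") }

type fileIO struct {
	client    *s3Client
	closeOnce sync.Once
}

// Close releases the underlying client at most once; later calls are no-ops,
// which is the idempotency the review comment asks about.
func (f *fileIO) Close() error {
	f.closeOnce.Do(func() {
		if f.client != nil {
			f.client.close()
			f.client = nil
		}
	})
	return nil
}

func main() {
	f := &fileIO{client: &s3Client{}}
	_ = f.Close()
	_ = f.Close() // second call is harmless
}
```

Clearing the reference (or an AtomicBoolean in Java, as the comment suggests) gives the same guarantee without relying on the client tolerating a double close.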
@@ -379,11 +379,15 @@ Intersection MotorwayHandler::fromRamp(const EdgeID via_eid, Intersection inters // // 7 1 // 0 + const auto &first_intersection_name = + name_table.GetNameForID(first_intersection_data.name_id).to_string(); + const auto &second_intersection_name = + name_table.GetNameForID(second_intersection_data.name_id).to_string(); if (intersection[1].entry_allowed) { if (isMotorwayClass(intersection[1].eid, node_based_graph) && - second_intersection_data.name_id != EMPTY_NAMEID && - first_intersection_data.name_id != EMPTY_NAMEID && first_second_same_name) + !second_intersection_name.empty() && !first_intersection_name.empty() && + first_second_same_name) { // circular order indicates a merge to the left (0-3 onto 4 if (angularDeviation(intersection[1].angle, STRAIGHT_ANGLE) <
1
#include "extractor/guidance/motorway_handler.hpp" #include "extractor/guidance/constants.hpp" #include "extractor/guidance/road_classification.hpp" #include "util/bearing.hpp" #include "util/guidance/name_announcements.hpp" #include <limits> #include <utility> #include <boost/assert.hpp> using osrm::util::angularDeviation; using osrm::extractor::guidance::getTurnDirection; namespace osrm { namespace extractor { namespace guidance { namespace { inline bool isMotorwayClass(EdgeID eid, const util::NodeBasedDynamicGraph &node_based_graph) { return node_based_graph.GetEdgeData(eid).flags.road_classification.IsMotorwayClass(); } inline RoadClassification roadClass(const ConnectedRoad &road, const util::NodeBasedDynamicGraph &graph) { return graph.GetEdgeData(road.eid).flags.road_classification; } inline bool isRampClass(EdgeID eid, const util::NodeBasedDynamicGraph &node_based_graph) { return node_based_graph.GetEdgeData(eid).flags.road_classification.IsRampClass(); } } // namespace MotorwayHandler::MotorwayHandler(const util::NodeBasedDynamicGraph &node_based_graph, const EdgeBasedNodeDataContainer &node_data_container, const std::vector<util::Coordinate> &coordinates, const util::NameTable &name_table, const SuffixTable &street_name_suffix_table, const IntersectionGenerator &intersection_generator) : IntersectionHandler(node_based_graph, node_data_container, coordinates, name_table, street_name_suffix_table, intersection_generator) { } bool MotorwayHandler::canProcess(const NodeID, const EdgeID via_eid, const Intersection &intersection) const { bool has_motorway = false; bool has_normal_roads = false; for (const auto &road : intersection) { // not merging or forking? if (road.entry_allowed && angularDeviation(road.angle, STRAIGHT_ANGLE) > 60) return false; else if (isMotorwayClass(road.eid, node_based_graph)) { if (road.entry_allowed) has_motorway = true; } else if (!isRampClass(road.eid, node_based_graph)) has_normal_roads = true; } if (has_normal_roads) return false; return has_motorway || isMotorwayClass(via_eid, node_based_graph); } Intersection MotorwayHandler:: operator()(const NodeID, const EdgeID via_eid, Intersection intersection) const { // coming from motorway if (isMotorwayClass(via_eid, node_based_graph)) { intersection = fromMotorway(via_eid, std::move(intersection)); std::for_each(intersection.begin(), intersection.end(), [](ConnectedRoad &road) { if (road.instruction.type == TurnType::OnRamp) road.instruction.type = TurnType::OffRamp; }); return intersection; } else // coming from a ramp { return fromRamp(via_eid, std::move(intersection)); // ramp merging straight onto motorway } } Intersection MotorwayHandler::fromMotorway(const EdgeID via_eid, Intersection intersection) const { const auto &in_data = node_data_container.GetAnnotation(node_based_graph.GetEdgeData(via_eid).annotation_data); BOOST_ASSERT(isMotorwayClass(via_eid, node_based_graph)); const auto countExitingMotorways = [this](const Intersection &intersection) { unsigned count = 0; for (const auto &road : intersection) { if (road.entry_allowed && isMotorwayClass(road.eid, node_based_graph)) ++count; } return count; }; // find the angle that continues on our current highway const auto getContinueAngle = [this, in_data](const Intersection &intersection) { for (const auto &road : intersection) { if (!road.entry_allowed) continue; const auto &out_data = node_data_container.GetAnnotation( node_based_graph.GetEdgeData(road.eid).annotation_data); const auto same_name = !util::guidance::requiresNameAnnounced( 
in_data.name_id, out_data.name_id, name_table, street_name_suffix_table); if (road.angle != 0 && in_data.name_id != EMPTY_NAMEID && out_data.name_id != EMPTY_NAMEID && same_name && isMotorwayClass(road.eid, node_based_graph)) return road.angle; } return intersection[0].angle; }; const auto getMostLikelyContinue = [this, in_data](const Intersection &intersection) { double angle = intersection[0].angle; double best = 180; for (const auto &road : intersection) { if (isMotorwayClass(road.eid, node_based_graph) && angularDeviation(road.angle, STRAIGHT_ANGLE) < best) { best = angularDeviation(road.angle, STRAIGHT_ANGLE); angle = road.angle; } } return angle; }; const auto findBestContinue = [&]() { const double continue_angle = getContinueAngle(intersection); if (continue_angle != intersection[0].angle) return continue_angle; else return getMostLikelyContinue(intersection); }; // find continue angle const double continue_angle = findBestContinue(); // highway does not continue and has no obvious choice if (continue_angle == intersection[0].angle) { if (intersection.size() == 2) { // do not announce ramps at the end of a highway intersection[1].instruction = {TurnType::NoTurn, getTurnDirection(intersection[1].angle)}; } else if (intersection.size() == 3) { // splitting ramp at the end of a highway if (intersection[1].entry_allowed && intersection[2].entry_allowed) { assignFork(via_eid, intersection[2], intersection[1]); } else { // ending in a passing ramp if (intersection[1].entry_allowed) intersection[1].instruction = {TurnType::NoTurn, getTurnDirection(intersection[1].angle)}; else intersection[2].instruction = {TurnType::NoTurn, getTurnDirection(intersection[2].angle)}; } } else if (intersection.size() == 4 && roadClass(intersection[1], node_based_graph) == roadClass(intersection[2], node_based_graph) && roadClass(intersection[2], node_based_graph) == roadClass(intersection[3], node_based_graph)) { // tripple fork at the end assignFork(via_eid, intersection[3], intersection[2], intersection[1]); } else if (intersection.countEnterable() > 0) // check whether turns exist at all { // FALLBACK, this should hopefully never be reached return fallback(std::move(intersection)); } } else { const unsigned exiting_motorways = countExitingMotorways(intersection); if (exiting_motorways == 0) { // Ending in Ramp for (auto &road : intersection) { if (road.entry_allowed) { BOOST_ASSERT(isRampClass(road.eid, node_based_graph)); road.instruction = TurnInstruction::SUPPRESSED(getTurnDirection(road.angle)); } } } else if (exiting_motorways == 1) { // normal motorway passing some ramps or mering onto another motorway if (intersection.size() == 2) { BOOST_ASSERT(!isRampClass(intersection[1].eid, node_based_graph)); intersection[1].instruction = getInstructionForObvious(intersection.size(), via_eid, isThroughStreet(1, intersection), intersection[1]); } else { // Normal Highway exit or merge for (auto &road : intersection) { // ignore invalid uturns/other if (!road.entry_allowed) continue; if (road.angle == continue_angle) { road.instruction = getInstructionForObvious( intersection.size(), via_eid, isThroughStreet(1, intersection), road); } else if (road.angle < continue_angle) { road.instruction = {isRampClass(road.eid, node_based_graph) ? TurnType::OffRamp : TurnType::Turn, (road.angle < 145) ? DirectionModifier::Right : DirectionModifier::SlightRight}; } else if (road.angle > continue_angle) { road.instruction = {isRampClass(road.eid, node_based_graph) ? TurnType::OffRamp : TurnType::Turn, (road.angle > 215) ? 
DirectionModifier::Left : DirectionModifier::SlightLeft}; } } } } // handle motorway forks else if (exiting_motorways > 1) { if (exiting_motorways == 2 && intersection.size() == 2) { intersection[1].instruction = getInstructionForObvious(intersection.size(), via_eid, isThroughStreet(1, intersection), intersection[1]); intersection[0].entry_allowed = false; // UTURN on the freeway } else if (exiting_motorways == 2) { // standard fork std::size_t first_valid = std::numeric_limits<std::size_t>::max(), second_valid = std::numeric_limits<std::size_t>::max(); for (std::size_t i = 0; i < intersection.size(); ++i) { if (intersection[i].entry_allowed && isMotorwayClass(intersection[i].eid, node_based_graph)) { if (first_valid < intersection.size()) { second_valid = i; break; } else { first_valid = i; } } } assignFork(via_eid, intersection[second_valid], intersection[first_valid]); } else if (exiting_motorways == 3) { // triple fork std::size_t first_valid = std::numeric_limits<std::size_t>::max(), second_valid = std::numeric_limits<std::size_t>::max(), third_valid = std::numeric_limits<std::size_t>::max(); for (std::size_t i = 0; i < intersection.size(); ++i) { if (intersection[i].entry_allowed && isMotorwayClass(intersection[i].eid, node_based_graph)) { if (second_valid < intersection.size()) { third_valid = i; break; } else if (first_valid < intersection.size()) { second_valid = i; } else { first_valid = i; } } } assignFork(via_eid, intersection[third_valid], intersection[second_valid], intersection[first_valid]); } else { return fallback(std::move(intersection)); } } // done for more than one highway exit } return intersection; } Intersection MotorwayHandler::fromRamp(const EdgeID via_eid, Intersection intersection) const { auto num_valid_turns = intersection.countEnterable(); // ramp straight into a motorway/ramp if (intersection.size() == 2 && num_valid_turns == 1) { BOOST_ASSERT(!intersection[0].entry_allowed); BOOST_ASSERT(isMotorwayClass(intersection[1].eid, node_based_graph)); intersection[1].instruction = getInstructionForObvious( intersection.size(), via_eid, isThroughStreet(1, intersection), intersection[1]); } else if (intersection.size() == 3) { const auto &second_intersection_data = node_data_container.GetAnnotation( node_based_graph.GetEdgeData(intersection[2].eid).annotation_data); const auto &first_intersection_data = node_data_container.GetAnnotation( node_based_graph.GetEdgeData(intersection[1].eid).annotation_data); const auto first_second_same_name = !util::guidance::requiresNameAnnounced(second_intersection_data.name_id, first_intersection_data.name_id, name_table, street_name_suffix_table); // merging onto a passing highway / or two ramps merging onto the same highway if (num_valid_turns == 1) { BOOST_ASSERT(!intersection[0].entry_allowed); // check order of highways // 4 // 5 3 // // 6 2 // // 7 1 // 0 if (intersection[1].entry_allowed) { if (isMotorwayClass(intersection[1].eid, node_based_graph) && second_intersection_data.name_id != EMPTY_NAMEID && first_intersection_data.name_id != EMPTY_NAMEID && first_second_same_name) { // circular order indicates a merge to the left (0-3 onto 4 if (angularDeviation(intersection[1].angle, STRAIGHT_ANGLE) < 2 * NARROW_TURN_ANGLE) intersection[1].instruction = {TurnType::Merge, DirectionModifier::SlightLeft}; else // fallback intersection[1].instruction = {TurnType::Merge, getTurnDirection(intersection[1].angle)}; } else // passing by the end of a motorway { intersection[1].instruction = getInstructionForObvious(intersection.size(), 
via_eid, isThroughStreet(1, intersection), intersection[1]); } } else { BOOST_ASSERT(intersection[2].entry_allowed); if (isMotorwayClass(intersection[2].eid, node_based_graph) && second_intersection_data.name_id != EMPTY_NAMEID && first_intersection_data.name_id != EMPTY_NAMEID && first_second_same_name) { // circular order (5-0) onto 4 if (angularDeviation(intersection[2].angle, STRAIGHT_ANGLE) < 2 * NARROW_TURN_ANGLE) intersection[2].instruction = {TurnType::Merge, DirectionModifier::SlightRight}; else // fallback intersection[2].instruction = {TurnType::Merge, getTurnDirection(intersection[2].angle)}; } else // passing the end of a highway { intersection[2].instruction = getInstructionForObvious(intersection.size(), via_eid, isThroughStreet(2, intersection), intersection[2]); } } } else { BOOST_ASSERT(num_valid_turns == 2); // UTurn on ramps is not possible BOOST_ASSERT(!intersection[0].entry_allowed); BOOST_ASSERT(intersection[1].entry_allowed); BOOST_ASSERT(intersection[2].entry_allowed); // two motorways starting at end of ramp (fork) // M M // \ / // | // R if (isMotorwayClass(intersection[1].eid, node_based_graph) && isMotorwayClass(intersection[2].eid, node_based_graph)) { assignFork(via_eid, intersection[2], intersection[1]); } else { // continued ramp passing motorway entry // M R // M R // | / // R if (isMotorwayClass(intersection[1].eid, node_based_graph)) { intersection[1].instruction = {TurnType::Turn, DirectionModifier::SlightRight}; intersection[2].instruction = {TurnType::Continue, DirectionModifier::SlightLeft}; } else { assignFork(via_eid, intersection[2], intersection[1]); } } } } // On - Off Ramp on passing Motorway, Ramp onto Fork(?) else if (intersection.size() == 4) { bool passed_highway_entry = false; for (auto &road : intersection) { if (!road.entry_allowed && isMotorwayClass(road.eid, node_based_graph)) { passed_highway_entry = true; } else if (isMotorwayClass(road.eid, node_based_graph)) { road.instruction = {TurnType::Merge, passed_highway_entry ? DirectionModifier::SlightRight : DirectionModifier::SlightLeft}; } else { BOOST_ASSERT(isRampClass(road.eid, node_based_graph)); road.instruction = {TurnType::OffRamp, getTurnDirection(road.angle)}; } } } else { return fallback(std::move(intersection)); } return intersection; } Intersection MotorwayHandler::fallback(Intersection intersection) const { for (auto &road : intersection) { if (!road.entry_allowed) continue; const auto type = isMotorwayClass(road.eid, node_based_graph) ? TurnType::Merge : TurnType::Turn; if (type == TurnType::Turn) { if (angularDeviation(road.angle, STRAIGHT_ANGLE) < FUZZY_ANGLE_DIFFERENCE) road.instruction = {type, DirectionModifier::Straight}; else { road.instruction = {type, road.angle > STRAIGHT_ANGLE ? DirectionModifier::SlightLeft : DirectionModifier::SlightRight}; } } else { road.instruction = {type, road.angle < STRAIGHT_ANGLE ? DirectionModifier::SlightLeft : DirectionModifier::SlightRight}; } } return intersection; } } // namespace guidance } // namespace extractor } // namespace osrm
1
23,162
And here - what happens if the name id is invalid?
Project-OSRM-osrm-backend
cpp
@@ -0,0 +1,18 @@ +<?php + +declare(strict_types=1); + +namespace Bolt\Storage\Query\Directive; + +use Bolt\Storage\Query\QueryInterface; + +/** + * Directive a raw output of the generated query. + */ +class PrintQueryDirective +{ + public function __invoke(QueryInterface $query): void + { + echo $query; + } +}
1
1
10,638
__toString() is not a part of QueryInterface
bolt-core
php
@@ -43,7 +43,7 @@ from typing import ( cast, TYPE_CHECKING, ) - +import datetime import numpy as np import pandas as pd from pandas.api.types import is_list_like, is_dict_like, is_scalar
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A wrapper class for Spark DataFrame to behave similar to pandas DataFrame. """ from collections import OrderedDict, defaultdict, namedtuple from collections.abc import Mapping from distutils.version import LooseVersion import re import warnings import inspect import json import types from functools import partial, reduce import sys from itertools import zip_longest from typing import ( Any, Optional, List, Tuple, Union, Generic, TypeVar, Iterable, Iterator, Dict, Callable, cast, TYPE_CHECKING, ) import numpy as np import pandas as pd from pandas.api.types import is_list_like, is_dict_like, is_scalar if TYPE_CHECKING: from pandas.io.formats.style import Styler if LooseVersion(pd.__version__) >= LooseVersion("0.24"): from pandas.core.dtypes.common import infer_dtype_from_object else: from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object from pandas.core.accessor import CachedAccessor from pandas.core.dtypes.inference import is_sequence import pyspark from pyspark import StorageLevel from pyspark import sql as spark from pyspark.sql import Column, DataFrame as SparkDataFrame, functions as F from pyspark.sql.functions import pandas_udf, PandasUDFType from pyspark.sql.types import ( BooleanType, DoubleType, FloatType, NumericType, StringType, StructType, StructField, ArrayType, ) from pyspark.sql.window import Window from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.accessors import KoalasFrameMethods from databricks.koalas.config import option_context, get_option from databricks.koalas.spark import functions as SF from databricks.koalas.spark.accessors import SparkFrameMethods, CachedSparkFrameMethods from databricks.koalas.utils import ( align_diff_frames, column_labels_level, combine_frames, default_session, is_name_like_tuple, is_name_like_value, is_testing, name_like_string, same_anchor, scol_for, validate_arguments_and_invoke_function, validate_axis, validate_bool_kwarg, validate_how, verify_temp_column_name, ) from databricks.koalas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale from databricks.koalas.generic import Frame from databricks.koalas.internal import ( InternalFrame, HIDDEN_COLUMNS, NATURAL_ORDER_COLUMN_NAME, SPARK_INDEX_NAME_FORMAT, SPARK_DEFAULT_INDEX_NAME, SPARK_DEFAULT_SERIES_NAME, ) from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame from databricks.koalas.ml import corr from databricks.koalas.typedef import ( as_spark_type, infer_return_type, spark_type_to_pandas_dtype, DataFrameType, SeriesType, Scalar, ) from databricks.koalas.plot import KoalasPlotAccessor if TYPE_CHECKING: from databricks.koalas.indexes import Index from databricks.koalas.series import Series # These regular expression patterns are complied and defined here to avoid to compile the same # pattern every time it is used in _repr_ and _repr_html_ in DataFrame. 
# Two patterns basically seek the footer string from Pandas' REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$") REPR_HTML_PATTERN = re.compile( r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$" ) _flex_doc_FRAME = """ Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``. With reverse version, `{reverse}`. Among flexible wrappers (`add`, `sub`, `mul`, `div`) to arithmetic operators: `+`, `-`, `*`, `/`, `//`. Parameters ---------- other : scalar Any single data Returns ------- DataFrame Result of the arithmetic operation. Examples -------- >>> df = ks.DataFrame({{'angles': [0, 3, 4], ... 'degrees': [360, 180, 360]}}, ... index=['circle', 'triangle', 'rectangle'], ... columns=['angles', 'degrees']) >>> df angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Add a scalar with operator version which return the same results. Also reverse version. >>> df + 1 angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(df) angles degrees circle 0 720 triangle 6 360 rectangle 8 720 >>> df + df + df angles degrees circle 0 1080 triangle 9 540 rectangle 12 1080 >>> df.radd(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 Divide and true divide by constant with reverse version. >>> df / 10 angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.div(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rdiv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 >>> df.truediv(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rtruediv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 Subtract by constant with reverse version. >>> df - 1 angles degrees circle -1 359 triangle 2 179 rectangle 3 359 >>> df.sub(1) angles degrees circle -1 359 triangle 2 179 rectangle 3 359 >>> df.rsub(1) angles degrees circle 1 -359 triangle -2 -179 rectangle -3 -359 Multiply by constant with reverse version. >>> df * 1 angles degrees circle 0 360 triangle 3 180 rectangle 4 360 >>> df.mul(1) angles degrees circle 0 360 triangle 3 180 rectangle 4 360 >>> df.rmul(1) angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Floor Divide by constant with reverse version. >>> df // 10 angles degrees circle 0.0 36.0 triangle 0.0 18.0 rectangle 0.0 36.0 >>> df.floordiv(10) angles degrees circle 0.0 36.0 triangle 0.0 18.0 rectangle 0.0 36.0 >>> df.rfloordiv(10) # doctest: +SKIP angles degrees circle inf 0.0 triangle 3.0 0.0 rectangle 2.0 0.0 Mod by constant with reverse version. >>> df % 2 angles degrees circle 0 0 triangle 1 0 rectangle 0 0 >>> df.mod(2) angles degrees circle 0 0 triangle 1 0 rectangle 0 0 >>> df.rmod(2) angles degrees circle NaN 2 triangle 2.0 2 rectangle 2.0 2 Power by constant with reverse version. 
>>> df ** 2 angles degrees circle 0.0 129600.0 triangle 9.0 32400.0 rectangle 16.0 129600.0 >>> df.pow(2) angles degrees circle 0.0 129600.0 triangle 9.0 32400.0 rectangle 16.0 129600.0 >>> df.rpow(2) angles degrees circle 1.0 2.348543e+108 triangle 8.0 1.532496e+54 rectangle 16.0 2.348543e+108 """ T = TypeVar("T") def _create_tuple_for_frame_type(params): from databricks.koalas.typedef import NameTypeHolder if isinstance(params, zip): params = [slice(name, tpe) for name, tpe in params] if isinstance(params, slice): params = (params,) if ( hasattr(params, "__len__") and isinstance(params, Iterable) and all(isinstance(param, slice) for param in params) ): for param in params: if isinstance(param.start, str) and param.step is not None: raise TypeError( "Type hints should be specified as " "DataFrame['name': type]; however, got %s" % param ) name_classes = [] for param in params: new_class = type("NameType", (NameTypeHolder,), {}) new_class.name = param.start # When the given argument is a numpy's dtype instance. new_class.tpe = param.stop.type if isinstance(param.stop, np.dtype) else param.stop name_classes.append(new_class) return Tuple[tuple(name_classes)] if not isinstance(params, Iterable): params = [params] params = [param.type if isinstance(param, np.dtype) else param for param in params] return Tuple[tuple(params)] if (3, 5) <= sys.version_info < (3, 7): from typing import GenericMeta # type: ignore # This is a workaround to support variadic generic in DataFrame in Python 3.5+. # See https://github.com/python/typing/issues/193 # We wrap the input params by a tuple to mimic variadic generic. old_getitem = GenericMeta.__getitem__ # type: ignore def new_getitem(self, params): if hasattr(self, "is_dataframe"): return old_getitem(self, _create_tuple_for_frame_type(params)) else: return old_getitem(self, params) GenericMeta.__getitem__ = new_getitem # type: ignore class DataFrame(Frame, Generic[T]): """ Koalas DataFrame that corresponds to pandas DataFrame logically. This holds Spark DataFrame internally. :ivar _internal: an internal immutable Frame to manage metadata. :type _internal: InternalFrame Parameters ---------- data : numpy ndarray (structured or homogeneous), dict, pandas DataFrame, Spark DataFrame \ or Koalas Series Dict can contain Series, arrays, constants, or list-like objects If data is a dict, argument order is maintained for Python 3.6 and later. Note that if `data` is a pandas DataFrame, a Spark DataFrame, and a Koalas Series, other arguments should not be used. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = ks.DataFrame(data=d, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 Constructing DataFrame from pandas DataFrame >>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2'])) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. 
>>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = ks.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df2 # doctest: +SKIP a b c d e 0 3 1 4 9 8 1 4 8 4 8 4 2 7 6 5 6 7 3 8 7 9 1 0 4 2 5 4 3 9 """ def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if isinstance(data, InternalFrame): assert index is None assert columns is None assert dtype is None assert not copy internal = data elif isinstance(data, spark.DataFrame): assert index is None assert columns is None assert dtype is None assert not copy internal = InternalFrame(spark_frame=data, index_spark_columns=None) elif isinstance(data, ks.Series): assert index is None assert columns is None assert dtype is None assert not copy data = data.to_frame() internal = data._internal else: if isinstance(data, pd.DataFrame): assert index is None assert columns is None assert dtype is None assert not copy pdf = data else: pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy) internal = InternalFrame.from_pandas(pdf) object.__setattr__(self, "_internal_frame", internal) @property def _ksers(self): """ Return a dict of column label -> Series which anchors `self`. """ from databricks.koalas.series import Series if not hasattr(self, "_kseries"): object.__setattr__( self, "_kseries", {label: Series(data=self, index=label) for label in self._internal.column_labels}, ) else: kseries = self._kseries assert len(self._internal.column_labels) == len(kseries), ( len(self._internal.column_labels), len(kseries), ) if any(self is not kser._kdf for kser in kseries.values()): # Refresh the dict to contain only Series anchoring `self`. self._kseries = { label: kseries[label] if self is kseries[label]._kdf else Series(data=self, index=label) for label in self._internal.column_labels } return self._kseries @property def _internal(self) -> InternalFrame: return self._internal_frame def _update_internal_frame(self, internal: InternalFrame, requires_same_anchor: bool = True): """ Update InternalFrame with the given one. If the column_label is changed or the new InternalFrame is not the same `anchor`, disconnect the link to the Series and create a new one. If `requires_same_anchor` is `False`, checking whether or not the same anchor is ignored and force to update the InternalFrame, e.g., replacing the internal with the resolved_copy, updating the underlying Spark DataFrame which need to combine a different Spark DataFrame. 
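        For example (an illustrative note only, based on usages elsewhere in this
        module): ``DataFrame.pop`` calls
        ``self._update_internal_frame(self.drop(item)._internal)``. Column labels that
        no longer line up with the new InternalFrame get their cached Series re-anchored
        to a standalone DataFrame built from ``self._internal.select_column(old_label)``,
        so the popped column keeps working on its own while the remaining Series stay
        anchored to ``self``.
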
:param internal: the new InternalFrame :param requires_same_anchor: whether checking the same anchor """ from databricks.koalas.series import Series if hasattr(self, "_kseries"): kseries = {} for old_label, new_label in zip_longest( self._internal.column_labels, internal.column_labels ): if old_label is not None: kser = self._ksers[old_label] renamed = old_label != new_label not_same_anchor = requires_same_anchor and not same_anchor(internal, kser) if renamed or not_same_anchor: kdf = DataFrame(self._internal.select_column(old_label)) # type: DataFrame kser._update_anchor(kdf) kser = None else: kser = None if new_label is not None: if kser is None: kser = Series(data=self, index=new_label) kseries[new_label] = kser self._kseries = kseries self._internal_frame = internal if hasattr(self, "_repr_pandas_cache"): del self._repr_pandas_cache @property def ndim(self) -> int: """ Return an int representing the number of array dimensions. return 2 for DataFrame. Examples -------- >>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', None], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 NaN 7 8 >>> df.ndim 2 """ return 2 @property def axes(self) -> List: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=True, **kwargs): """ Applies sfun to each column and returns a pd.Series where the number of rows equal the number of columns. Parameters ---------- sfun : either an 1-arg function that takes a Column and returns a Column, or a 2-arg function that takes a Column and its DataType and returns a Column. axis: used only for sanity check because series only support index axis. name : original pandas API name. axis : axis to apply. 0 or 1, or 'index' or 'columns. numeric_only : bool, default True Include only float, int, boolean columns. False is not supported. This parameter is mainly for pandas compatibility. Only 'DataFrame.count' uses this parameter currently. """ from inspect import signature from databricks.koalas.series import Series, first_series axis = validate_axis(axis) if axis == 0: min_count = kwargs.get("min_count", 0) exprs = [F.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] new_column_labels = [] num_args = len(signature(sfun).parameters) for label in self._internal.column_labels: spark_column = self._internal.spark_column_for(label) spark_type = self._internal.spark_type_for(label) is_numeric_or_boolean = isinstance(spark_type, (NumericType, BooleanType)) keep_column = not numeric_only or is_numeric_or_boolean if keep_column: if num_args == 1: # Only pass in the column if sfun accepts only one arg scol = sfun(spark_column) else: # must be 2 assert num_args == 2 # Pass in both the column and its data type if sfun accepts two args scol = sfun(spark_column, spark_type) if min_count > 0: scol = F.when( Frame._count_expr(spark_column, spark_type) >= min_count, scol ) exprs.append(scol.alias(name_like_string(label))) new_column_labels.append(label) if len(exprs) == 1: return Series([]) sdf = self._internal.spark_frame.select(*exprs) # The data is expected to be small so it's fine to transpose/use default index. 
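            # Illustrative sketch (column names and values are made up): with
            # sfun=F.sum over numeric columns 'a' and 'b', `sdf` is a single-row
            # frame shaped like
            #
            #   +--------------------------+---+---+
            #   |<SPARK_DEFAULT_INDEX_NAME>|  a|  b|
            #   +--------------------------+---+---+
            #   |                      null|  3|  7|
            #   +--------------------------+---+---+
            #
            # and the transpose below turns that one row into a Series indexed
            # by the original column labels.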
with ks.option_context("compute.max_rows", 1): internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=new_column_labels, column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal).transpose()) else: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only, **kwargs) if len(pdf) <= limit: return Series(pser) @pandas_udf(returnType=as_spark_type(pser.dtype.type)) def calculate_columns_axis(*cols): return getattr(pd.concat(cols, axis=1), name)( axis=axis, numeric_only=numeric_only, **kwargs ) column_name = verify_temp_column_name( self._internal.spark_frame.select(self._internal.index_spark_columns), "__calculate_columns_axis__", ) sdf = self._internal.spark_frame.select( self._internal.index_spark_columns + [calculate_columns_axis(*self._internal.data_spark_columns).alias(column_name)] ) internal = InternalFrame( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, col) for col in self._internal.index_spark_column_names ], index_names=self._internal.index_names, index_dtypes=self._internal.index_dtypes, ) return first_series(DataFrame(internal)).rename(pser.name) def _kser_for(self, label): """ Create Series with a proper column label. The given label must be verified to exist in `InternalFrame.column_labels`. For example, in some method, self is like: >>> self = ks.range(3) `self._kser_for(label)` can be used with `InternalFrame.column_labels`: >>> self._kser_for(self._internal.column_labels[0]) 0 0 1 1 2 2 Name: id, dtype: int64 `self._kser_for(label)` must not be used directly with user inputs. In that case, `self[label]` should be used instead, which checks the label exists or not: >>> self['id'] 0 0 1 1 2 2 Name: id, dtype: int64 """ return self._ksers[label] def _apply_series_op(self, op, should_resolve: bool = False): applied = [] for label in self._internal.column_labels: applied.append(op(self._kser_for(label))) internal = self._internal.with_new_columns(applied) if should_resolve: internal = internal.resolved_copy return DataFrame(internal) # Arithmetic Operators def _map_series_op(self, op, other): from databricks.koalas.base import IndexOpsMixin if not isinstance(other, DataFrame) and ( isinstance(other, IndexOpsMixin) or is_sequence(other) ): raise ValueError( "%s with a sequence is currently not supported; " "however, got %s." 
% (op, type(other).__name__) ) if isinstance(other, DataFrame): if self._internal.column_labels_level != other._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") if not same_anchor(self, other): # Different DataFrames def apply_op(kdf, this_column_labels, that_column_labels): for this_label, that_label in zip(this_column_labels, that_column_labels): yield ( getattr(kdf._kser_for(this_label), op)( kdf._kser_for(that_label) ).rename(this_label), this_label, ) return align_diff_frames(apply_op, self, other, fillna=True, how="full") else: applied = [] column_labels = [] for label in self._internal.column_labels: if label in other._internal.column_labels: applied.append(getattr(self._kser_for(label), op)(other._kser_for(label))) else: applied.append( F.lit(None) .cast(self._internal.spark_type_for(label)) .alias(name_like_string(label)) ) column_labels.append(label) for label in other._internal.column_labels: if label not in column_labels: applied.append( F.lit(None) .cast(other._internal.spark_type_for(label)) .alias(name_like_string(label)) ) column_labels.append(label) internal = self._internal.with_new_columns(applied, column_labels=column_labels) return DataFrame(internal) else: return self._apply_series_op(lambda kser: getattr(kser, op)(other)) def __add__(self, other) -> "DataFrame": return self._map_series_op("add", other) def __radd__(self, other) -> "DataFrame": return self._map_series_op("radd", other) def __div__(self, other) -> "DataFrame": return self._map_series_op("div", other) def __rdiv__(self, other) -> "DataFrame": return self._map_series_op("rdiv", other) def __truediv__(self, other) -> "DataFrame": return self._map_series_op("truediv", other) def __rtruediv__(self, other) -> "DataFrame": return self._map_series_op("rtruediv", other) def __mul__(self, other) -> "DataFrame": return self._map_series_op("mul", other) def __rmul__(self, other) -> "DataFrame": return self._map_series_op("rmul", other) def __sub__(self, other) -> "DataFrame": return self._map_series_op("sub", other) def __rsub__(self, other) -> "DataFrame": return self._map_series_op("rsub", other) def __pow__(self, other) -> "DataFrame": return self._map_series_op("pow", other) def __rpow__(self, other) -> "DataFrame": return self._map_series_op("rpow", other) def __mod__(self, other) -> "DataFrame": return self._map_series_op("mod", other) def __rmod__(self, other) -> "DataFrame": return self._map_series_op("rmod", other) def __floordiv__(self, other) -> "DataFrame": return self._map_series_op("floordiv", other) def __rfloordiv__(self, other) -> "DataFrame": return self._map_series_op("rfloordiv", other) def __abs__(self) -> "DataFrame": return self._apply_series_op(lambda kser: abs(kser)) def __neg__(self) -> "DataFrame": return self._apply_series_op(lambda kser: -kser) def add(self, other) -> "DataFrame": return self + other # create accessor for plot plot = CachedAccessor("plot", KoalasPlotAccessor) # create accessor for Spark related methods. spark = CachedAccessor("spark", SparkFrameMethods) # create accessor for Koalas specific methods. 
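    # (Editorial note, assuming CachedAccessor here mirrors the pandas descriptor
    # of the same name: the accessor object is constructed lazily on first
    # attribute access and then cached on the instance, so `kdf.plot`,
    # `kdf.spark` and `kdf.koalas` are each built at most once per DataFrame.)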
koalas = CachedAccessor("koalas", KoalasFrameMethods) def hist(self, bins=10, **kwds): return self.plot.hist(bins, **kwds) hist.__doc__ = KoalasPlotAccessor.hist.__doc__ def kde(self, bw_method=None, ind=None, **kwds): return self.plot.kde(bw_method, ind, **kwds) kde.__doc__ = KoalasPlotAccessor.kde.__doc__ add.__doc__ = _flex_doc_FRAME.format( desc="Addition", op_name="+", equiv="dataframe + other", reverse="radd" ) def radd(self, other) -> "DataFrame": return other + self radd.__doc__ = _flex_doc_FRAME.format( desc="Addition", op_name="+", equiv="other + dataframe", reverse="add" ) def div(self, other) -> "DataFrame": return self / other div.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rdiv" ) divide = div def rdiv(self, other) -> "DataFrame": return other / self rdiv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="other / dataframe", reverse="div" ) def truediv(self, other) -> "DataFrame": return self / other truediv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rtruediv" ) def rtruediv(self, other) -> "DataFrame": return other / self rtruediv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="other / dataframe", reverse="truediv" ) def mul(self, other) -> "DataFrame": return self * other mul.__doc__ = _flex_doc_FRAME.format( desc="Multiplication", op_name="*", equiv="dataframe * other", reverse="rmul" ) multiply = mul def rmul(self, other) -> "DataFrame": return other * self rmul.__doc__ = _flex_doc_FRAME.format( desc="Multiplication", op_name="*", equiv="other * dataframe", reverse="mul" ) def sub(self, other) -> "DataFrame": return self - other sub.__doc__ = _flex_doc_FRAME.format( desc="Subtraction", op_name="-", equiv="dataframe - other", reverse="rsub" ) subtract = sub def rsub(self, other) -> "DataFrame": return other - self rsub.__doc__ = _flex_doc_FRAME.format( desc="Subtraction", op_name="-", equiv="other - dataframe", reverse="sub" ) def mod(self, other) -> "DataFrame": return self % other mod.__doc__ = _flex_doc_FRAME.format( desc="Modulo", op_name="%", equiv="dataframe % other", reverse="rmod" ) def rmod(self, other) -> "DataFrame": return other % self rmod.__doc__ = _flex_doc_FRAME.format( desc="Modulo", op_name="%", equiv="other % dataframe", reverse="mod" ) def pow(self, other) -> "DataFrame": return self ** other pow.__doc__ = _flex_doc_FRAME.format( desc="Exponential power of series", op_name="**", equiv="dataframe ** other", reverse="rpow" ) def rpow(self, other) -> "DataFrame": return other ** self rpow.__doc__ = _flex_doc_FRAME.format( desc="Exponential power", op_name="**", equiv="other ** dataframe", reverse="pow" ) def floordiv(self, other) -> "DataFrame": return self // other floordiv.__doc__ = _flex_doc_FRAME.format( desc="Integer division", op_name="//", equiv="dataframe // other", reverse="rfloordiv" ) def rfloordiv(self, other) -> "DataFrame": return other // self rfloordiv.__doc__ = _flex_doc_FRAME.format( desc="Integer division", op_name="//", equiv="other // dataframe", reverse="floordiv" ) # Comparison Operators def __eq__(self, other) -> "DataFrame": # type: ignore return self._map_series_op("eq", other) def __ne__(self, other) -> "DataFrame": # type: ignore return self._map_series_op("ne", other) def __lt__(self, other) -> "DataFrame": return self._map_series_op("lt", other) def __le__(self, other) -> "DataFrame": return self._map_series_op("le", other) def 
__ge__(self, other) -> "DataFrame": return self._map_series_op("ge", other) def __gt__(self, other) -> "DataFrame": return self._map_series_op("gt", other) def eq(self, other) -> "DataFrame": """ Compare if the current value is equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.eq(1) a b a True True b False False c False True d False False """ return self == other equals = eq def gt(self, other) -> "DataFrame": """ Compare if the current value is greater than the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.gt(2) a b a False False b False False c True False d True False """ return self > other def ge(self, other) -> "DataFrame": """ Compare if the current value is greater than or equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ge(1) a b a True True b True False c True True d True False """ return self >= other def lt(self, other) -> "DataFrame": """ Compare if the current value is less than the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.lt(1) a b a False False b False False c False False d False False """ return self < other def le(self, other) -> "DataFrame": """ Compare if the current value is less than or equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.le(2) a b a True True b True False c False True d False False """ return self <= other def ne(self, other) -> "DataFrame": """ Compare if the current value is not equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ne(1) a b a False False b True True c True False d True True """ return self != other def applymap(self, func) -> "DataFrame": """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> np.int32: ... return x ** 2 Koalas uses return type hint and does not try to infer the type. Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. Examples -------- >>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> def str_len(x) -> int: ... return len(str(x)) >>> df.applymap(str_len) 0 1 0 3 4 1 5 5 >>> def power(x) -> float: ... return x ** 2 >>> df.applymap(power) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 You can omit the type hint and let Koalas infer its type. >>> df.applymap(lambda x: x ** 2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ # TODO: We can implement shortcut theoretically since it creates new DataFrame # anyway and we don't have to worry about operations on different DataFrames. return self._apply_series_op(lambda kser: kser.apply(func)) # TODO: not all arguments are implemented comparing to pandas' for now. 
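    # (Editorial sketch of the aggregate/agg path below, hedged since
    # GroupBy._spark_groupby is defined elsewhere: the `func` argument is
    # normalised to a dict of {column label -> list of function-name strings},
    # handed to GroupBy._spark_groupby for a Spark-side aggregation without
    # group keys, and the resulting one-row frame is stacked so that the
    # function names become the row index -- e.g. df.agg({'A': ['sum', 'min']})
    # comes back indexed by the function names, as in the docstring examples.)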
def aggregate( self, func: Union[List[str], Dict[Any, List[str]]] ) -> Union["Series", "DataFrame", "Index"]: """Aggregate using one or more operations over the specified axis. Parameters ---------- func : dict or a list a dict mapping from column name (string) to aggregate functions (list of strings). If a list is given, the aggregation is performed against all columns. Returns ------- DataFrame Notes ----- `agg` is an alias for `aggregate`. Use the alias. See Also -------- DataFrame.apply : Invoke function on DataFrame. DataFrame.transform : Only perform transforming type operations. DataFrame.groupby : Perform operations over groups. Series.aggregate : The equivalent function for Series. Examples -------- >>> df = ks.DataFrame([[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... [np.nan, np.nan, np.nan]], ... columns=['A', 'B', 'C']) >>> df A B C 0 1.0 2.0 3.0 1 4.0 5.0 6.0 2 7.0 8.0 9.0 3 NaN NaN NaN Aggregate these functions over the rows. >>> df.agg(['sum', 'min'])[['A', 'B', 'C']].sort_index() A B C min 1.0 2.0 3.0 sum 12.0 15.0 18.0 Different aggregations per column. >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']].sort_index() A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN For multi-index columns: >>> df.columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")]) >>> df.agg(['sum', 'min'])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index() X Y A B C min 1.0 2.0 3.0 sum 12.0 15.0 18.0 >>> aggregated = df.agg({("X", "A") : ['sum', 'min'], ("X", "B") : ['min', 'max']}) >>> aggregated[[("X", "A"), ("X", "B")]].sort_index() # doctest: +NORMALIZE_WHITESPACE X A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN """ from databricks.koalas.groupby import GroupBy if isinstance(func, list): if all((isinstance(f, str) for f in func)): func = dict([(column, func) for column in self.columns]) else: raise ValueError( "If the given function is a list, it " "should only contains function names as strings." ) if not isinstance(func, dict) or not all( is_name_like_value(key) and ( isinstance(value, str) or (isinstance(value, list) and all(isinstance(v, str) for v in value)) ) for key, value in func.items() ): raise ValueError( "aggs must be a dict mapping from column name to aggregate " "functions (string or list of strings)." ) with option_context("compute.default_index_type", "distributed"): kdf = DataFrame(GroupBy._spark_groupby(self, func)) # type: DataFrame # The codes below basically converts: # # A B # sum min min max # 0 12.0 1.0 2.0 8.0 # # to: # A B # max NaN 8.0 # min 1.0 2.0 # sum 12.0 NaN # # Aggregated output is usually pretty much small. if LooseVersion(pyspark.__version__) >= LooseVersion("2.4"): return kdf.stack().droplevel(0)[list(func.keys())] else: pdf = kdf._to_internal_pandas().stack() pdf.index = pdf.index.droplevel() return ks.from_pandas(pdf[list(func.keys())]) agg = aggregate def corr(self, method="pearson") -> Union["Series", "DataFrame", "Index"]: """ Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- y : DataFrame See Also -------- Series.corr Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.corr('pearson') dogs cats dogs 1.000000 -0.851064 cats -0.851064 1.000000 >>> df.corr('spearman') dogs cats dogs 1.000000 -0.948683 cats -0.948683 1.000000 Notes ----- There are behavior differences between Koalas and pandas. 
* the `method` argument only accepts 'pearson', 'spearman' * the data should not contain NaNs. Koalas will return an error. * Koalas doesn't support the following argument(s). * `min_periods` argument is not supported """ return ks.from_pandas(corr(self, method)) def iteritems(self) -> Iterator: """ Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Returns ------- label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. Examples -------- >>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala'], ... columns=['species', 'population']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content.to_string()) ... label: species content: panda bear polar bear koala marsupial label: population content: panda 1864 polar 22000 koala 80000 """ return ( (label if len(label) > 1 else label[0], self._kser_for(label)) for label in self._internal.column_labels ) def iterrows(self) -> Iterator: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : pandas.Series The data of the row as a Series. it : generator A generator that iterates over the rows of the frame. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = ks.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns internal_index_columns = self._internal.index_spark_column_names internal_data_columns = self._internal.data_spark_column_names def extract_kv_from_spark_row(row): k = ( row[internal_index_columns[0]] if len(internal_index_columns) == 1 else tuple(row[c] for c in internal_index_columns) ) v = [row[c] for c in internal_data_columns] return k, v for k, v in map( extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator() ): s = pd.Series(v, index=columns, name=k) yield k, s def itertuples(self, index: bool = True, name: Optional[str] = "Koalas") -> Iterator: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Koalas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. 
DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. On python versions < 3.7 regular tuples are returned for DataFrames with a large number of columns (>254). Examples -------- >>> df = ks.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Koalas(Index='dog', num_legs=4, num_wings=0) Koalas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Koalas(num_legs=4, num_wings=0) Koalas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ fields = list(self.columns) if index: fields.insert(0, "Index") index_spark_column_names = self._internal.index_spark_column_names data_spark_column_names = self._internal.data_spark_column_names def extract_kv_from_spark_row(row): k = ( row[index_spark_column_names[0]] if len(index_spark_column_names) == 1 else tuple(row[c] for c in index_spark_column_names) ) v = [row[c] for c in data_spark_column_names] return k, v can_return_named_tuples = sys.version_info >= (3, 7) or len(self.columns) + index < 255 if name is not None and can_return_named_tuples: itertuple = namedtuple(name, fields, rename=True) # type: ignore for k, v in map( extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator(), ): yield itertuple._make(([k] if index else []) + list(v)) else: for k, v in map( extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator(), ): yield tuple(([k] if index else []) + list(v)) def items(self) -> Iterator: """This is an alias of ``iteritems``.""" return self.iteritems() def to_clipboard(self, excel=True, sep=None, **kwargs) -> None: """ Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. .. note:: This method should only be used if the resulting DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- excel : bool, default True - True, use the provided separator, writing in a csv format for allowing easy pasting into excel. - False, write a string representation of the object to the clipboard. sep : str, default ``'\\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules) - Windows : none - OS X : none See Also -------- read_clipboard : Read text from clipboard. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... 
# 4,5,6 This function also works for Series: >>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # 0, 1 ... # 1, 2 ... # 2, 3 ... # 3, 4 ... # 4, 5 ... # 5, 6 ... # 6, 7 """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args ) def to_html( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal=".", bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False, ) -> Optional[str]: """ Render a DataFrame as an HTML table. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links (only works with pandas 0.24+). 
Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_string : Convert DataFrame to a string. """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args ) def to_string( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal=".", line_width=None, ) -> Optional[str]: """ Render a DataFrame to a console-friendly tabular output. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. line_width : int, optional Width to wrap a line in characters. Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2']) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 >>> print(df.to_string(max_rows=2)) col1 col2 0 1 4 1 2 5 """ # Make sure locals() call is at the top of the function so we don't capture local variables. 
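        # (Clarifying note: validate_arguments_and_invoke_function receives this
        # locals() mapping, filters it against the installed pandas signature and
        # forwards the remaining names to pandas.DataFrame.to_string, so any extra
        # local defined before this point would leak into that argument mapping.)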
args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args ) def to_dict(self, orient="dict", into=dict) -> Union[List, Mapping]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). .. note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2'], ... columns=['col1', 'col2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df_dict = df.to_dict() >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])] You can specify the return orientation. >>> df_dict = df.to_dict('series') >>> sorted(df_dict.items()) [('col1', row1 1 row2 2 Name: col1, dtype: int64), ('col2', row1 0.50 row2 0.75 Name: col2, dtype: float64)] >>> df_dict = df.to_dict('split') >>> sorted(df_dict.items()) # doctest: +ELLIPSIS [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])] >>> df_dict = df.to_dict('records') >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]] >>> df_dict = df.to_dict('index') >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])] You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \ ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS [defaultdict(<class 'list'>, {'col..., 'col...}), \ defaultdict(<class 'list'>, {'col..., 'col...})] """ # Make sure locals() call is at the top of the function so we don't capture local variables. 
args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args ) def to_latex( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal=".", multicolumn=None, multicolumn_format=None, multirow=None, ) -> Optional[str]: r""" Render an object to a LaTeX tabular environment table. Render an object to a tabular environment table. You can splice this into a LaTeX document. Requires usepackage{booktabs}. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, consider alternative formats. Parameters ---------- buf : file descriptor or None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default ‘NaN’ Missing data representation. formatters : list of functions or dict of {str: function}, optional Formatter functions to apply to columns’ elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : str, optional Format string for floating point numbers. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By default, ‘l’ will be used for all columns except columns of numbers, which default to ‘r’. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special characters in column names. encoding : str, optional A string representing the encoding to use in the output file, defaults to ‘ascii’ on Python 2 and ‘utf-8’ on Python 3. decimal : str, default ‘.’ Character recognized as decimal separator, e.g. ‘,’ in Europe. multicolumn : bool, default True Use multicolumn to enhance MultiIndex columns. The default will be read from the config module. multicolumn_format : str, default ‘l’ The alignment for multicolumns, similar to column_format The default will be read from the config module. multirow : bool, default False Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. Returns ------- str or None If buf is None, returns the resulting LateX format as a string. Otherwise returns None. 
See Also -------- DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Examples -------- >>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}, ... columns=['name', 'mask', 'weapon']) >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE \begin{tabular}{lll} \toprule name & mask & weapon \\ \midrule Raphael & red & sai \\ Donatello & purple & bo staff \\ \bottomrule \end{tabular} <BLANKLINE> """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args ) # TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic # when creating arrays) def transpose(self) -> "DataFrame": """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from databricks.koalas.config import option_context >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE ... ks.DataFrame({'a': range(1001)}).transpose() Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Returns ------- DataFrame The transposed DataFrame. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the coerced dtype. For instance, if int and float have to be placed in same column, it becomes float. If type coercion is not possible, it fails. Also, note that the values in index should be unique because they become unique column names. In addition, if Spark 2.3 is used, the types should always be exactly same. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = ks.DataFrame(data=d1, columns=['col1', 'col2']) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T.sort_index() # doctest: +SKIP >>> df1_transposed # doctest: +SKIP 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes # doctest: +SKIP 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'score': [9.5, 8], ... 'kids': [0, 0], ... 
'age': [12, 22]} >>> df2 = ks.DataFrame(data=d2, columns=['score', 'kids', 'age']) >>> df2 score kids age 0 9.5 0 12 1 8.0 0 22 >>> df2_transposed = df2.T.sort_index() # doctest: +SKIP >>> df2_transposed # doctest: +SKIP 0 1 age 12.0 22.0 kids 0.0 0.0 score 9.5 8.0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the coerced dtype: >>> df2.dtypes score float64 kids int64 age int64 dtype: object >>> df2_transposed.dtypes # doctest: +SKIP 0 float64 1 float64 dtype: object """ max_compute_count = get_option("compute.max_rows") if max_compute_count is not None: pdf = self.head(max_compute_count + 1)._to_internal_pandas() if len(pdf) > max_compute_count: raise ValueError( "Current DataFrame has more then the given limit {0} rows. " "Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' " "to retrieve to retrieve more than {0} rows. Note that, before changing the " "'compute.max_rows', this operation is considerably expensive.".format( max_compute_count ) ) return DataFrame(pdf.transpose()) # Explode the data to be pairs. # # For instance, if the current input DataFrame is as below: # # +------+------+------+------+------+ # |index1|index2|(a,x1)|(a,x2)|(b,x3)| # +------+------+------+------+------+ # | y1| z1| 1| 0| 0| # | y2| z2| 0| 50| 0| # | y3| z3| 3| 2| 1| # +------+------+------+------+------+ # # Output of `exploded_df` becomes as below: # # +-----------------+-----------------+-----------------+-----+ # | index|__index_level_0__|__index_level_1__|value| # +-----------------+-----------------+-----------------+-----+ # |{"a":["y1","z1"]}| a| x1| 1| # |{"a":["y1","z1"]}| a| x2| 0| # |{"a":["y1","z1"]}| b| x3| 0| # |{"a":["y2","z2"]}| a| x1| 0| # |{"a":["y2","z2"]}| a| x2| 50| # |{"a":["y2","z2"]}| b| x3| 0| # |{"a":["y3","z3"]}| a| x1| 3| # |{"a":["y3","z3"]}| a| x2| 2| # |{"a":["y3","z3"]}| b| x3| 1| # +-----------------+-----------------+-----------------+-----+ pairs = F.explode( F.array( *[ F.struct( [ F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label) ] + [self._internal.spark_column_for(label).alias("value")] ) for label in self._internal.column_labels ] ) ) exploded_df = self._internal.spark_frame.withColumn("pairs", pairs).select( [ F.to_json( F.struct( F.array([scol for scol in self._internal.index_spark_columns]).alias("a") ) ).alias("index"), F.col("pairs.*"), ] ) # After that, executes pivot with key and its index column. # Note that index column should contain unique values since column names # should be unique. 
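        # Continuing the sketch above (illustrative values), pivoting `exploded_df`
        # on the JSON-encoded "index" column and taking F.first of "value" yields
        # one column per original row:
        #
        #   +-----------------+-----------------+-----------------+-----------------+-----------------+
        #   |__index_level_0__|__index_level_1__|{"a":["y1","z1"]}|{"a":["y2","z2"]}|{"a":["y3","z3"]}|
        #   +-----------------+-----------------+-----------------+-----------------+-----------------+
        #   |                a|               x1|                1|                0|                3|
        #   |                a|               x2|                0|               50|                2|
        #   |                b|               x3|                0|                0|                1|
        #   +-----------------+-----------------+-----------------+-----------------+-----------------+
        #
        # which is the transposed frame; the JSON-encoded index values in the
        # column names are decoded back into the new column labels right below.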
internal_index_columns = [ SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level) ] pivoted_df = exploded_df.groupBy(internal_index_columns).pivot("index") transposed_df = pivoted_df.agg(F.first(F.col("value"))) new_data_columns = list( filter(lambda x: x not in internal_index_columns, transposed_df.columns) ) column_labels = [ None if len(label) == 1 and label[0] is None else label for label in (tuple(json.loads(col)["a"]) for col in new_data_columns) ] internal = InternalFrame( spark_frame=transposed_df, index_spark_columns=[scol_for(transposed_df, col) for col in internal_index_columns], index_names=self._internal.column_label_names, column_labels=column_labels, data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns], column_label_names=self._internal.index_names, ) return DataFrame(internal) T = property(transpose) def apply_batch(self, func, args=(), **kwds) -> "DataFrame": warnings.warn( "DataFrame.apply_batch is deprecated as of DataFrame.koalas.apply_batch. " "Please use the API instead.", FutureWarning, ) return self.koalas.apply_batch(func, args=args, **kwds) apply_batch.__doc__ = KoalasFrameMethods.apply_batch.__doc__ # TODO: Remove this API when Koalas 2.0.0. def map_in_pandas(self, func) -> "DataFrame": warnings.warn( "DataFrame.map_in_pandas is deprecated as of DataFrame.koalas.apply_batch. " "Please use the API instead.", FutureWarning, ) return self.koalas.apply_batch(func) map_in_pandas.__doc__ = KoalasFrameMethods.apply_batch.__doc__ def apply(self, func, axis=0, args=(), **kwds) -> Union["Series", "DataFrame", "Index"]: """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: when `axis` is 0 or 'index', the `func` is unable to access to the whole input series. Koalas internally splits the input series into multiple batches and calls `func` with each batch multiple times. Therefore, operations such as global aggregations are impossible. See the example below. >>> # This case does not return the length of whole series but of the batch internally ... # used. ... def length(s) -> int: ... return len(s) ... >>> df = ks.DataFrame({'A': range(1000)}) >>> df.apply(length, axis=0) # doctest: +SKIP 0 83 1 83 2 83 ... 10 83 11 83 dtype: int32 .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify the return type as `Series` or scalar value in ``func``, for instance, as below: >>> def square(s) -> ks.Series[np.int32]: ... return s ** 2 Koalas uses return type hint and does not try to infer the type. In case when axis is 1, it requires to specify `DataFrame` or scalar value with type hints as below: >>> def plus_one(x) -> ks.DataFrame[float, float]: ... return x + 1 If the return type is specified as `DataFrame`, the output column names become `c0, c1, c2 ... cn`. These names are positionally mapped to the returned DataFrame in ``func``. To specify the column names, you can assign them in a pandas friendly style as below: >>> def plus_one(x) -> ks.DataFrame["a": float, "b": float]: ... return x + 1 >>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}) >>> def plus_one(x) -> ks.DataFrame[zip(pdf.dtypes, pdf.columns)]: ... 
return x + 1 However, this way switches the index type to default index type in the output because the type hint cannot express the index type at this moment. Use `reset_index()` to keep index as a workaround. Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwds Additional keyword arguments to pass as keywords arguments to `func`. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap : For elementwise operations. DataFrame.aggregate : Only perform aggregating type operations. DataFrame.transform : Only perform transforming type operations. Series.apply : The equivalent function for Series. Examples -------- >>> df = ks.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> def sqrt(x) -> ks.Series[float]: ... return np.sqrt(x) ... >>> df.apply(sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 You can omit the type hint and let Koalas infer its type. >>> df.apply(np.sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 When `axis` is 1 or 'columns', it applies the function for each row. >>> def summation(x) -> np.int64: ... return np.sum(x) ... >>> df.apply(summation, axis=1) 0 13 1 13 2 13 dtype: int64 Likewise, you can omit the type hint and let Koalas infer its type. >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 >>> df.apply(max, axis=1) 0 9 1 9 2 9 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object In order to specify the types when `axis` is '1', it should use DataFrame[...] annotation. In this case, the column names are automatically generated. >>> def identify(x) -> ks.DataFrame['A': np.int64, 'B': np.int64]: ... return x ... >>> df.apply(identify, axis=1) A B 0 4 9 1 4 9 2 4 9 You can also specify extra arguments. >>> def plus_two(a, b, c) -> ks.DataFrame[np.int64, np.int64]: ... return a + b + c ... >>> df.apply(plus_two, axis=1, args=(1,), c=3) c0 c1 0 8 13 1 8 13 2 8 13 """ from databricks.koalas.groupby import GroupBy from databricks.koalas.series import first_series if not isinstance(func, types.FunctionType): assert callable(func), "the first argument should be a callable function." f = func func = lambda *args, **kwargs: f(*args, **kwargs) axis = validate_axis(axis) should_return_series = False spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None should_use_map_in_pandas = LooseVersion(pyspark.__version__) >= "3.0" def apply_func(pdf): pdf_or_pser = pdf.apply(func, axis=axis, args=args, **kwds) if isinstance(pdf_or_pser, pd.Series): return pdf_or_pser.to_frame() else: return pdf_or_pser self_applied = DataFrame(self._internal.resolved_copy) # type: "DataFrame" column_labels = None # type: Optional[List[Tuple]] if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. 
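            # Rough flow of this inference branch (descriptive sketch of the code
            # below, not new behaviour):
            #   pdf = self_applied.head(limit + 1)._to_internal_pandas()
            #   applied = pdf.apply(func, ...)      # run func once on a local sample
            #   if len(pdf) <= limit:
            #       return ks.from_pandas(applied)  # small data: pandas shortcut
            #   # otherwise the sample's Spark schema becomes `return_schema` for
            #   # the distributed mapInPandas / group-map execution further down.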
limit = get_option("compute.shortcut_limit") pdf = self_applied.head(limit + 1)._to_internal_pandas() applied = pdf.apply(func, axis=axis, args=args, **kwds) kser_or_kdf = ks.from_pandas(applied) if len(pdf) <= limit: return kser_or_kdf kdf = kser_or_kdf if isinstance(kser_or_kdf, ks.Series): should_return_series = True kdf = kser_or_kdf._kdf return_schema = force_decimal_precision_scale( as_nullable_spark_type(kdf._internal.to_internal_spark_frame.schema) ) if should_use_map_in_pandas: output_func = GroupBy._make_pandas_df_builder_func( self_applied, apply_func, return_schema, retain_index=True ) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas( lambda iterator: map(output_func, iterator), schema=return_schema ) else: sdf = GroupBy._spark_group_map_apply( self_applied, apply_func, (F.spark_partition_id(),), return_schema, retain_index=True, ) # If schema is inferred, we can restore indexes too. internal = kdf._internal.with_new_sdf(sdf) else: return_type = infer_return_type(func) return_schema = return_type.tpe require_index_axis = isinstance(return_type, SeriesType) require_column_axis = isinstance(return_type, DataFrameType) if require_index_axis: if axis != 0: raise TypeError( "The given function should specify a scalar or a series as its type " "hints when axis is 0 or 'index'; however, the return type " "was %s" % return_sig ) fields_types = zip( self_applied.columns, [return_schema] * len(self_applied.columns) ) return_schema = StructType([StructField(c, t) for c, t in fields_types]) elif require_column_axis: if axis != 1: raise TypeError( "The given function should specify a scalar or a frame as its type " "hints when axis is 1 or 'column'; however, the return type " "was %s" % return_sig ) else: # any axis is fine. should_return_series = True return_schema = StructType([StructField(SPARK_DEFAULT_SERIES_NAME, return_schema)]) column_labels = [None] if should_use_map_in_pandas: output_func = GroupBy._make_pandas_df_builder_func( self_applied, apply_func, return_schema, retain_index=False ) sdf = self_applied._internal.to_internal_spark_frame.mapInPandas( lambda iterator: map(output_func, iterator), schema=return_schema ) else: sdf = GroupBy._spark_group_map_apply( self_applied, apply_func, (F.spark_partition_id(),), return_schema, retain_index=False, ) # Otherwise, it loses index. internal = InternalFrame( spark_frame=sdf, index_spark_columns=None, column_labels=column_labels ) result = DataFrame(internal) # type: "DataFrame" if should_return_series: return first_series(result) else: return result def transform(self, func, axis=0, *args, **kwargs) -> "DataFrame": """ Call ``func`` on self producing a Series with transformed values and that has the same length as its input. See also `Transform and apply a function <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> ks.Series[np.int32]: ... return x ** 2 Koalas uses return type hint and does not try to infer the type. .. note:: the series within ``func`` is actually multiple pandas series as the segments of the whole Koalas series; therefore, the length of each series is not guaranteed. As an example, an aggregation against each series does work as a global aggregation but an aggregation of each segment. 
See below: >>> def func(x) -> ks.Series[np.int32]: ... return x + sum(x) Parameters ---------- func : function Function to use for transforming the data. It must work when pandas Series is passed. axis : int, default 0 or 'index' Can only be set to 0 at the moment. *args Positional arguments to pass to func. **kwargs Keyword arguments to pass to func. Returns ------- DataFrame A DataFrame that must have the same length as self. Raises ------ Exception : If the returned DataFrame has a different length than self. See Also -------- DataFrame.aggregate : Only perform aggregating type operations. DataFrame.apply : Invoke function on DataFrame. Series.transform : The equivalent function for Series. Examples -------- >>> df = ks.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B']) >>> df A B 0 0 1 1 1 2 2 2 3 >>> def square(x) -> ks.Series[np.int32]: ... return x ** 2 >>> df.transform(square) A B 0 0 1 1 1 4 2 4 9 You can omit the type hint and let Koalas infer its type. >>> df.transform(lambda x: x ** 2) A B 0 0 1 1 1 4 2 4 9 For multi-index columns: >>> df.columns = [('X', 'A'), ('X', 'B')] >>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 4 2 4 9 >>> (df * -1).transform(abs) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 2 2 2 3 You can also specify extra arguments. >>> def calculation(x, y, z) -> ks.Series[int]: ... return x ** y + z >>> df.transform(calculation, y=10, z=20) # doctest: +NORMALIZE_WHITESPACE X A B 0 20 21 1 21 1044 2 1044 59069 """ if not isinstance(func, types.FunctionType): assert callable(func), "the first argument should be a callable function." f = func func = lambda *args, **kwargs: f(*args, **kwargs) axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() transformed = pdf.transform(func, axis, *args, **kwargs) kdf = DataFrame(transformed) # type: "DataFrame" if len(pdf) <= limit: return kdf applied = [] for input_label, output_label in zip( self._internal.column_labels, kdf._internal.column_labels ): pudf = pandas_udf( lambda c: func(c, *args, **kwargs), returnType=force_decimal_precision_scale( as_nullable_spark_type(kdf._internal.spark_type_for(output_label)) ), functionType=PandasUDFType.SCALAR, ) kser = self._kser_for(input_label) applied.append(kser._with_new_scol(scol=pudf(kser.spark.column))) internal = self._internal.with_new_columns( applied, data_dtypes=kdf._internal.data_dtypes ) return DataFrame(internal) else: return self._apply_series_op( lambda kser: kser.koalas.transform_batch(func, *args, **kwargs) ) def transform_batch(self, func, *args, **kwargs) -> "DataFrame": warnings.warn( "DataFrame.transform_batch is deprecated as of DataFrame.koalas.transform_batch. " "Please use the API instead.", FutureWarning, ) return self.koalas.transform_batch(func, *args, **kwargs) transform_batch.__doc__ = KoalasFrameMethods.transform_batch.__doc__ def pop(self, item) -> "DataFrame": """ Return item and drop from frame. Raise KeyError if not found. Parameters ---------- item : str Label of column to be popped. 
Returns ------- Series Examples -------- >>> df = ks.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> df name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('class') 0 bird 1 bird 2 mammal 3 mammal Name: class, dtype: object >>> df name max_speed 0 falcon 389.0 1 parrot 24.0 2 lion 80.5 3 monkey NaN Also support for MultiIndex >>> df = ks.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df a b name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('a') name class 0 falcon bird 1 parrot bird 2 lion mammal 3 monkey mammal >>> df b max_speed 0 389.0 1 24.0 2 80.5 3 NaN """ result = self[item] self._update_internal_frame(self.drop(item)._internal) return result # TODO: add axis parameter can work when '1' or 'columns' def xs(self, key, axis=0, level=None) -> Union["DataFrame", "Series"]: """ Return cross-section from the DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : 0 or 'index', default 0 Axis to retrieve cross-section on. currently only support 0 or 'index' level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. Returns ------- DataFrame or Series Cross-section from the original DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 
'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = ks.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings locomotion walks 4 0 >>> df.xs(('mammal', 'dog', 'walks')) # doctest: +NORMALIZE_WHITESPACE num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class locomotion mammal walks 4 0 """ from databricks.koalas.series import first_series if not is_name_like_value(key): raise ValueError("'key' should be a scalar value or tuple that contains scalar values") if level is not None and is_name_like_tuple(key): raise KeyError(key) axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') if not is_name_like_tuple(key): key = (key,) if len(key) > self._internal.index_level: raise KeyError( "Key length ({}) exceeds index depth ({})".format( len(key), self._internal.index_level ) ) if level is None: level = 0 rows = [ self._internal.index_spark_columns[lvl] == index for lvl, index in enumerate(key, level) ] internal = self._internal.with_filter(reduce(lambda x, y: x & y, rows)) if len(key) == self._internal.index_level: kdf = DataFrame(internal) # type: DataFrame pdf = kdf.head(2)._to_internal_pandas() if len(pdf) == 0: raise KeyError(key) elif len(pdf) > 1: return kdf else: return first_series(DataFrame(pdf.transpose())) else: index_spark_columns = ( internal.index_spark_columns[:level] + internal.index_spark_columns[level + len(key) :] ) index_names = internal.index_names[:level] + internal.index_names[level + len(key) :] index_dtypes = internal.index_dtypes[:level] + internal.index_dtypes[level + len(key) :] internal = internal.copy( index_spark_columns=index_spark_columns, index_names=index_names, index_dtypes=index_dtypes, ).resolved_copy return DataFrame(internal) def where(self, cond, other=np.nan) -> "DataFrame": """ Replace values where the condition is False. Parameters ---------- cond : boolean DataFrame Where cond is True, keep the original value. Where False, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is False are replaced with corresponding value from other. 
Returns ------- DataFrame Examples -------- >>> from databricks.koalas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ks.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ks.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.where(df1 > 0).sort_index() A B 0 NaN 100.0 1 1.0 200.0 2 2.0 300.0 3 3.0 400.0 4 4.0 500.0 >>> df1.where(df1 > 1, 10).sort_index() A B 0 10 100 1 10 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df1 + 100).sort_index() A B 0 100 100 1 101 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df2).sort_index() A B 0 0 100 1 -1 200 2 2 300 3 3 400 4 4 500 When the column name of cond is different from self, it treats all values are False >>> cond = ks.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0 >>> cond C D 0 True False 1 False True 2 False False 3 True False 4 False True >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN When the type of cond is Series, it just check boolean regardless of column name >>> cond = ks.Series([1, 2]) > 1 >>> cond 0 False 1 True dtype: bool >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 1.0 200.0 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> reset_option("compute.ops_on_diff_frames") """ from databricks.koalas.series import Series tmp_cond_col_name = "__tmp_cond_col_{}__".format tmp_other_col_name = "__tmp_other_col_{}__".format kdf = self.copy() tmp_cond_col_names = [ tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels ] if isinstance(cond, DataFrame): cond = cond[ [ ( cond._internal.spark_column_for(label) if label in cond._internal.column_labels else F.lit(False) ).alias(name) for label, name in zip(self._internal.column_labels, tmp_cond_col_names) ] ] kdf[tmp_cond_col_names] = cond elif isinstance(cond, Series): cond = cond.to_frame() cond = cond[ [cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names] ] kdf[tmp_cond_col_names] = cond else: raise ValueError("type of cond must be a DataFrame or Series") tmp_other_col_names = [ tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels ] if isinstance(other, DataFrame): other = other[ [ ( other._internal.spark_column_for(label) if label in other._internal.column_labels else F.lit(np.nan) ).alias(name) for label, name in zip(self._internal.column_labels, tmp_other_col_names) ] ] kdf[tmp_other_col_names] = other elif isinstance(other, Series): other = other.to_frame() other = other[ [other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names] ] kdf[tmp_other_col_names] = other else: for label in self._internal.column_labels: kdf[tmp_other_col_name(name_like_string(label))] = other # above logic make spark dataframe looks like below: # +-----------------+---+---+------------------+-------------------+------------------+--... # |__index_level_0__| A| B|__tmp_cond_col_A__|__tmp_other_col_A__|__tmp_cond_col_B__|__... # +-----------------+---+---+------------------+-------------------+------------------+--... # | 0| 0|100| true| 0| false| ... # | 1| 1|200| false| -1| false| ... # | 3| 3|400| true| -3| false| ... # | 2| 2|300| false| -2| true| ... # | 4| 4|500| false| -4| false| ... # +-----------------+---+---+------------------+-------------------+------------------+--... 
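        # For each original column, keep its own value where the corresponding
        # "__tmp_cond_col" flag is true, and otherwise fall back to the corresponding
        # "__tmp_other_col" value; the temporary columns are then dropped by keeping
        # only the original column labels in the result.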
data_spark_columns = [] for label in self._internal.column_labels: data_spark_columns.append( F.when( kdf[tmp_cond_col_name(name_like_string(label))].spark.column, kdf._internal.spark_column_for(label), ) .otherwise(kdf[tmp_other_col_name(name_like_string(label))].spark.column) .alias(kdf._internal.spark_column_name_for(label)) ) return DataFrame( kdf._internal.with_new_columns( data_spark_columns, column_labels=self._internal.column_labels # TODO: dtypes? ) ) def mask(self, cond, other=np.nan) -> "DataFrame": """ Replace values where the condition is True. Parameters ---------- cond : boolean DataFrame Where cond is False, keep the original value. Where True, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is True are replaced with corresponding value from other. Returns ------- DataFrame Examples -------- >>> from databricks.koalas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ks.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ks.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.mask(df1 > 0).sort_index() A B 0 0.0 NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> df1.mask(df1 > 1, 10).sort_index() A B 0 0 10 1 1 10 2 10 10 3 10 10 4 10 10 >>> df1.mask(df1 > 1, df1 + 100).sort_index() A B 0 0 200 1 1 300 2 102 400 3 103 500 4 104 600 >>> df1.mask(df1 > 1, df2).sort_index() A B 0 0 -100 1 1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> reset_option("compute.ops_on_diff_frames") """ from databricks.koalas.series import Series if not isinstance(cond, (DataFrame, Series)): raise ValueError("type of cond must be a DataFrame or Series") cond_inversed = cond._apply_series_op(lambda kser: ~kser) return self.where(cond_inversed, other) @property def index(self) -> "Index": """The index (row labels) Column of the DataFrame. Currently not supported when the DataFrame has no index. See Also -------- Index """ from databricks.koalas.indexes.base import Index return Index._new_instance(self) @property def empty(self) -> bool: """ Returns true if the current DataFrame is empty. Otherwise, returns false. Examples -------- >>> ks.range(10).empty False >>> ks.range(0).empty True >>> ks.DataFrame({}, index=list('abc')).empty True """ return ( len(self._internal.column_labels) == 0 or self._internal.resolved_copy.spark_frame.rdd.isEmpty() ) @property def style(self) -> "Styler": """ Property returning a Styler object containing methods for building a styled HTML representation for the DataFrame. .. note:: currently it collects top 1000 rows and return its pandas `pandas.io.formats.style.Styler` instance. Examples -------- >>> ks.range(1001).style # doctest: +ELLIPSIS <pandas.io.formats.style.Styler object at ...> """ max_results = get_option("compute.max_rows") pdf = self.head(max_results + 1)._to_internal_pandas() if len(pdf) > max_results: warnings.warn("'style' property will only use top %s rows." % max_results, UserWarning) return pdf.head(max_results).style def set_index(self, keys, drop=True, append=False, inplace=False) -> Optional["DataFrame"]: """Set the DataFrame index (row labels) using one or more existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. 
Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index` and ``np.ndarray``. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. Examples -------- >>> df = ks.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 'sale': [55, 40, 84, 31]}, ... columns=['month', 'year', 'sale']) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 """ inplace = validate_bool_kwarg(inplace, "inplace") if is_name_like_tuple(keys): keys = [keys] elif is_name_like_value(keys): keys = [(keys,)] else: keys = [key if is_name_like_tuple(key) else (key,) for key in keys] columns = set(self._internal.column_labels) for key in keys: if key not in columns: raise KeyError(name_like_string(key)) if drop: column_labels = [label for label in self._internal.column_labels if label not in keys] else: column_labels = self._internal.column_labels if append: index_spark_columns = self._internal.index_spark_columns + [ self._internal.spark_column_for(label) for label in keys ] index_names = self._internal.index_names + keys index_dtypes = self._internal.index_dtypes + [ self._internal.dtype_for(label) for label in keys ] else: index_spark_columns = [self._internal.spark_column_for(label) for label in keys] index_names = keys index_dtypes = [self._internal.dtype_for(label) for label in keys] internal = self._internal.copy( index_spark_columns=index_spark_columns, index_names=index_names, index_dtypes=index_dtypes, column_labels=column_labels, data_spark_columns=[self._internal.spark_column_for(label) for label in column_labels], data_dtypes=[self._internal.dtype_for(label) for label in column_labels], ) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def reset_index( self, level=None, drop=False, inplace=False, col_level=0, col_fill="" ) -> Optional["DataFrame"]: """Reset the index, or a level of it. For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default. drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). 
col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- DataFrame DataFrame with the new index. See Also -------- DataFrame.set_index : Opposite of reset_index. Examples -------- >>> df = ks.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column. Unlike pandas, Koalas does not automatically add a sequential index. The following 0, 1, 2, 3 are only there when we display the DataFrame. >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = ks.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df # doctest: +NORMALIZE_WHITESPACE speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. We can place it in another level: >>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, ... col_fill='species') # doctest: +NORMALIZE_WHITESPACE species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, ... 
col_fill='genus') # doctest: +NORMALIZE_WHITESPACE genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, "inplace") multi_index = self._internal.index_level > 1 def rename(index): if multi_index: return ("level_{}".format(index),) else: if ("index",) not in self._internal.column_labels: return ("index",) else: return ("level_{}".format(index),) if level is None: new_column_labels = [ name if name is not None else rename(i) for i, name in enumerate(self._internal.index_names) ] new_data_spark_columns = [ scol.alias(name_like_string(label)) for scol, label in zip(self._internal.index_spark_columns, new_column_labels) ] new_data_dtypes = self._internal.index_dtypes index_spark_columns = [] index_names = [] index_dtypes = [] else: if is_list_like(level): level = list(level) if isinstance(level, int) or is_name_like_tuple(level): level = [level] elif is_name_like_value(level): level = [(level,)] else: level = [ lvl if isinstance(lvl, int) or is_name_like_tuple(lvl) else (lvl,) for lvl in level ] if all(isinstance(l, int) for l in level): for lev in level: if lev >= self._internal.index_level: raise IndexError( "Too many levels: Index has only {} level, not {}".format( self._internal.index_level, lev + 1 ) ) idx = level elif all(is_name_like_tuple(lev) for lev in level): idx = [] for l in level: try: i = self._internal.index_names.index(l) idx.append(i) except ValueError: if multi_index: raise KeyError("Level unknown not found") else: raise KeyError( "Level unknown must be same as name ({})".format( name_like_string(self._internal.index_names[0]) ) ) else: raise ValueError("Level should be all int or all string.") idx.sort() new_column_labels = [] new_data_spark_columns = [] new_data_dtypes = [] index_spark_columns = self._internal.index_spark_columns.copy() index_names = self._internal.index_names.copy() index_dtypes = self._internal.index_dtypes.copy() for i in idx[::-1]: name = index_names.pop(i) new_column_labels.insert(0, name if name is not None else rename(i)) scol = index_spark_columns.pop(i) new_data_spark_columns.insert(0, scol.alias(name_like_string(name))) new_data_dtypes.insert(0, index_dtypes.pop(i)) if drop: new_data_spark_columns = [] new_column_labels = [] new_data_dtypes = [] for label in new_column_labels: if label in self._internal.column_labels: raise ValueError("cannot insert {}, already exists".format(name_like_string(label))) if self._internal.column_labels_level > 1: column_depth = len(self._internal.column_labels[0]) if col_level >= column_depth: raise IndexError( "Too many levels: Index has only {} levels, not {}".format( column_depth, col_level + 1 ) ) if any(col_level + len(label) > column_depth for label in new_column_labels): raise ValueError("Item must have length equal to number of levels.") new_column_labels = [ tuple( ([col_fill] * col_level) + list(label) + ([col_fill] * (column_depth - (len(label) + col_level))) ) for label in new_column_labels ] internal = self._internal.copy( index_spark_columns=index_spark_columns, index_names=index_names, index_dtypes=index_dtypes, column_labels=new_column_labels + self._internal.column_labels, data_spark_columns=new_data_spark_columns + self._internal.data_spark_columns, data_dtypes=new_data_dtypes + self._internal.data_dtypes, ) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def isnull(self) -> "DataFrame": """ Detects missing values for items in the current 
Dataframe. Return a boolean same-sized Dataframe indicating if the values are NA. NA values, such as None or numpy.NaN, gets mapped to True values. Everything else gets mapped to False values. See Also -------- DataFrame.notnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.isnull() 0 1 0 False False 1 False True 2 False True 3 False False >>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']]) >>> df.isnull() 0 1 2 0 True False True 1 False True False """ return self._apply_series_op(lambda kser: kser.isnull()) isna = isnull def notnull(self) -> "DataFrame": """ Detects non-missing values for items in the current Dataframe. This function takes a dataframe and indicates whether it's values are valid (not missing, which is ``NaN`` in numeric datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike). See Also -------- DataFrame.isnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.notnull() 0 1 0 True True 1 True False 2 True False 3 True True >>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df.notnull() 0 1 2 0 True True True 1 True False True """ return self._apply_series_op(lambda kser: kser.notnull()) notna = notnull def insert( self, loc: int, column, value: Union[Scalar, "Series", Iterable], allow_duplicates: bool = False, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : int, Series, or array-like allow_duplicates : bool, optional Examples -------- >>> kdf = ks.DataFrame([1, 2, 3]) >>> kdf.sort_index() 0 0 1 1 2 2 3 >>> kdf.insert(0, 'x', 4) >>> kdf.sort_index() x 0 0 4 1 1 4 2 2 4 3 >>> from databricks.koalas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> kdf.insert(1, 'y', [5, 6, 7]) >>> kdf.sort_index() x y 0 0 4 5 1 1 4 6 2 2 4 7 3 >>> kdf.insert(2, 'z', ks.Series([8, 9, 10])) >>> kdf.sort_index() x y z 0 0 4 5 8 1 1 4 6 9 2 2 4 7 10 3 >>> reset_option("compute.ops_on_diff_frames") """ if not isinstance(loc, int): raise TypeError("loc must be int") assert 0 <= loc <= len(self.columns) assert allow_duplicates is False if not is_name_like_value(column): raise ValueError( '"column" should be a scalar value or tuple that contains scalar values' ) if is_name_like_tuple(column): if len(column) != len(self.columns.levels): # To be consistent with pandas raise ValueError('"column" must have length equal to number of column levels.') if column in self.columns: raise ValueError("cannot insert %s, already exists" % column) kdf = self.copy() kdf[column] = value columns = kdf.columns[:-1].insert(loc, kdf.columns[-1]) kdf = kdf[columns] self._update_internal_frame(kdf._internal) # TODO: add frep and axis parameter def shift(self, periods=1, fill_value=None) -> "DataFrame": """ Shift DataFrame by desired number of periods. .. note:: the current implementation of shift uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. 
fill_value : object, optional The scalar value to use for newly introduced missing values. The default depends on the dtype of self. For numeric data, np.nan is used. Returns ------- Copy of input DataFrame, shifted. Examples -------- >>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45], ... 'Col2': [13, 23, 18, 33, 48], ... 'Col3': [17, 27, 22, 37, 52]}, ... columns=['Col1', 'Col2', 'Col3']) >>> df.shift(periods=3) Col1 Col2 Col3 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 10.0 13.0 17.0 4 20.0 23.0 27.0 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 0 0 0 0 1 0 0 0 2 0 0 0 3 10 13 17 4 20 23 27 """ return self._apply_series_op( lambda kser: kser._shift(periods, fill_value), should_resolve=True ) # TODO: axis should support 1 or 'columns' either at this moment def diff(self, periods: int = 1, axis: Union[int, str] = 0) -> "DataFrame": """ First discrete difference of element. Calculates the difference of a DataFrame element compared with another element in the DataFrame (default is the element in the same column of the previous row). .. note:: the current implementation of diff uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. axis : int, default 0 or 'index' Can only be set to 0 at the moment. Returns ------- diffed : DataFrame Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6], ... 'b': [1, 1, 2, 3, 5, 8], ... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 2 1 4 2 3 2 9 3 4 3 16 4 5 5 25 5 6 8 36 >>> df.diff() a b c 0 NaN NaN NaN 1 1.0 0.0 3.0 2 1.0 1.0 5.0 3 1.0 1.0 7.0 4 1.0 2.0 9.0 5 1.0 3.0 11.0 Difference with previous column >>> df.diff(periods=3) a b c 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 3.0 2.0 15.0 4 3.0 4.0 21.0 5 3.0 6.0 27.0 Difference with following row >>> df.diff(periods=-1) a b c 0 -1.0 0.0 -3.0 1 -1.0 -1.0 -5.0 2 -1.0 -1.0 -7.0 3 -1.0 -2.0 -9.0 4 -1.0 -3.0 -11.0 5 NaN NaN NaN """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') return self._apply_series_op(lambda kser: kser._diff(periods), should_resolve=True) # TODO: axis should support 1 or 'columns' either at this moment def nunique( self, axis: Union[int, str] = 0, dropna: bool = True, approx: bool = False, rsd: float = 0.05, ) -> "Series": """ Return number of unique elements in the object. Excludes NA values by default. Parameters ---------- axis : int, default 0 or 'index' Can only be set to 0 at the moment. dropna : bool, default True Don’t include NaN in the count. approx: bool, default False If False, will use the exact algorithm and return the exact number of unique. If True, it uses the HyperLogLog approximate algorithm, which is significantly faster for large amount of data. Note: This parameter is specific to Koalas and is not found in pandas. rsd: float, default 0.05 Maximum estimation error allowed in the HyperLogLog algorithm. Note: Just like ``approx`` this parameter is specific to Koalas. Returns ------- The number of unique values per column as a Koalas Series. 
Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]}) >>> df.nunique() A 3 B 1 dtype: int64 >>> df.nunique(dropna=False) A 3 B 2 dtype: int64 On big data, we recommend using the approximate algorithm to speed up this function. The result will be very close to the exact unique count. >>> df.nunique(approx=True) A 3 B 1 dtype: int64 """ from databricks.koalas.series import first_series axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') sdf = self._internal.spark_frame.select( [F.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] + [ self._kser_for(label)._nunique(dropna, approx, rsd) for label in self._internal.column_labels ] ) # The data is expected to be small so it's fine to transpose/use default index. with ks.option_context("compute.max_rows", 1): internal = self._internal.copy( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], index_names=[None], index_dtypes=[None], data_spark_columns=[ scol_for(sdf, col) for col in self._internal.data_spark_column_names ], data_dtypes=None, ) return first_series(DataFrame(internal).transpose()) def round(self, decimals=0) -> "DataFrame": """ Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. .. note:: If `decimals` is a Series, it is expected to be small, as all the data is loaded into the driver's memory. Returns ------- DataFrame See Also -------- Series.round Examples -------- >>> df = ks.DataFrame({'A':[0.028208, 0.038683, 0.877076], ... 'B':[0.992815, 0.645646, 0.149370], ... 'C':[0.173891, 0.577595, 0.491027]}, ... columns=['A', 'B', 'C'], ... 
index=['first', 'second', 'third']) >>> df A B C first 0.028208 0.992815 0.173891 second 0.038683 0.645646 0.577595 third 0.877076 0.149370 0.491027 >>> df.round(2) A B C first 0.03 0.99 0.17 second 0.04 0.65 0.58 third 0.88 0.15 0.49 >>> df.round({'A': 1, 'C': 2}) A B C first 0.0 0.992815 0.17 second 0.0 0.645646 0.58 third 0.9 0.149370 0.49 >>> decimals = ks.Series([1, 0, 2], index=['A', 'B', 'C']) >>> df.round(decimals) A B C first 0.0 1.0 0.17 second 0.0 1.0 0.58 third 0.9 0.0 0.49 """ if isinstance(decimals, ks.Series): decimals = { k if isinstance(k, tuple) else (k,): v for k, v in decimals._to_internal_pandas().items() } elif isinstance(decimals, dict): decimals = {k if is_name_like_tuple(k) else (k,): v for k, v in decimals.items()} elif isinstance(decimals, int): decimals = {k: decimals for k in self._internal.column_labels} else: raise ValueError("decimals must be an integer, a dict-like or a Series") def op(kser): label = kser._column_label if label in decimals: return F.round(kser.spark.column, decimals[label]).alias( kser._internal.data_spark_column_names[0] ) else: return kser return self._apply_series_op(op) def _mark_duplicates(self, subset=None, keep="first"): if subset is None: subset = self._internal.column_labels else: if is_name_like_tuple(subset): subset = [subset] elif is_name_like_value(subset): subset = [(subset,)] else: subset = [sub if is_name_like_tuple(sub) else (sub,) for sub in subset] diff = set(subset).difference(set(self._internal.column_labels)) if len(diff) > 0: raise KeyError(", ".join([name_like_string(d) for d in diff])) group_cols = [self._internal.spark_column_name_for(label) for label in subset] sdf = self._internal.resolved_copy.spark_frame column = verify_temp_column_name(sdf, "__duplicated__") if keep == "first" or keep == "last": if keep == "first": ord_func = spark.functions.asc else: ord_func = spark.functions.desc window = ( Window.partitionBy(group_cols) .orderBy(ord_func(NATURAL_ORDER_COLUMN_NAME)) .rowsBetween(Window.unboundedPreceding, Window.currentRow) ) sdf = sdf.withColumn(column, F.row_number().over(window) > 1) elif not keep: window = Window.partitionBy(group_cols).rowsBetween( Window.unboundedPreceding, Window.unboundedFollowing ) sdf = sdf.withColumn(column, F.count("*").over(window) > 1) else: raise ValueError("'keep' only supports 'first', 'last' and False") return sdf, column def duplicated(self, subset=None, keep="first") -> "Series": """ Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : Series Examples -------- >>> df = ks.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]}, ... columns = ['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 1 1 1 2 1 1 1 3 3 4 5 >>> df.duplicated().sort_index() 0 False 1 True 2 True 3 False dtype: bool Mark duplicates as ``True`` except for the last occurrence. >>> df.duplicated(keep='last').sort_index() 0 True 1 True 2 False 3 False dtype: bool Mark all duplicates as ``True``. 
>>> df.duplicated(keep=False).sort_index() 0 True 1 True 2 True 3 False dtype: bool """ from databricks.koalas.series import first_series sdf, column = self._mark_duplicates(subset, keep) sdf = sdf.select( self._internal.index_spark_columns + [scol_for(sdf, column).alias(SPARK_DEFAULT_SERIES_NAME)] ) return first_series( DataFrame( InternalFrame( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, col) for col in self._internal.index_spark_column_names ], index_names=self._internal.index_names, index_dtypes=self._internal.index_dtypes, column_labels=[None], # type: ignore data_spark_columns=[scol_for(sdf, SPARK_DEFAULT_SERIES_NAME)], ) ) ) # TODO: support other as DataFrame or array-like def dot(self, other: "Series") -> "Series": """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series It can also be called using ``self @ other`` in Python >= 3.5. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from databricks.koalas.config import option_context >>> with option_context( ... 'compute.max_rows', 1000, "compute.ops_on_diff_frames", True ... ): # doctest: +NORMALIZE_WHITESPACE ... kdf = ks.DataFrame({'a': range(1001)}) ... kser = ks.Series([2], index=['a']) ... kdf.dot(kser) Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Parameters ---------- other : Series The other object to compute the matrix product with. Returns ------- Series Return the matrix product between self and other as a Series. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- >>> from databricks.koalas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> kdf = ks.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> kser = ks.Series([1, 1, 2, 1]) >>> kdf.dot(kser) 0 -4 1 5 dtype: int64 Note how shuffling of the objects does not change the result. >>> kser2 = kser.reindex([1, 0, 2, 3]) >>> kdf.dot(kser2) 0 -4 1 5 dtype: int64 >>> kdf @ kser2 0 -4 1 5 dtype: int64 >>> reset_option("compute.ops_on_diff_frames") """ if not isinstance(other, ks.Series): raise TypeError("Unsupported type {}".format(type(other).__name__)) else: return cast(ks.Series, other.dot(self.transpose())).rename(None) def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def to_koalas(self, index_col: Optional[Union[str, List[str]]] = None) -> "DataFrame": """ Converts the existing DataFrame into a Koalas DataFrame. This method is monkey-patched into Spark's DataFrame and can be used to convert a Spark DataFrame into a Koalas DataFrame. 
If running on an existing Koalas DataFrame, the method returns itself. If a Koalas DataFrame is converted to a Spark DataFrame and then back to Koalas, it will lose the index information and the original index will be turned into a normal column. Parameters ---------- index_col: str or list of str, optional, default: None Index column of table in Spark. See Also -------- DataFrame.to_spark Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 >>> spark_df = df.to_spark() >>> spark_df DataFrame[col1: bigint, col2: bigint] >>> kdf = spark_df.to_koalas() >>> kdf col1 col2 0 1 3 1 2 4 We can specify the index columns. >>> kdf = spark_df.to_koalas(index_col='col1') >>> kdf # doctest: +NORMALIZE_WHITESPACE col2 col1 1 3 2 4 Calling to_koalas on a Koalas DataFrame simply returns itself. >>> df.to_koalas() col1 col2 0 1 3 1 2 4 """ if isinstance(self, DataFrame): return self else: assert isinstance(self, spark.DataFrame), type(self) from databricks.koalas.namespace import _get_index_map index_spark_columns, index_names = _get_index_map(self, index_col) internal = InternalFrame( spark_frame=self, index_spark_columns=index_spark_columns, index_names=index_names ) return DataFrame(internal) def cache(self) -> "CachedDataFrame": warnings.warn( "DataFrame.cache is deprecated as of DataFrame.spark.cache. " "Please use the API instead.", FutureWarning, ) return self.spark.cache() cache.__doc__ = SparkFrameMethods.cache.__doc__ def persist(self, storage_level=StorageLevel.MEMORY_AND_DISK) -> "CachedDataFrame": warnings.warn( "DataFrame.persist is deprecated as of DataFrame.spark.persist. " "Please use the API instead.", FutureWarning, ) return self.spark.persist(storage_level) persist.__doc__ = SparkFrameMethods.persist.__doc__ def hint(self, name: str, *parameters) -> "DataFrame": warnings.warn( "DataFrame.hint is deprecated as of DataFrame.spark.hint. " "Please use the API instead.", FutureWarning, ) return self.spark.hint(name, *parameters) hint.__doc__ = SparkFrameMethods.hint.__doc__ def to_table( self, name: str, format: Optional[str] = None, mode: str = "overwrite", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options ) -> None: return self.spark.to_table(name, format, mode, partition_cols, index_col, **options) to_table.__doc__ = SparkFrameMethods.to_table.__doc__ def to_delta( self, path: str, mode: str = "overwrite", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options ) -> None: """ Write the DataFrame out as a Delta Lake table. Parameters ---------- path : str, required Path to write to. mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'overwrite'. Specifies the behavior of the save operation when the destination exists already. - 'append': Append the new data to existing data. - 'overwrite': Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. options : dict All other options passed directly into Delta Lake. 
See Also -------- read_delta DataFrame.to_parquet DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ks.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 Create a new Delta Lake table, partitioned by one column: >>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date') Partitioned by two columns: >>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country']) Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta: >>> df.to_delta('%s/to_delta/bar' % path, ... mode='overwrite', replaceWhere='date >= "2012-01-01"') """ if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore self.spark.to_spark_io( path=path, mode=mode, format="delta", partition_cols=partition_cols, index_col=index_col, **options, ) def to_parquet( self, path: str, mode: str = "overwrite", partition_cols: Optional[Union[str, List[str]]] = None, compression: Optional[str] = None, index_col: Optional[Union[str, List[str]]] = None, **options ) -> None: """ Write the DataFrame out as a Parquet file or directory. Parameters ---------- path : str, required Path to write to. mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'overwrite'. Specifies the behavior of the save operation when the destination exists already. - 'append': Append the new data to existing data. - 'overwrite': Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'} Compression codec to use when saving to file. If None is set, it uses the value specified in `spark.sql.parquet.compression.codec`. index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. options : dict All other options passed directly into Spark's data source. See Also -------- read_parquet DataFrame.to_delta DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ks.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date') >>> df.to_parquet( ... '%s/to_parquet/foo.parquet' % path, ... mode = 'overwrite', ... 
partition_cols=['date', 'country']) """ if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore builder = self.to_spark(index_col=index_col).write.mode(mode) if partition_cols is not None: builder.partitionBy(partition_cols) builder._set_opts(compression=compression) builder.options(**options).format("parquet").save(path) def to_orc( self, path: str, mode: str = "overwrite", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options ) -> None: """ Write the DataFrame out as a ORC file or directory. Parameters ---------- path : str, required Path to write to. mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'overwrite'. Specifies the behavior of the save operation when the destination exists already. - 'append': Append the new data to existing data. - 'overwrite': Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. options : dict All other options passed directly into Spark's data source. See Also -------- read_orc DataFrame.to_delta DataFrame.to_parquet DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ks.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_orc('%s/to_orc/foo.orc' % path, partition_cols='date') >>> df.to_orc( ... '%s/to_orc/foo.orc' % path, ... mode = 'overwrite', ... partition_cols=['date', 'country']) """ if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore self.spark.to_spark_io( path=path, mode=mode, format="orc", partition_cols=partition_cols, index_col=index_col, **options, ) def to_spark_io( self, path: Optional[str] = None, format: Optional[str] = None, mode: str = "overwrite", partition_cols: Optional[Union[str, List[str]]] = None, index_col: Optional[Union[str, List[str]]] = None, **options ) -> None: return self.spark.to_spark_io(path, format, mode, partition_cols, index_col, **options) to_spark_io.__doc__ = SparkFrameMethods.to_spark_io.__doc__ def to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame: return self.spark.frame(index_col) to_spark.__doc__ = SparkFrameMethods.__doc__ def to_pandas(self) -> pd.DataFrame: """ Return a pandas DataFrame. .. note:: This method should only be used if the resulting pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.to_pandas() dogs cats 0 0.2 0.3 1 0.0 0.6 2 0.6 0.0 3 0.2 0.1 """ return self._internal.to_pandas_frame.copy() # Alias to maintain backward compatibility with Spark def toPandas(self) -> pd.DataFrame: warnings.warn( "DataFrame.toPandas is deprecated as of DataFrame.to_pandas. 
" "Please use the API instead.", FutureWarning, ) return self.to_pandas() toPandas.__doc__ = to_pandas.__doc__ def assign(self, **kwargs) -> "DataFrame": """ Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable, Series or Index} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though Koalas doesn't check it). If the values are not callable, (e.g. a Series or a literal), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Examples -------- >>> df = ks.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence and you can also create multiple columns within the same assign. >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32, ... temp_k=df['temp_c'] + 273.15, ... temp_idx=df.index) >>> assigned[['temp_c', 'temp_f', 'temp_k', 'temp_idx']] temp_c temp_f temp_k temp_idx Portland 17.0 62.6 290.15 Portland Berkeley 25.0 77.0 298.15 Berkeley Notes ----- Assigning multiple columns within the same ``assign`` is possible but you cannot refer to newly created or modified columns. This feature is supported in pandas for Python 3.6 and later but not in Koalas. In Koalas, all items are computed first, and then assigned. 
""" return self._assign(kwargs) def _assign(self, kwargs): assert isinstance(kwargs, dict) from databricks.koalas.indexes import MultiIndex from databricks.koalas.series import IndexOpsMixin for k, v in kwargs.items(): is_invalid_assignee = ( not (isinstance(v, (IndexOpsMixin, spark.Column)) or callable(v) or is_scalar(v)) ) or isinstance(v, MultiIndex) if is_invalid_assignee: raise TypeError( "Column assignment doesn't support type " "{0}".format(type(v).__name__) ) if callable(v): kwargs[k] = v(self) pairs = { (k if is_name_like_tuple(k) else (k,)): ( (v.spark.column, v.dtype) if isinstance(v, IndexOpsMixin) and not isinstance(v, MultiIndex) else (v, None) if isinstance(v, spark.Column) else (F.lit(v), None) ) for k, v in kwargs.items() } scols = [] data_dtypes = [] for label in self._internal.column_labels: for i in range(len(label)): if label[: len(label) - i] in pairs: scol, dtype = pairs[label[: len(label) - i]] scol = scol.alias(self._internal.spark_column_name_for(label)) break else: scol = self._internal.spark_column_for(label) dtype = self._internal.dtype_for(label) scols.append(scol) data_dtypes.append(dtype) column_labels = self._internal.column_labels.copy() for label, (scol, dtype) in pairs.items(): if label not in set(i[: len(label)] for i in self._internal.column_labels): scols.append(scol.alias(name_like_string(label))) column_labels.append(label) data_dtypes.append(dtype) level = self._internal.column_labels_level column_labels = [ tuple(list(label) + ([""] * (level - len(label)))) for label in column_labels ] internal = self._internal.with_new_columns( scols, column_labels=column_labels, data_dtypes=data_dtypes ) return DataFrame(internal) @staticmethod def from_records( data: Union[np.array, List[tuple], dict, pd.DataFrame], index: Union[str, list, np.array] = None, exclude: list = None, columns: list = None, coerce_float: bool = False, nrows: int = None, ) -> "DataFrame": """ Convert structured or record ndarray to DataFrame. Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Columns or fields to exclude columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) coerce_float : boolean, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets nrows : int, default None Number of rows to read if data is an iterator Returns ------- df : DataFrame Examples -------- Use dict as input >>> ks.DataFrame.from_records({'A': [1, 2, 3]}) A 0 1 1 2 2 3 Use list of tuples as input >>> ks.DataFrame.from_records([(1, 2), (3, 4)]) 0 1 0 1 2 1 3 4 Use NumPy array as input >>> ks.DataFrame.from_records(np.eye(3)) 0 1 2 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0 """ return DataFrame( pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows) ) def to_records(self, index=True, column_dtypes=None, index_dtypes=None) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. .. 
note:: This method should only be used if the resulting NumPy ndarray is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) # doctest: +SKIP rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Specification of dtype for columns is new in pandas 0.24.0. Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')]) Specification of dtype for index is new in pandas 0.24.0. Data types can also be specified for the index: >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')]) """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args ) def copy(self, deep=None) -> "DataFrame": """ Make a copy of this object's indices and data. Parameters ---------- deep : None this parameter is not supported but just dummy parameter to match pandas. Returns ------- copy : DataFrame Examples -------- >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df_copy = df.copy() >>> df_copy x y z w 0 1 3 5 7 1 2 4 6 8 """ return DataFrame(self._internal) def dropna( self, axis=0, how="any", thresh=None, subset=None, inplace=False ) -> Optional["DataFrame"]: """ Remove missing values. Parameters ---------- axis : {0 or 'index'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. 
Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.drop : Drop specified labels from columns. DataFrame.isnull: Indicate missing values. DataFrame.notnull : Indicate existing (non-missing) values. Examples -------- >>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [None, 'Batmobile', 'Bullwhip'], ... "born": [None, "1940-04-25", None]}, ... columns=['name', 'toy', 'born']) >>> df name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the columns where at least one element is missing. >>> df.dropna(axis='columns') name 0 Alfred 1 Batman 2 Catwoman Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. >>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ axis = validate_axis(axis) inplace = validate_bool_kwarg(inplace, "inplace") if thresh is None: if how is None: raise TypeError("must specify how or thresh") elif how not in ("any", "all"): raise ValueError("invalid how option: {h}".format(h=how)) if subset is not None: if isinstance(subset, str): labels = [(subset,)] # type: Optional[List[Tuple]] elif isinstance(subset, tuple): labels = [subset] else: labels = [sub if isinstance(sub, tuple) else (sub,) for sub in subset] else: labels = None if axis == 0: if labels is not None: invalids = [label for label in labels if label not in self._internal.column_labels] if len(invalids) > 0: raise KeyError(invalids) else: labels = self._internal.column_labels cnt = reduce( lambda x, y: x + y, [ F.when(self._kser_for(label).notna().spark.column, 1).otherwise(0) for label in labels ], F.lit(0), ) if thresh is not None: pred = cnt >= F.lit(int(thresh)) elif how == "any": pred = cnt == F.lit(len(labels)) elif how == "all": pred = cnt > F.lit(0) internal = self._internal.with_filter(pred) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) else: assert axis == 1 internal = self._internal.resolved_copy if labels is not None: if any(len(lbl) != internal.index_level for lbl in labels): raise ValueError( "The length of each subset must be the same as the index size." 
) cond = reduce( lambda x, y: x | y, [ reduce( lambda x, y: x & y, [ scol == F.lit(l) for l, scol in zip(lbl, internal.index_spark_columns) ], ) for lbl in labels ], ) internal = internal.with_filter(cond) null_counts = [] for label in internal.column_labels: scol = internal.spark_column_for(label) if isinstance(internal.spark_type_for(label), (FloatType, DoubleType)): cond = scol.isNull() | F.isnan(scol) else: cond = scol.isNull() null_counts.append( F.sum(F.when(~cond, 1).otherwise(0)).alias(name_like_string(label)) ) counts = internal.spark_frame.select(null_counts + [F.count("*")]).head() if thresh is not None: column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) >= int(thresh) ] elif how == "any": column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) == counts[-1] ] elif how == "all": column_labels = [ label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) > 0 ] kdf = self[column_labels] if inplace: self._update_internal_frame(kdf._internal) return None else: return kdf # TODO: add 'limit' when value parameter exists def fillna( self, value=None, method=None, axis=None, inplace=False, limit=None ) -> Optional["DataFrame"]: """Fill NA/NaN values. .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- value : scalar, dict, Series Value to use to fill holes. alternately a dict/Series of values specifying which value to use for each column. DataFrame is not supported. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None Returns ------- DataFrame DataFrame with NA entries filled. Examples -------- >>> df = ks.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0 1 3.0 4.0 0.0 1 2 0.0 0.0 0.0 5 3 0.0 3.0 1.0 4 We can also propagate non-null values forward or backward. >>> df.fillna(method='ffill') A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 3.0 4.0 NaN 5 3 3.0 3.0 1.0 4 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. 
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 2.0 1 2 0.0 1.0 2.0 5 3 0.0 3.0 1.0 4 """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if value is not None: if not isinstance(value, (float, int, str, bool, dict, pd.Series)): raise TypeError("Unsupported type %s" % type(value).__name__) if limit is not None: raise ValueError("limit parameter for value is not support now") if isinstance(value, pd.Series): value = value.to_dict() if isinstance(value, dict): for v in value.values(): if not isinstance(v, (float, int, str, bool)): raise TypeError("Unsupported type %s" % type(v).__name__) value = {k if is_name_like_tuple(k) else (k,): v for k, v in value.items()} def op(kser): label = kser._column_label for k, v in value.items(): if k == label[: len(k)]: return kser._fillna( value=value[k], method=method, axis=axis, limit=limit ) else: return kser else: op = lambda kser: kser._fillna(value=value, method=method, axis=axis, limit=limit) elif method is not None: op = lambda kser: kser._fillna(value=value, method=method, axis=axis, limit=limit) else: raise ValueError("Must specify a fillna 'value' or 'method' parameter.") kdf = self._apply_series_op(op, should_resolve=(method is not None)) inplace = validate_bool_kwarg(inplace, "inplace") if inplace: self._update_internal_frame(kdf._internal, requires_same_anchor=False) return None else: return kdf def replace( self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method="pad", ) -> Optional["DataFrame"]: """ Returns a new DataFrame replacing a value with another value. Parameters ---------- to_replace : int, float, string, list, tuple or dict Value to be replaced. value : int, float, string, list or tuple Value to use to replace holes. The replacement value must be an int, float, or string. If value is a list or tuple, value should be of the same length with to_replace. inplace : boolean, default False Fill in place (do not create a new object) Returns ------- DataFrame Object after replacement. Examples -------- >>> df = ks.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'], ... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']}, ... 
columns=['name', 'weapon']) >>> df name weapon 0 Ironman Mark-45 1 Captain America Shield 2 Thor Mjolnir 3 Hulk Smash Scalar `to_replace` and `value` >>> df.replace('Ironman', 'War-Machine') name weapon 0 War-Machine Mark-45 1 Captain America Shield 2 Thor Mjolnir 3 Hulk Smash List like `to_replace` and `value` >>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True) >>> df name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Mjolnir 3 Hulk Smash Dicts can be used to specify different replacement values for different existing values To use a dict in this way the value parameter should be None >>> df.replace({'Mjolnir': 'Stormbuster'}) name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash Dict can specify that different values should be replaced in different columns The value parameter should not be None in this case >>> df.replace({'weapon': 'Mjolnir'}, 'Stormbuster') name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash Nested dictionaries The value parameter should be None to use a nested dict in this way >>> df.replace({'weapon': {'Mjolnir': 'Stormbuster'}}) name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash """ if method != "pad": raise NotImplementedError("replace currently works only for method='pad") if limit is not None: raise NotImplementedError("replace currently works only when limit=None") if regex is not False: raise NotImplementedError("replace currently doesn't supports regex") inplace = validate_bool_kwarg(inplace, "inplace") if value is not None and not isinstance(value, (int, float, str, list, tuple, dict)): raise TypeError("Unsupported type {}".format(type(value).__name__)) if to_replace is not None and not isinstance( to_replace, (int, float, str, list, tuple, dict) ): raise TypeError("Unsupported type {}".format(type(to_replace).__name__)) if isinstance(value, (list, tuple)) and isinstance(to_replace, (list, tuple)): if len(value) != len(to_replace): raise ValueError("Length of to_replace and value must be same") if isinstance(to_replace, dict) and ( value is not None or all(isinstance(i, dict) for i in to_replace.values()) ): def op(kser): if kser.name in to_replace: return kser.replace(to_replace=to_replace[kser.name], value=value, regex=regex) else: return kser else: op = lambda kser: kser.replace(to_replace=to_replace, value=value, regex=regex) kdf = self._apply_series_op(op) if inplace: self._update_internal_frame(kdf._internal) return None else: return kdf def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "DataFrame": """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Parameters ---------- lower : float or int, default None Minimum threshold value. All values below this threshold will be set to it. upper : float or int, default None Maximum threshold value. All values above this threshold will be set to it. Returns ------- DataFrame DataFrame with the values outside the clip boundaries replaced. Examples -------- >>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3) A 0 1 1 2 2 3 Notes ----- One difference between this implementation and pandas is that running pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1) will output the original DataFrame, simply ignoring the incompatible types. 
""" if is_list_like(lower) or is_list_like(upper): raise ValueError( "List-like value are not supported for 'lower' and 'upper' at the " + "moment" ) if lower is None and upper is None: return self return self._apply_series_op(lambda kser: kser.clip(lower=lower, upper=upper)) def head(self, n: int = 5) -> "DataFrame": """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `n` rows of the caller object. Examples -------- >>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon """ if n < 0: n = len(self) + n if n <= 0: return DataFrame(self._internal.with_filter(F.lit(False))) else: sdf = self._internal.resolved_copy.spark_frame if get_option("compute.ordered_head"): sdf = sdf.orderBy(NATURAL_ORDER_COLUMN_NAME) return DataFrame(self._internal.with_new_sdf(sdf.limit(n))) def pivot_table( self, values=None, index=None, columns=None, aggfunc="mean", fill_value=None ) -> "DataFrame": """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ---------- values : column to aggregate. They should be either a list less than three or a string. index : column (string) or list of columns If an array is passed, it must be the same length as the data. The list should contain string. columns : column Columns used in the pivot operation. Only one column is supported and it should be a string. aggfunc : function (string), dict, default mean If dict is passed, the key is column to aggregate and value is function or list of functions. fill_value : scalar, default None Value to replace missing values with. Returns ------- table : DataFrame Examples -------- >>> df = ks.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}, ... columns=['A', 'B', 'C', 'D', 'E']) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... columns='C', aggfunc='sum') >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4.0 5 two 7.0 6 foo one 4.0 1 two NaN 6 We can also fill missing values using the `fill_value` parameter. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... columns='C', aggfunc='sum', fill_value=0) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 We can also calculate multiple types of aggregations for any given value column. 
>>> table = df.pivot_table(values=['D'], index =['C'], ... columns="A", aggfunc={'D': 'mean'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D A bar foo C large 5.5 2.000000 small 5.5 2.333333 The next example aggregates on multiple values. >>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'], ... aggfunc={'D': 'mean', 'E': 'sum'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D E A bar foo bar foo C large 5.5 2.000000 15 9 small 5.5 2.333333 17 13 """ if not is_name_like_value(columns): raise ValueError("columns should be one column name.") if not is_name_like_value(values) and not ( isinstance(values, list) and all(is_name_like_value(v) for v in values) ): raise ValueError("values should be one column or list of columns.") if not isinstance(aggfunc, str) and ( not isinstance(aggfunc, dict) or not all( is_name_like_value(key) and isinstance(value, str) for key, value in aggfunc.items() ) ): raise ValueError( "aggfunc must be a dict mapping from column name " "to aggregate functions (string)." ) if isinstance(aggfunc, dict) and index is None: raise NotImplementedError( "pivot_table doesn't support aggfunc" " as dict and without index." ) if isinstance(values, list) and index is None: raise NotImplementedError("values can't be a list without index.") if columns not in self.columns: raise ValueError("Wrong columns {}.".format(name_like_string(columns))) if not is_name_like_tuple(columns): columns = (columns,) if isinstance(values, list): values = [col if is_name_like_tuple(col) else (col,) for col in values] if not all( isinstance(self._internal.spark_type_for(col), NumericType) for col in values ): raise TypeError("values should be a numeric type.") else: values = values if is_name_like_tuple(values) else (values,) if not isinstance(self._internal.spark_type_for(values), NumericType): raise TypeError("values should be a numeric type.") if isinstance(aggfunc, str): if isinstance(values, list): agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format( self._internal.spark_column_name_for(value), aggfunc ) ) for value in values ] else: agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format( self._internal.spark_column_name_for(values), aggfunc ) ) ] elif isinstance(aggfunc, dict): aggfunc = { key if is_name_like_tuple(key) else (key,): value for key, value in aggfunc.items() } agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format(self._internal.spark_column_name_for(key), value) ) for key, value in aggfunc.items() ] agg_columns = [key for key, _ in aggfunc.items()] if set(agg_columns) != set(values): raise ValueError("Columns in aggfunc must be the same as values.") sdf = self._internal.resolved_copy.spark_frame if index is None: sdf = ( sdf.groupBy() .pivot(pivot_col=self._internal.spark_column_name_for(columns)) .agg(*agg_cols) ) elif isinstance(index, list): index = [label if is_name_like_tuple(label) else (label,) for label in index] sdf = ( sdf.groupBy([self._internal.spark_column_name_for(label) for label in index]) .pivot(pivot_col=self._internal.spark_column_name_for(columns)) .agg(*agg_cols) ) else: raise ValueError("index should be a None or a list of columns.") if fill_value is not None and isinstance(fill_value, (int, float)): sdf = sdf.fillna(fill_value) if index is not None: index_columns = [self._internal.spark_column_name_for(label) for label in index] index_dtypes = [self._internal.dtype_for(label) for label in index] if isinstance(values, list): data_columns = [column for column in sdf.columns if column not in index_columns] if len(values) > 1: # If we 
have two values, Spark will return column's name # in this format: column_values, where column contains # their values in the DataFrame and values is # the column list passed to the pivot_table(). # E.g. if column is b and values is ['b','e'], # then ['2_b', '2_e', '3_b', '3_e']. # We sort the columns of Spark DataFrame by values. data_columns.sort(key=lambda x: x.split("_", 1)[1]) sdf = sdf.select(index_columns + data_columns) column_name_to_index = dict( zip(self._internal.data_spark_column_names, self._internal.column_labels) ) column_labels = [ tuple(list(column_name_to_index[name.split("_")[1]]) + [name.split("_")[0]]) for name in data_columns ] column_label_names = ([None] * column_labels_level(values)) + [columns] internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_dtypes=index_dtypes, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, # type: ignore ) kdf = DataFrame(internal) # type: "DataFrame" else: column_labels = [tuple(list(values[0]) + [column]) for column in data_columns] column_label_names = ([None] * len(values[0])) + [columns] internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_dtypes=index_dtypes, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, # type: ignore ) kdf = DataFrame(internal) else: internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_columns], index_names=index, index_dtypes=index_dtypes, column_label_names=[columns], ) kdf = DataFrame(internal) else: if isinstance(values, list): index_values = values[-1] else: index_values = values index_map = OrderedDict() # type: Dict[str, Optional[Tuple]] for i, index_value in enumerate(index_values): colname = SPARK_INDEX_NAME_FORMAT(i) sdf = sdf.withColumn(colname, F.lit(index_value)) index_map[colname] = None internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in index_map.keys()], index_names=list(index_map.values()), column_label_names=[columns], ) kdf = DataFrame(internal) kdf_columns = kdf.columns if isinstance(kdf_columns, pd.MultiIndex): kdf.columns = kdf_columns.set_levels( kdf_columns.levels[-1].astype( spark_type_to_pandas_dtype(self._kser_for(columns).spark.data_type) ), level=-1, ) else: kdf.columns = kdf_columns.astype( spark_type_to_pandas_dtype(self._kser_for(columns).spark.data_type) ) return kdf def pivot(self, index=None, columns=None, values=None) -> "DataFrame": """ Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation. Parameters ---------- index : string, optional Column to use to make new frame's index. If None, uses existing index. columns : string Column to use to make new frame's columns. values : string, object or a list of the previous Column(s) to use for populating new frame's values. Returns ------- DataFrame Returns reshaped DataFrame. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. Examples -------- >>> df = ks.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 
'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}, ... columns=['foo', 'bar', 'baz', 'zoo']) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... # doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE bar A B C 0 1.0 NaN NaN 1 NaN 2.0 NaN 2 NaN NaN 3.0 3 4.0 NaN NaN 4 NaN 5.0 NaN 5 NaN NaN 6.0 Notice that, unlike pandas raises an ValueError when duplicated values are found, Koalas' pivot still works with its first value it meets during operation because pivot is an expensive operation and it is preferred to permissively execute over failing fast when processing large data. >>> df = ks.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz']) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... # doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1.0 NaN NaN two NaN 3.0 4.0 It also support multi-index and multi-index column. >>> df.columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'baz')]) >>> df = df.set_index(('a', 'bar'), append=True) >>> df # doctest: +NORMALIZE_WHITESPACE a b foo baz (a, bar) 0 A one 1 1 A one 2 2 B two 3 3 C two 4 >>> df.pivot(columns=('a', 'foo'), values=('b', 'baz')).sort_index() ... # doctest: +NORMALIZE_WHITESPACE ('a', 'foo') one two (a, bar) 0 A 1.0 NaN 1 A 2.0 NaN 2 B NaN 3.0 3 C NaN 4.0 """ if columns is None: raise ValueError("columns should be set.") if values is None: raise ValueError("values should be set.") should_use_existing_index = index is not None if should_use_existing_index: df = self index = [index] else: # The index after `reset_index()` will never be used, so use "distributed" index # as a dummy to avoid overhead. 
with option_context("compute.default_index_type", "distributed"): df = self.reset_index() index = df._internal.column_labels[: self._internal.index_level] df = df.pivot_table(index=index, columns=columns, values=values, aggfunc="first") if should_use_existing_index: return df else: internal = df._internal.copy(index_names=self._internal.index_names) return DataFrame(internal) @property def columns(self) -> pd.Index: """The column labels of the DataFrame.""" names = [ name if name is None or len(name) > 1 else name[0] for name in self._internal.column_label_names ] if self._internal.column_labels_level > 1: columns = pd.MultiIndex.from_tuples(self._internal.column_labels, names=names) else: columns = pd.Index([label[0] for label in self._internal.column_labels], name=names[0]) return columns @columns.setter def columns(self, columns) -> None: if isinstance(columns, pd.MultiIndex): column_labels = columns.tolist() else: column_labels = [ col if is_name_like_tuple(col, allow_none=False) else (col,) for col in columns ] if len(self._internal.column_labels) != len(column_labels): raise ValueError( "Length mismatch: Expected axis has {} elements, " "new values have {} elements".format( len(self._internal.column_labels), len(column_labels) ) ) if isinstance(columns, pd.Index): column_label_names = [ name if is_name_like_tuple(name) else (name,) for name in columns.names ] # type: Optional[List] else: column_label_names = None ksers = [ self._kser_for(label).rename(name) for label, name in zip(self._internal.column_labels, column_labels) ] self._update_internal_frame( self._internal.with_new_columns(ksers, column_label_names=column_label_names) ) @property def dtypes(self) -> pd.Series: """Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the object dtype. Returns ------- pd.Series The data type of each column. Examples -------- >>> df = ks.DataFrame({'a': list('abc'), ... 'b': list(range(1, 4)), ... 'c': np.arange(3, 6).astype('i1'), ... 'd': np.arange(4.0, 7.0, dtype='float64'), ... 'e': [True, False, True], ... 'f': pd.date_range('20130101', periods=3)}, ... columns=['a', 'b', 'c', 'd', 'e', 'f']) >>> df.dtypes a object b int64 c int8 d float64 e bool f datetime64[ns] dtype: object """ return pd.Series( [self._kser_for(label).dtype for label in self._internal.column_labels], index=pd.Index( [label if len(label) > 1 else label[0] for label in self._internal.column_labels] ), ) def spark_schema(self, index_col: Optional[Union[str, List[str]]] = None) -> StructType: warnings.warn( "DataFrame.spark_schema is deprecated as of DataFrame.spark.schema. " "Please use the API instead.", FutureWarning, ) return self.spark.schema(index_col) spark_schema.__doc__ = SparkFrameMethods.schema.__doc__ def print_schema(self, index_col: Optional[Union[str, List[str]]] = None) -> None: warnings.warn( "DataFrame.print_schema is deprecated as of DataFrame.spark.print_schema. " "Please use the API instead.", FutureWarning, ) return self.spark.print_schema(index_col) print_schema.__doc__ = SparkFrameMethods.print_schema.__doc__ def select_dtypes(self, include=None, exclude=None) -> "DataFrame": """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. 
It also takes Spark SQL DDL type strings, for instance, 'string' and 'date'. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty >>> df = ks.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes() Traceback (most recent call last): ... ValueError: at least one of include or exclude must be nonempty * If ``include`` and ``exclude`` have overlapping elements >>> df = ks.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes(include='a', exclude='a') Traceback (most recent call last): ... ValueError: include and exclude overlap on {'a'} Notes ----- * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` Examples -------- >>> df = ks.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3, ... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd']) >>> df a b c d 0 1 True 1.0 a 1 2 False 2.0 b 2 1 True 1.0 a 3 2 False 2.0 b 4 1 True 1.0 a 5 2 False 2.0 b >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64'], exclude=['int']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int']) b c d 0 True 1.0 a 1 False 2.0 b 2 True 1.0 a 3 False 2.0 b 4 True 1.0 a 5 False 2.0 b Spark SQL DDL type strings can be used as well. >>> df.select_dtypes(exclude=['string']) a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 """ from pyspark.sql.types import _parse_datatype_string if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () if not any((include, exclude)): raise ValueError("at least one of include or exclude must be " "nonempty") # can't both include AND exclude! if set(include).intersection(set(exclude)): raise ValueError( "include and exclude overlap on {inc_ex}".format( inc_ex=set(include).intersection(set(exclude)) ) ) # Handle Spark types include_spark_type = [] for inc in include: try: include_spark_type.append(_parse_datatype_string(inc)) except: pass exclude_spark_type = [] for exc in exclude: try: exclude_spark_type.append(_parse_datatype_string(exc)) except: pass # Handle pandas types include_numpy_type = [] for inc in include: try: include_numpy_type.append(infer_dtype_from_object(inc)) except: pass exclude_numpy_type = [] for exc in exclude: try: exclude_numpy_type.append(infer_dtype_from_object(exc)) except: pass column_labels = [] for label in self._internal.column_labels: if len(include) > 0: should_include = ( infer_dtype_from_object(self._kser_for(label).dtype.name) in include_numpy_type or self._internal.spark_type_for(label) in include_spark_type ) else: should_include = not ( infer_dtype_from_object(self._kser_for(label).dtype.name) in exclude_numpy_type or self._internal.spark_type_for(label) in exclude_spark_type ) if should_include: column_labels.append(label) return DataFrame( self._internal.with_new_columns([self._kser_for(label) for label in column_labels]) ) def droplevel(self, level, axis=0) -> "DataFrame": """ Return DataFrame with requested index / column level(s) removed. Parameters ---------- level: int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. 
axis: {0 or ‘index’, 1 or ‘columns’}, default 0 Returns ------- DataFrame with requested index / column level(s) removed. Examples -------- >>> df = ks.DataFrame( ... [[3, 4], [7, 8], [11, 12]], ... index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]), ... ) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df # doctest: +NORMALIZE_WHITESPACE level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') # doctest: +NORMALIZE_WHITESPACE level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) # doctest: +NORMALIZE_WHITESPACE level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ axis = validate_axis(axis) if axis == 0: if not isinstance(level, (tuple, list)): # huh? level = [level] index_names = self.index.names nlevels = self._internal.index_level int_level = set() for n in level: if isinstance(n, int): if n < 0: n = n + nlevels if n < 0: raise IndexError( "Too many levels: Index has only {} levels, " "{} is not a valid level number".format(nlevels, (n - nlevels)) ) if n >= nlevels: raise IndexError( "Too many levels: Index has only {} levels, not {}".format( nlevels, (n + 1) ) ) else: if n not in index_names: raise KeyError("Level {} not found".format(n)) n = index_names.index(n) int_level.add(n) if len(level) >= nlevels: raise ValueError( "Cannot remove {} levels from an index with {} levels: " "at least one level must be left.".format(len(level), nlevels) ) index_spark_columns, index_names, index_dtypes = zip( *[ item for i, item in enumerate( zip( self._internal.index_spark_columns, self._internal.index_names, self._internal.index_dtypes, ) ) if i not in int_level ] ) internal = self._internal.copy( index_spark_columns=list(index_spark_columns), index_names=list(index_names), index_dtypes=list(index_dtypes), ) return DataFrame(internal) else: kdf = self.copy() kdf.columns = kdf.columns.droplevel(level) return kdf def drop( self, labels=None, axis=1, columns: Union[Any, Tuple, List[Any], List[Tuple]] = None ) -> "DataFrame": """ Drop specified labels from columns. Remove columns by specifying label names and axis=1 or columns. When specifying both labels and columns, only labels will be dropped. Removing rows is yet to be implemented. Parameters ---------- labels : single label or list-like Column labels to drop. axis : {1 or 'columns'}, default 1 .. dropna currently only works for axis=1 'columns' axis=0 is yet to be implemented. columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). Returns ------- dropped : DataFrame See Also -------- Series.dropna Examples -------- >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop('x', axis=1) y z w 0 3 5 7 1 4 6 8 >>> df.drop(['y', 'z'], axis=1) x w 0 1 7 1 2 8 >>> df.drop(columns=['y', 'z']) x w 0 1 7 1 2 8 Also support for MultiIndex >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df # doctest: +NORMALIZE_WHITESPACE a b x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop('a') # doctest: +NORMALIZE_WHITESPACE b z w 0 5 7 1 6 8 Notes ----- Currently only axis = 1 is supported in this function, axis = 0 is yet to be implemented. 
""" if labels is not None: axis = validate_axis(axis) if axis == 1: return self.drop(columns=labels) raise NotImplementedError("Drop currently only works for axis=1") elif columns is not None: if is_name_like_tuple(columns): columns = [columns] elif is_name_like_value(columns): columns = [(columns,)] else: columns = [col if is_name_like_tuple(col) else (col,) for col in columns] drop_column_labels = set( label for label in self._internal.column_labels for col in columns if label[: len(col)] == col ) if len(drop_column_labels) == 0: raise KeyError(columns) cols, labels = zip( *( (column, label) for column, label in zip( self._internal.data_spark_column_names, self._internal.column_labels ) if label not in drop_column_labels ) ) internal = self._internal.with_new_columns([self._kser_for(label) for label in labels]) return DataFrame(internal) else: raise ValueError("Need to specify at least one of 'labels' or 'columns'") def _sort( self, by: List[Column], ascending: Union[bool, List[bool]], inplace: bool, na_position: str ): if isinstance(ascending, bool): ascending = [ascending] * len(by) if len(ascending) != len(by): raise ValueError( "Length of ascending ({}) != length of by ({})".format(len(ascending), len(by)) ) if na_position not in ("first", "last"): raise ValueError("invalid na_position: '{}'".format(na_position)) # Mapper: Get a spark column function for (ascending, na_position) combination # Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847. mapper = { (True, "first"): lambda x: Column(getattr(x._jc, "asc_nulls_first")()), (True, "last"): lambda x: Column(getattr(x._jc, "asc_nulls_last")()), (False, "first"): lambda x: Column(getattr(x._jc, "desc_nulls_first")()), (False, "last"): lambda x: Column(getattr(x._jc, "desc_nulls_last")()), } by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)] sdf = self._internal.resolved_copy.spark_frame.sort(*(by + [NATURAL_ORDER_COLUMN_NAME])) kdf = DataFrame(self._internal.with_new_sdf(sdf)) # type: DataFrame if inplace: self._update_internal_frame(kdf._internal) return None else: return kdf def sort_values( self, by: Union[Any, List[Any], Tuple, List[Tuple]], ascending: Union[bool, List[bool]] = True, inplace: bool = False, na_position: str = "last", ) -> Optional["DataFrame"]: """ Sort by the values along either axis. Parameters ---------- by : str or list of str ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False if True, perform operation in-place na_position : {'first', 'last'}, default 'last' `first` puts NaNs at the beginning, `last` puts NaNs at the end Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ks.DataFrame({ ... 'col1': ['A', 'B', None, 'D', 'C'], ... 'col2': [2, 9, 8, 7, 4], ... 'col3': [0, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3']) >>> df col1 col2 col3 0 A 2 0 1 B 9 9 2 None 8 4 3 D 7 2 4 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 0 A 2 0 1 B 9 9 4 C 4 3 3 D 7 2 2 None 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 3 D 7 2 4 C 4 3 1 B 9 9 0 A 2 0 2 None 8 4 Sort by multiple columns >>> df = ks.DataFrame({ ... 'col1': ['A', 'A', 'B', None, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }, ... 
columns=['col1', 'col2', 'col3']) >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 None 8 4 """ inplace = validate_bool_kwarg(inplace, "inplace") if is_name_like_value(by): by = [by] else: assert is_list_like(by), type(by) new_by = [] for colname in by: ser = self[colname] if not isinstance(ser, ks.Series): raise ValueError( "The column %s is not unique. For a multi-index, the label must be a tuple " "with elements corresponding to each level." % name_like_string(colname) ) new_by.append(ser.spark.column) return self._sort(by=new_by, ascending=ascending, inplace=inplace, na_position=na_position) def sort_index( self, axis: int = 0, level: Optional[Union[int, List[int]]] = None, ascending: bool = True, inplace: bool = False, kind: str = None, na_position: str = "last", ) -> Optional["DataFrame"]: """ Sort object by labels (along an axis) Parameters ---------- axis : index, columns to direct sorting. Currently, only axis = 0 is supported. level : int or level name or list of ints or list of level names if not None, sort on values in specified index level(s) ascending : boolean, default True Sort ascending vs. descending inplace : bool, default False if True, perform operation in-place kind : str, default None Koalas does not allow specifying the sorting algorithm at the moment, default None na_position : {‘first’, ‘last’}, default ‘last’ first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for MultiIndex. Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan]) >>> df.sort_index() A a 1.0 b 2.0 NaN NaN >>> df.sort_index(ascending=False) A b 2.0 a 1.0 NaN NaN >>> df.sort_index(na_position='first') A NaN NaN a 1.0 b 2.0 >>> df.sort_index(inplace=True) >>> df A a 1.0 b 2.0 NaN NaN >>> df = ks.DataFrame({'A': range(4), 'B': range(4)[::-1]}, ... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], ... columns=['A', 'B']) >>> df.sort_index() A B a 0 3 0 1 2 1 b 0 1 2 1 0 3 >>> df.sort_index(level=1) # doctest: +SKIP A B a 0 3 0 b 0 1 2 a 1 2 1 b 1 0 3 >>> df.sort_index(level=[1, 0]) A B a 0 3 0 b 0 1 2 a 1 2 1 b 1 0 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = validate_axis(axis) if axis != 0: raise NotImplementedError("No other axis than 0 are supported at the moment") if kind is not None: raise NotImplementedError( "Specifying the sorting algorithm is not supported at the moment." ) if level is None or (is_list_like(level) and len(level) == 0): # type: ignore by = self._internal.index_spark_columns elif is_list_like(level): by = [self._internal.index_spark_columns[l] for l in level] # type: ignore else: by = [self._internal.index_spark_columns[level]] # type: ignore return self._sort(by=by, ascending=ascending, inplace=inplace, na_position=na_position) def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame": """ Swap levels i and j in a MultiIndex on a particular axis. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to swap levels on. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. Returns ------- DataFrame DataFrame with levels swapped in MultiIndex. Examples -------- >>> midx = pd.MultiIndex.from_arrays( ... 
[['red', 'blue'], [1, 2], ['s', 'm']], names = ['color', 'number', 'size']) >>> midx # doctest: +SKIP MultiIndex([( 'red', 1, 's'), ('blue', 2, 'm')], names=['color', 'number', 'size']) Swap levels in a MultiIndex on index. >>> kdf = ks.DataFrame({'x': [5, 6], 'y':[5, 6]}, index=midx) >>> kdf # doctest: +NORMALIZE_WHITESPACE x y color number size red 1 s 5 5 blue 2 m 6 6 >>> kdf.swaplevel() # doctest: +NORMALIZE_WHITESPACE x y color size number red s 1 5 5 blue m 2 6 6 >>> kdf.swaplevel(0, 1) # doctest: +NORMALIZE_WHITESPACE x y number color size 1 red s 5 5 2 blue m 6 6 >>> kdf.swaplevel('number', 'size') # doctest: +NORMALIZE_WHITESPACE x y color size number red s 1 5 5 blue m 2 6 6 Swap levels in a MultiIndex on columns. >>> kdf = ks.DataFrame({'x': [5, 6], 'y':[5, 6]}) >>> kdf.columns = midx >>> kdf color red blue number 1 2 size s m 0 5 5 1 6 6 >>> kdf.swaplevel(axis=1) color red blue size s m number 1 2 0 5 5 1 6 6 >>> kdf.swaplevel(axis=1) color red blue size s m number 1 2 0 5 5 1 6 6 >>> kdf.swaplevel(0, 1, axis=1) number 1 2 color red blue size s m 0 5 5 1 6 6 >>> kdf.swaplevel('number', 'color', axis=1) number 1 2 color red blue size s m 0 5 5 1 6 6 """ axis = validate_axis(axis) if axis == 0: internal = self._swaplevel_index(i, j) else: assert axis == 1 internal = self._swaplevel_columns(i, j) return DataFrame(internal) def swapaxes(self, i: Union[str, int], j: Union[str, int], copy: bool = True) -> "DataFrame": """ Interchange axes and swap values axes appropriately. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from databricks.koalas.config import option_context >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE ... ks.DataFrame({'a': range(1001)}).swapaxes(i=0, j=1) Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Parameters ---------- i: {0 or 'index', 1 or 'columns'}. The axis to swap. j: {0 or 'index', 1 or 'columns'}. The axis to swap. copy : bool, default True. Returns ------- DataFrame Examples -------- >>> kdf = ks.DataFrame( ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['x', 'y', 'z'], columns=['a', 'b', 'c'] ... 
) >>> kdf a b c x 1 2 3 y 4 5 6 z 7 8 9 >>> kdf.swapaxes(i=1, j=0) x y z a 1 4 7 b 2 5 8 c 3 6 9 >>> kdf.swapaxes(i=1, j=1) a b c x 1 2 3 y 4 5 6 z 7 8 9 """ assert copy is True i = validate_axis(i) j = validate_axis(j) return self.copy() if i == j else self.transpose() def _swaplevel_columns(self, i, j) -> InternalFrame: assert isinstance(self.columns, pd.MultiIndex) for index in (i, j): if not isinstance(index, int) and index not in self.columns.names: raise KeyError("Level %s not found" % index) i = i if isinstance(i, int) else self.columns.names.index(i) j = j if isinstance(j, int) else self.columns.names.index(j) for index in (i, j): if index >= len(self.columns) or index < -len(self.columns): raise IndexError( "Too many levels: Columns have only %s levels, " "%s is not a valid level number" % (self._internal.index_level, index) ) column_label_names = self._internal.column_label_names.copy() column_label_names[i], column_label_names[j], = ( column_label_names[j], column_label_names[i], ) column_labels = self._internal._column_labels column_label_list = [list(label) for label in column_labels] for label_list in column_label_list: label_list[i], label_list[j] = label_list[j], label_list[i] column_labels = [tuple(x) for x in column_label_list] internal = self._internal.copy( column_label_names=list(column_label_names), column_labels=list(column_labels) ) return internal def _swaplevel_index(self, i, j) -> InternalFrame: assert isinstance(self.index, ks.MultiIndex) for index in (i, j): if not isinstance(index, int) and index not in self.index.names: raise KeyError("Level %s not found" % index) i = i if isinstance(i, int) else self.index.names.index(i) j = j if isinstance(j, int) else self.index.names.index(j) for index in (i, j): if index >= self._internal.index_level or index < -self._internal.index_level: raise IndexError( "Too many levels: Index has only %s levels, " "%s is not a valid level number" % (self._internal.index_level, index) ) index_map = list( zip( self._internal.index_spark_columns, self._internal.index_names, self._internal.index_dtypes, ) ) index_map[i], index_map[j], = index_map[j], index_map[i] index_spark_columns, index_names, index_dtypes = zip(*index_map) internal = self._internal.copy( index_spark_columns=list(index_spark_columns), index_names=list(index_names), index_dtypes=list(index_dtypes), ) return internal # TODO: add keep = First def nlargest(self, n: int, columns: "Any") -> "DataFrame": """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant in pandas. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. 
Examples -------- >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(n=3, columns='X') X Y 5 7.0 11 4 6.0 10 3 5.0 9 >>> df.nlargest(n=3, columns=['Y', 'X']) X Y 6 NaN 12 5 7.0 11 4 6.0 10 """ return self.sort_values(by=columns, ascending=False).head(n=n) # TODO: add keep = First def nsmallest(self, n: int, columns: "Any") -> "DataFrame": """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "a". >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 To order by the largest values in column "a" and then "c", we can specify multiple columns like in the next example. >>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 """ return self.sort_values(by=columns, ascending=True).head(n=n) def isin(self, values) -> "DataFrame": """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable or dict The sequence of values to test. If values is a dict, the keys must be the column names, which must match. Series and DataFrame are not supported. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. Examples -------- >>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog'], ... 
columns=['num_legs', 'num_wings']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True """ if isinstance(values, (pd.DataFrame, pd.Series)): raise NotImplementedError("DataFrame and Series are not supported") if isinstance(values, dict) and not set(values.keys()).issubset(self.columns): raise AttributeError( "'DataFrame' object has no attribute %s" % (set(values.keys()).difference(self.columns)) ) data_spark_columns = [] if isinstance(values, dict): for i, col in enumerate(self.columns): if col in values: data_spark_columns.append( self._internal.spark_column_for(self._internal.column_labels[i]) .isin(values[col]) .alias(self._internal.data_spark_column_names[i]) ) else: data_spark_columns.append( F.lit(False).alias(self._internal.data_spark_column_names[i]) ) elif is_list_like(values): data_spark_columns += [ self._internal.spark_column_for(label) .isin(list(values)) .alias(self._internal.spark_column_name_for(label)) for label in self._internal.column_labels ] else: raise TypeError("Values should be iterable, Series, DataFrame or dict.") return DataFrame(self._internal.with_new_columns(data_spark_columns)) @property def shape(self) -> Tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self), len(self.columns) def merge( self, right: "DataFrame", how: str = "inner", on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None, left_on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None, right_on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None, left_index: bool = False, right_index: bool = False, suffixes: Tuple[str, str] = ("_x", "_y"), ) -> "DataFrame": """ Merge DataFrame objects with a database-style join. The index of the resulting DataFrame will be one of the following: - 0...n if no index is used for merging - Index of the left DataFrame if merged only on the index of the right DataFrame - Index of the right DataFrame if merged only on the index of the left DataFrame - All involved indices if merged using the indices of both DataFrames e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will be an index (x, a, b) Parameters ---------- right: Object to merge with. how: Type of merge to be performed. {'left', 'right', 'outer', 'inner'}, default 'inner' left: use only keys from left frame, similar to a SQL left outer join; not preserve key order unlike pandas. right: use only keys from right frame, similar to a SQL right outer join; not preserve key order unlike pandas. outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. inner: use intersection of keys from both frames, similar to a SQL inner join; not preserve the order of the left keys unlike pandas. on: Column or index level names to join on. These must be found in both DataFrames. If on is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. 
left_on: Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. right_on: Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index: Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index: Use the index from the right DataFrame as the join key. Same caveats as left_index. suffixes: Suffix to apply to overlapping column names in the left and right side, respectively. Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- DataFrame.join : Join columns of another DataFrame. DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}, ... columns=['lkey', 'value']) >>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}, ... columns=['rkey', 'value']) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. >>> merged = df1.merge(df2, left_on='lkey', right_on='rkey') >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS lkey value_x rkey value_y ...bar 2 bar 6 ...baz 3 baz 7 ...foo 1 foo 5 ...foo 1 foo 8 ...foo 5 foo 5 ...foo 5 foo 8 >>> left_kdf = ks.DataFrame({'A': [1, 2]}) >>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2]) >>> left_kdf.merge(right_kdf, left_index=True, right_index=True).sort_index() A B 1 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left').sort_index() A B 0 1 None 1 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right').sort_index() A B 1 2.0 x 2 NaN y >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer').sort_index() A B 0 1.0 None 1 2.0 x 2 NaN y Notes ----- As described in #263, joining string columns currently returns None for missing values instead of NaN. """ def to_list(os: Optional[Union[Any, List[Any], Tuple, List[Tuple]]]) -> List[Tuple]: if os is None: return [] elif is_name_like_tuple(os): return [os] # type: ignore elif is_name_like_value(os): return [(os,)] else: return [o if is_name_like_tuple(o) else (o,) for o in os] if isinstance(right, ks.Series): right = right.to_frame() if on: if left_on or right_on: raise ValueError( 'Can only pass argument "on" OR "left_on" and "right_on", ' "not a combination of both." ) left_key_names = list(map(self._internal.spark_column_name_for, to_list(on))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(on))) else: # TODO: need special handling for multi-index. 
if left_index: left_key_names = self._internal.index_spark_column_names else: left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on))) if right_index: right_key_names = right._internal.index_spark_column_names else: right_key_names = list( map(right._internal.spark_column_name_for, to_list(right_on)) ) if left_key_names and not right_key_names: raise ValueError("Must pass right_on or right_index=True") if right_key_names and not left_key_names: raise ValueError("Must pass left_on or left_index=True") if not left_key_names and not right_key_names: common = list(self.columns.intersection(right.columns)) if len(common) == 0: raise ValueError( "No common columns to perform merge on. Merge options: " "left_on=None, right_on=None, left_index=False, right_index=False" ) left_key_names = list(map(self._internal.spark_column_name_for, to_list(common))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(common))) if len(left_key_names) != len(right_key_names): raise ValueError("len(left_keys) must equal len(right_keys)") # We should distinguish the name to avoid ambiguous column name after merging. right_prefix = "__right_" right_key_names = [right_prefix + right_key_name for right_key_name in right_key_names] how = validate_how(how) def resolve(internal, side): rename = lambda col: "__{}_{}".format(side, col) internal = internal.resolved_copy sdf = internal.spark_frame sdf = sdf.select( [ scol_for(sdf, col).alias(rename(col)) for col in sdf.columns if col not in HIDDEN_COLUMNS ] + list(HIDDEN_COLUMNS) ) return internal.copy( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, rename(col)) for col in internal.index_spark_column_names ], data_spark_columns=[ scol_for(sdf, rename(col)) for col in internal.data_spark_column_names ], ) left_internal = self._internal.resolved_copy right_internal = resolve(right._internal, "right") left_table = left_internal.spark_frame.alias("left_table") right_table = right_internal.spark_frame.alias("right_table") left_key_columns = [scol_for(left_table, label) for label in left_key_names] right_key_columns = [scol_for(right_table, label) for label in right_key_names] join_condition = reduce( lambda x, y: x & y, [lkey == rkey for lkey, rkey in zip(left_key_columns, right_key_columns)], ) joined_table = left_table.join(right_table, join_condition, how=how) # Unpack suffixes tuple for convenience left_suffix = suffixes[0] right_suffix = suffixes[1] # Append suffixes to columns with the same name to avoid conflicts later duplicate_columns = set(left_internal.column_labels) & set(right_internal.column_labels) exprs = [] data_columns = [] column_labels = [] left_scol_for = lambda label: scol_for( left_table, left_internal.spark_column_name_for(label) ) right_scol_for = lambda label: scol_for( right_table, right_internal.spark_column_name_for(label) ) for label in left_internal.column_labels: col = left_internal.spark_column_name_for(label) scol = left_scol_for(label) if label in duplicate_columns: spark_column_name = left_internal.spark_column_name_for(label) if ( spark_column_name in left_key_names and (right_prefix + spark_column_name) in right_key_names ): right_scol = right_scol_for(label) if how == "right": scol = right_scol.alias(col) elif how == "full": scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col) else: pass else: col = col + left_suffix scol = scol.alias(col) label = tuple([str(label[0]) + left_suffix] + list(label[1:])) exprs.append(scol) data_columns.append(col) column_labels.append(label) 
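        # The loop below mirrors the one above for the right-hand frame:
        # join-key columns already emitted (or coalesced) from the left side
        # are skipped, and any remaining overlapping labels receive the
        # right-hand suffix instead of the left-hand one.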
for label in right_internal.column_labels: # recover `right_prefix` here. col = right_internal.spark_column_name_for(label)[len(right_prefix) :] scol = right_scol_for(label).alias(col) if label in duplicate_columns: spark_column_name = left_internal.spark_column_name_for(label) if ( spark_column_name in left_key_names and (right_prefix + spark_column_name) in right_key_names ): continue else: col = col + right_suffix scol = scol.alias(col) label = tuple([str(label[0]) + right_suffix] + list(label[1:])) exprs.append(scol) data_columns.append(col) column_labels.append(label) left_index_scols = left_internal.index_spark_columns right_index_scols = right_internal.index_spark_columns # Retain indices if they are used for joining if left_index: if right_index: if how in ("inner", "left"): exprs.extend(left_index_scols) index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names elif how == "right": exprs.extend(right_index_scols) index_spark_column_names = right_internal.index_spark_column_names index_names = right_internal.index_names else: index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names for col, left_scol, right_scol in zip( index_spark_column_names, left_index_scols, right_index_scols ): scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol) exprs.append(scol.alias(col)) else: exprs.extend(right_index_scols) index_spark_column_names = right_internal.index_spark_column_names index_names = right_internal.index_names elif right_index: exprs.extend(left_index_scols) index_spark_column_names = left_internal.index_spark_column_names index_names = left_internal.index_names else: index_spark_column_names = [] index_names = [] selected_columns = joined_table.select(*exprs) internal = InternalFrame( spark_frame=selected_columns, index_spark_columns=[ scol_for(selected_columns, col) for col in index_spark_column_names ], index_names=index_names, column_labels=column_labels, data_spark_columns=[scol_for(selected_columns, col) for col in data_columns], ) return DataFrame(internal) def join( self, right: "DataFrame", on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None, how: str = "left", lsuffix: str = "", rsuffix: str = "", ) -> "DataFrame": """ Join columns of another DataFrame. Join columns with `right` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- right: DataFrame, Series on: str, list of str, or array-like, optional Column or index level name(s) in the caller to join on the index in `right`, otherwise joins index-on-index. If multiple values given, the `right` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation. how: {'left', 'right', 'outer', 'inner'}, default 'left' How to handle the operation of the two objects. * left: use `left` frame’s index (or column if on is specified). * right: use `right`’s index. * outer: form union of `left` frame’s index (or column if on is specified) with right’s index, and sort it. lexicographically. * inner: form intersection of `left` frame’s index (or column if on is specified) with `right`’s index, preserving the order of the `left`’s one. lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' Suffix to use from `right` frame's overlapping columns. 
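        A sketch of the overlap rule: if the two frames share a column name and
        neither suffix is given, the join is rejected.

        >>> kdf1.join(kdf2)  # doctest: +SKIP
        Traceback (most recent call last):
          ...
        ValueError: columns overlap but no suffix specified: ['key']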
Returns ------- DataFrame A dataframe containing columns from both the `left` and `right`. See Also -------- DataFrame.merge: For column(s)-on-columns(s) operations. DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Notes ----- Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame objects. Examples -------- >>> kdf1 = ks.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], ... 'A': ['A0', 'A1', 'A2', 'A3']}, ... columns=['key', 'A']) >>> kdf2 = ks.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}, ... columns=['key', 'B']) >>> kdf1 key A 0 K0 A0 1 K1 A1 2 K2 A2 3 K3 A3 >>> kdf2 key B 0 K0 B0 1 K1 B1 2 K2 B2 Join DataFrames using their indexes. >>> join_kdf = kdf1.join(kdf2, lsuffix='_left', rsuffix='_right') >>> join_kdf.sort_values(by=join_kdf.columns) key_left A key_right B 0 K0 A0 K0 B0 1 K1 A1 K1 B1 2 K2 A2 K2 B2 3 K3 A3 None None If we want to join using the key columns, we need to set key to be the index in both df and right. The joined DataFrame will have key as its index. >>> join_kdf = kdf1.set_index('key').join(kdf2.set_index('key')) >>> join_kdf.sort_values(by=join_kdf.columns) # doctest: +NORMALIZE_WHITESPACE A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 None Another option to join using the key columns is to use the on parameter. DataFrame.join always uses right’s index but we can use any column in df. This method not preserve the original DataFrame’s index in the result unlike pandas. >>> join_kdf = kdf1.join(kdf2.set_index('key'), on='key') >>> join_kdf.index Int64Index([0, 1, 2, 3], dtype='int64') """ if isinstance(right, ks.Series): common = list(self.columns.intersection([right.name])) else: common = list(self.columns.intersection(right.columns)) if len(common) > 0 and not lsuffix and not rsuffix: raise ValueError( "columns overlap but no suffix specified: " "{rename}".format(rename=common) ) need_set_index = False if on: if not is_list_like(on): on = [on] # type: ignore if len(on) != right._internal.index_level: raise ValueError( 'len(left_on) must equal the number of levels in the index of "right"' ) need_set_index = len(set(on) & set(self.index.names)) == 0 if need_set_index: self = self.set_index(on) join_kdf = self.merge( right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix) ) return join_kdf.reset_index() if need_set_index else join_kdf def append( self, other: "DataFrame", ignore_index: bool = False, verify_integrity: bool = False, sort: bool = False, ) -> "DataFrame": """ Append rows of other to the end of caller, returning a new object. Columns in other that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. sort : boolean, default False Currently not supported. 
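        A sketch of the integrity check: with ``verify_integrity=True`` the
        caller's index and ``other``'s index must be disjoint.

        >>> df.append(df, verify_integrity=True)  # doctest: +SKIP
        Traceback (most recent call last):
          ...
        ValueError: Indices have overlapping values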
Returns ------- appended : DataFrame Examples -------- >>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df.append(df) A B 0 1 2 1 3 4 0 1 2 1 3 4 >>> df.append(df, ignore_index=True) A B 0 1 2 1 3 4 2 1 2 3 3 4 """ if isinstance(other, ks.Series): raise ValueError("DataFrames.append() does not support appending Series to DataFrames") if sort: raise NotImplementedError("The 'sort' parameter is currently not supported") if not ignore_index: index_scols = self._internal.index_spark_columns if len(index_scols) != other._internal.index_level: raise ValueError("Both DataFrames have to have the same number of index levels") if verify_integrity and len(index_scols) > 0: if ( self._internal.spark_frame.select(index_scols) .intersect( other._internal.spark_frame.select(other._internal.index_spark_columns) ) .count() ) > 0: raise ValueError("Indices have overlapping values") # Lazy import to avoid circular dependency issues from databricks.koalas.namespace import concat return cast(DataFrame, concat([self, other], ignore_index=ignore_index)) # TODO: add 'filter_func' and 'errors' parameter def update(self, other: "DataFrame", join: str = "left", overwrite: bool = True) -> None: """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or Series join : 'left', default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. Returns ------- None : method directly changes calling object See Also -------- DataFrame.merge : For column(s)-on-columns(s) operations. DataFrame.join : Join columns of another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ks.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_df = ks.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. >>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_column = ks.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df.sort_index() A B 0 a d 1 b y 2 c e If `other` contains None the corresponding values are not updated in the original dataframe. 
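        (Internally, with ``overwrite=True`` each shared column is combined
        roughly as ``F.when(new.isNull(), old).otherwise(new)``, so a null
        incoming value always keeps the original one, as the example below
        shows.)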
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ks.DataFrame({'B': [4, None, 6]}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4.0 1 2 500.0 2 3 6.0 """ if join != "left": raise NotImplementedError("Only left join is supported") if isinstance(other, ks.Series): other = other.to_frame() update_columns = list( set(self._internal.column_labels).intersection(set(other._internal.column_labels)) ) update_sdf = self.join( other[update_columns], rsuffix="_new" )._internal.resolved_copy.spark_frame data_dtypes = self._internal.data_dtypes.copy() for column_labels in update_columns: column_name = self._internal.spark_column_name_for(column_labels) old_col = scol_for(update_sdf, column_name) new_col = scol_for( update_sdf, other._internal.spark_column_name_for(column_labels) + "_new" ) if overwrite: update_sdf = update_sdf.withColumn( column_name, F.when(new_col.isNull(), old_col).otherwise(new_col) ) else: update_sdf = update_sdf.withColumn( column_name, F.when(old_col.isNull(), new_col).otherwise(old_col) ) data_dtypes[self._internal.column_labels.index(column_labels)] = None # TODO: dtype? sdf = update_sdf.select( [scol_for(update_sdf, col) for col in self._internal.spark_column_names] + list(HIDDEN_COLUMNS) ) internal = self._internal.with_new_sdf(sdf, data_dtypes=data_dtypes) self._update_internal_frame(internal, requires_same_anchor=False) def sample( self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False, random_state: Optional[int] = None, ) -> "DataFrame": """ Return a random sample of items from an axis of object. Please call this function using named argument by specifying the ``frac`` argument. You can use `random_state` for reproducibility. However, note that different from pandas, specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The result set depends on not only the seed, but also how the data is distributed across machines and to some extent network randomness when shuffle operations are involved. Even in the simplest case, the result set will depend on the system's CPU core count. Parameters ---------- n : int, optional Number of items to return. This is currently NOT supported. Use frac instead. frac : float, optional Fraction of axis items to return. replace : bool, default False Sample with or without replacement. random_state : int, optional Seed for the random number generator (if int). Returns ------- Series or DataFrame A new object of same type as caller containing the sampled items. Examples -------- >>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish'], ... columns=['num_legs', 'num_wings', 'num_specimen_seen']) >>> df # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 A random 25% sample of the ``DataFrame``. Note that we use `random_state` to ensure the reproducibility of the examples. >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement, so the same items could appear more than once. >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP falcon 2 spider 8 spider 8 Name: num_legs, dtype: int64 Specifying the exact number of items to return is not supported at the moment. 
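        Passing ``n`` raises ``NotImplementedError``, as shown below; pass a
        fraction instead, e.g. ``df.sample(frac=0.5)`` for roughly half of the
        rows.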
>>> df.sample(n=5) # doctest: +ELLIPSIS Traceback (most recent call last): ... NotImplementedError: Function sample currently does not support specifying ... """ # Note: we don't run any of the doctests because the result can change depending on the # system's core count. if n is not None: raise NotImplementedError( "Function sample currently does not support specifying " "exact number of items to return. Use frac instead." ) if frac is None: raise ValueError("frac must be specified.") sdf = self._internal.resolved_copy.spark_frame.sample( withReplacement=replace, fraction=frac, seed=random_state ) return DataFrame(self._internal.with_new_sdf(sdf)) def astype(self, dtype) -> "DataFrame": """ Cast a Koalas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire Koalas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64') >>> df a b 0 1 1 1 2 2 2 3 3 Convert to float type: >>> df.astype('float') a b 0 1.0 1.0 1 2.0 2.0 2 3.0 3.0 Convert to int64 type back: >>> df.astype('int64') a b 0 1 1 1 2 2 2 3 3 Convert column a to float type: >>> df.astype({'a': float}) a b 0 1.0 1 1 2.0 2 2 3.0 3 """ applied = [] if is_dict_like(dtype): for col_name in dtype.keys(): if col_name not in self.columns: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument." ) for col_name, col in self.items(): if col_name in dtype: applied.append(col.astype(dtype=dtype[col_name])) else: applied.append(col) else: for col_name, col in self.items(): applied.append(col.astype(dtype=dtype)) return DataFrame(self._internal.with_new_columns(applied)) def add_prefix(self, prefix) -> "DataFrame": """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ assert isinstance(prefix, str) return self._apply_series_op( lambda kser: kser.rename(tuple([prefix + i for i in kser._column_label])) ) def add_suffix(self, suffix) -> "DataFrame": """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. 
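        Notes
        -----
        For multi-level column labels the suffix is appended to every level;
        for instance ``('X', 'A')`` becomes ``('X_col', 'A_col')`` after
        ``add_suffix('_col')``.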
Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ assert isinstance(suffix, str) return self._apply_series_op( lambda kser: kser.rename(tuple([i + suffix for i in kser._column_label])) ) # TODO: include, and exclude should be implemented. def describe(self, percentiles: Optional[List[float]] = None) -> "DataFrame": """ Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. Parameters ---------- percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75] A list of percentiles to be computed. Returns ------- DataFrame Summary statistics of the Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``. Currently only numeric data is supported. Examples -------- Describing a numeric ``Series``. >>> s = ks.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 dtype: float64 Describing a ``DataFrame``. Only numeric fields are returned. >>> df = ks.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0], ... 'object': ['a', 'b', 'c'] ... }, ... columns=['numeric1', 'numeric2', 'object']) >>> df.describe() numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 For multi-index columns: >>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')] >>> df.describe() # doctest: +NORMALIZE_WHITESPACE num a b count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 >>> df[('num', 'b')].describe() count 3.0 mean 5.0 std 1.0 min 4.0 25% 4.0 50% 5.0 75% 6.0 max 6.0 Name: (num, b), dtype: float64 Describing a ``DataFrame`` and selecting custom percentiles. >>> df = ks.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0] ... }, ... columns=['numeric1', 'numeric2']) >>> df.describe(percentiles = [0.85, 0.15]) numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 15% 1.0 4.0 50% 2.0 5.0 85% 3.0 6.0 max 3.0 6.0 Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric1.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 Name: numeric1, dtype: float64 Describing a column from a ``DataFrame`` by accessing it as an attribute and selecting custom percentiles. 
>>> df.numeric1.describe(percentiles = [0.85, 0.15]) count 3.0 mean 2.0 std 1.0 min 1.0 15% 1.0 50% 2.0 85% 3.0 max 3.0 Name: numeric1, dtype: float64 """ exprs = [] column_labels = [] for label in self._internal.column_labels: scol = self._internal.spark_column_for(label) spark_type = self._internal.spark_type_for(label) if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType): exprs.append( F.nanvl(scol, F.lit(None)).alias(self._internal.spark_column_name_for(label)) ) column_labels.append(label) elif isinstance(spark_type, NumericType): exprs.append(scol) column_labels.append(label) if len(exprs) == 0: raise ValueError("Cannot describe a DataFrame without columns") if percentiles is not None: if any((p < 0.0) or (p > 1.0) for p in percentiles): raise ValueError("Percentiles should all be in the interval [0, 1]") # appending 50% if not in percentiles already percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles else: percentiles = [0.25, 0.5, 0.75] formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)] stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"] sdf = self._internal.spark_frame.select(*exprs).summary(stats) sdf = sdf.replace("stddev", "std", subset="summary") internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, "summary")], column_labels=column_labels, data_spark_columns=[ scol_for(sdf, self._internal.spark_column_name_for(label)) for label in column_labels ], ) return DataFrame(internal).astype("float64") def drop_duplicates(self, subset=None, keep="first", inplace=False) -> Optional["DataFrame"]: """ Return DataFrame with duplicate rows removed, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns. keep : {'first', 'last', False}, default 'first' Determines which duplicates (if any) to keep. - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy. Returns ------- DataFrame DataFrame with duplicates removed or None if ``inplace=True``. >>> df = ks.DataFrame( ... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b']) >>> df a b 0 1 a 1 2 a 2 2 a 3 2 c 4 3 d >>> df.drop_duplicates().sort_index() a b 0 1 a 1 2 a 3 2 c 4 3 d >>> df.drop_duplicates('a').sort_index() a b 0 1 a 1 2 a 4 3 d >>> df.drop_duplicates(['a', 'b']).sort_index() a b 0 1 a 1 2 a 3 2 c 4 3 d >>> df.drop_duplicates(keep='last').sort_index() a b 0 1 a 2 2 a 3 2 c 4 3 d >>> df.drop_duplicates(keep=False).sort_index() a b 0 1 a 3 2 c 4 3 d """ inplace = validate_bool_kwarg(inplace, "inplace") sdf, column = self._mark_duplicates(subset, keep) sdf = sdf.where(~scol_for(sdf, column)).drop(column) internal = self._internal.with_new_sdf(sdf) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def reindex( self, labels: Optional[Any] = None, index: Optional[Any] = None, columns: Optional[Any] = None, axis: Optional[Union[int, str]] = None, copy: Optional[bool] = True, fill_value: Optional[Any] = None, ) -> "DataFrame": """ Conform DataFrame to new index with optional filling logic, placing NA/NaN in locations having no value in the previous index. 
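        Internally the row reindex is performed as a Spark right outer join of
        the frame against the new labels, so the resulting row order is not
        deterministic; sort the result (for example
        ``df.reindex(new_index).sort_index()``) when a stable order matters.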
A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- labels: array-like, optional New labels / index to conform the axis specified by ‘axis’ to. index, columns: array-like, optional New labels / index to conform to, should be specified using keywords. Preferably an Index object to avoid duplicating data axis: int or str, optional Axis to target. Can be either the axis name (‘index’, ‘columns’) or number (0, 1). copy : bool, default True Return a new object, even if the passed indexes are the same. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. Returns ------- DataFrame with changed index. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = ks.DataFrame({ ... 'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}, ... index=index, ... columns=['http_status', 'response_time']) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index).sort_index() http_status response_time Chrome 200.0 0.02 Comodo Dragon NaN NaN IE10 404.0 0.08 Iceweasel NaN NaN Safari 404.0 0.07 We can fill in the missing values by passing a value to the keyword ``fill_value``. >>> df.reindex(new_index, fill_value=0, copy=False).sort_index() http_status response_time Chrome 200 0.02 Comodo Dragon 0 0.00 IE10 404 0.08 Iceweasel 0 0.00 Safari 404 0.07 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']).sort_index() http_status user_agent Chrome 200 NaN Firefox 200 NaN IE10 404 NaN Konqueror 301 NaN Safari 404 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index() http_status user_agent Chrome 200 NaN Firefox 200 NaN IE10 404 NaN Konqueror 301 NaN Safari 404 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = ks.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]}, ... index=date_index) >>> df2.sort_index() prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. 
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2).sort_index() prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN """ if axis is not None and (index is not None or columns is not None): raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.") if labels is not None: axis = validate_axis(axis) if axis == 0: index = labels elif axis == 1: columns = labels else: raise ValueError( "No axis named %s for object type %s." % (axis, type(axis).__name__) ) if index is not None and not is_list_like(index): raise TypeError( "Index must be called with a collection of some kind, " "%s was passed" % type(index) ) if columns is not None and not is_list_like(columns): raise TypeError( "Columns must be called with a collection of some kind, " "%s was passed" % type(columns) ) df = self if index is not None: df = df._reindex_index(index, fill_value) if columns is not None: df = df._reindex_columns(columns, fill_value) # Copy if copy and df is self: return df.copy() else: return df def _reindex_index(self, index, fill_value): # When axis is index, we can mimic pandas' by a right outer join. nlevels = self._internal.index_level assert nlevels <= 1 or ( isinstance(index, ks.MultiIndex) and nlevels == index.nlevels ), "MultiIndex DataFrame can only be reindexed with a similar Koalas MultiIndex." index_columns = self._internal.index_spark_column_names frame = self._internal.resolved_copy.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME) if isinstance(index, ks.Index): if nlevels != index.nlevels: return DataFrame(index._internal.with_new_columns([])).reindex( columns=self.columns, fill_value=fill_value ) index_names = index._internal.index_names scols = index._internal.index_spark_columns labels = index._internal.spark_frame.select( [scol.alias(index_column) for scol, index_column in zip(scols, index_columns)] ) else: kser = ks.Series(list(index)) labels = kser._internal.spark_frame.select(kser.spark.column.alias(index_columns[0])) index_names = self._internal.index_names if fill_value is not None: frame_index_columns = [ verify_temp_column_name(frame, "__frame_index_column_{}__".format(i)) for i in range(nlevels) ] index_scols = [ scol_for(frame, index_col).alias(frame_index_col) for index_col, frame_index_col in zip(index_columns, frame_index_columns) ] scols = self._internal.resolved_copy.data_spark_columns frame = frame.select(index_scols + scols) temp_fill_value = verify_temp_column_name(frame, "__fill_value__") labels = labels.withColumn(temp_fill_value, F.lit(fill_value)) frame_index_scols = [scol_for(frame, col) for col in frame_index_columns] labels_index_scols = [scol_for(labels, col) for col in index_columns] joined_df = frame.join( labels, on=[fcol == lcol for fcol, lcol in zip(frame_index_scols, labels_index_scols)], how="right", ) joined_df = joined_df.select( *labels_index_scols, *[ F.when( reduce( lambda c1, c2: c1 & c2, [ fcol.isNull() & lcol.isNotNull() for fcol, lcol in zip(frame_index_scols, labels_index_scols) ], ), scol_for(joined_df, temp_fill_value), ) .otherwise(scol_for(joined_df, col)) .alias(col) for col in self._internal.data_spark_column_names ], ) else: joined_df = frame.join(labels, on=index_columns, how="right") sdf = joined_df.drop(NATURAL_ORDER_COLUMN_NAME) internal = self._internal.copy( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, col) for col in self._internal.index_spark_column_names ], 
index_names=index_names, index_dtypes=None, # TODO: dtypes? data_spark_columns=[ scol_for(sdf, col) for col in self._internal.data_spark_column_names ], ) return DataFrame(internal) def _reindex_columns(self, columns, fill_value): level = self._internal.column_labels_level if level > 1: label_columns = list(columns) for col in label_columns: if not isinstance(col, tuple): raise TypeError("Expected tuple, got {}".format(type(col).__name__)) else: label_columns = [(col,) for col in columns] for col in label_columns: if len(col) != level: raise ValueError( "shape (1,{}) doesn't match the shape (1,{})".format(len(col), level) ) fill_value = np.nan if fill_value is None else fill_value scols_or_ksers, labels = [], [] for label in label_columns: if label in self._internal.column_labels: scols_or_ksers.append(self._kser_for(label)) else: scols_or_ksers.append(F.lit(fill_value).alias(name_like_string(label))) labels.append(label) if isinstance(columns, pd.Index): column_label_names = [ name if is_name_like_tuple(name) else (name,) for name in columns.names ] internal = self._internal.with_new_columns( scols_or_ksers, column_labels=labels, column_label_names=column_label_names ) else: internal = self._internal.with_new_columns(scols_or_ksers, column_labels=labels) return DataFrame(internal) def reindex_like(self, other: "DataFrame", copy: bool = True) -> "DataFrame": """ Return a DataFrame with matching indices as other object. Conform the object to the same index on all axes. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : DataFrame Its row and column indices are used to define the new indices of this object. copy : bool, default True Return a new object, even if the passed indexes are the same. Returns ------- DataFrame DataFrame with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = ks.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = ks.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... '2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1).sort_index() # doctest: +NORMALIZE_WHITESPACE temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN None 2014-02-15 35.1 NaN medium """ if isinstance(other, DataFrame): return self.reindex(index=other.index, columns=other.columns, copy=copy) else: raise TypeError("other must be a Koalas DataFrame") def melt(self, id_vars=None, value_vars=None, var_name=None, value_name="value") -> "DataFrame": """ Unpivot a DataFrame from wide format to long format, optionally leaving identifier variables set. 
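        A sketch of the shape change: with the default ``value_vars``, an
        ``m x n`` frame with ``k`` identifier columns becomes ``m * (n - k)``
        rows over the columns ``id_vars + ['variable', 'value']``.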
This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. Parameters ---------- frame : DataFrame id_vars : tuple, list, or ndarray, optional Column(s) to use as identifier variables. value_vars : tuple, list, or ndarray, optional Column(s) to unpivot. If not specified, uses all columns that are not set as `id_vars`. var_name : scalar, default 'variable' Name to use for the 'variable' column. If None it uses `frame.columns.name` or ‘variable’. value_name : scalar, default 'value' Name to use for the 'value' column. Returns ------- DataFrame Unpivoted DataFrame. Examples -------- >>> df = ks.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, ... 'B': {0: 1, 1: 3, 2: 5}, ... 'C': {0: 2, 1: 4, 2: 6}}, ... columns=['A', 'B', 'C']) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> ks.melt(df) variable value 0 A a 1 B 1 2 C 2 3 A b 4 B 3 5 C 4 6 A c 7 B 5 8 C 6 >>> df.melt(id_vars='A') A variable value 0 a B 1 1 a C 2 2 b B 3 3 b C 4 4 c B 5 5 c C 6 >>> df.melt(value_vars='A') variable value 0 A a 1 A b 2 A c >>> ks.melt(df, id_vars=['A', 'B']) A B variable value 0 a 1 C 2 1 b 3 C 4 2 c 5 C 6 >>> df.melt(id_vars=['A'], value_vars=['C']) A variable value 0 a C 2 1 b C 4 2 c C 6 The names of 'variable' and 'value' columns can be customized: >>> ks.melt(df, id_vars=['A'], value_vars=['B'], ... var_name='myVarname', value_name='myValname') A myVarname myValname 0 a B 1 1 b B 3 2 c B 5 """ column_labels = self._internal.column_labels if id_vars is None: id_vars = [] else: if isinstance(id_vars, tuple): if self._internal.column_labels_level == 1: id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars] else: raise ValueError( "id_vars must be a list of tuples" " when columns are a MultiIndex" ) elif is_name_like_value(id_vars): id_vars = [(id_vars,)] else: id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars] non_existence_col = [idv for idv in id_vars if idv not in column_labels] if len(non_existence_col) != 0: raveled_column_labels = np.ravel(column_labels) missing = [ nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels ] if len(missing) != 0: raise KeyError( "The following 'id_vars' are not present" " in the DataFrame: {}".format(missing) ) else: raise KeyError( "None of {} are in the {}".format(non_existence_col, column_labels) ) if value_vars is None: value_vars = [] else: if isinstance(value_vars, tuple): if self._internal.column_labels_level == 1: value_vars = [ valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars ] else: raise ValueError( "value_vars must be a list of tuples" " when columns are a MultiIndex" ) elif is_name_like_value(value_vars): value_vars = [(value_vars,)] else: value_vars = [valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars] non_existence_col = [valv for valv in value_vars if valv not in column_labels] if len(non_existence_col) != 0: raveled_column_labels = np.ravel(column_labels) missing = [ nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels ] if len(missing) != 0: raise KeyError( "The following 'value_vars' are not present" " in the DataFrame: {}".format(missing) ) else: raise KeyError( "None of {} are in the {}".format(non_existence_col, column_labels) ) if len(value_vars) == 0: value_vars = column_labels column_labels = [label for 
label in column_labels if label not in id_vars] sdf = self._internal.spark_frame if var_name is None: if ( self._internal.column_labels_level == 1 and self._internal.column_label_names[0] is None ): var_name = ["variable"] else: var_name = [ name_like_string(name) if name is not None else "variable_{}".format(i) for i, name in enumerate(self._internal.column_label_names) ] elif isinstance(var_name, str): var_name = [var_name] pairs = F.explode( F.array( *[ F.struct( *( [F.lit(c).alias(name) for c, name in zip(label, var_name)] + [self._internal.spark_column_for(label).alias(value_name)] ) ) for label in column_labels if label in value_vars ] ) ) columns = ( [ self._internal.spark_column_for(label).alias(name_like_string(label)) for label in id_vars ] + [F.col("pairs.`%s`" % name) for name in var_name] + [F.col("pairs.`%s`" % value_name)] ) exploded_df = sdf.withColumn("pairs", pairs).select(columns) return DataFrame( InternalFrame( spark_frame=exploded_df, index_spark_columns=None, column_labels=( [label if len(label) == 1 else (name_like_string(label),) for label in id_vars] + [(name,) for name in var_name] + [(value_name,)] ), ) ) def stack(self) -> Union["DataFrame", "Series"]: """ Stack the prescribed level(s) from columns to index. Return a reshaped DataFrame or Series having a multi-level index with one or more new inner-most levels compared to the current DataFrame. The new inner-most levels are created by pivoting the columns of the current dataframe: - if the columns have a single level, the output is a Series; - if the columns have multiple levels, the new index level(s) is (are) taken from the prescribed level(s) and the output is a DataFrame. The new index levels are sorted. Returns ------- DataFrame or Series Stacked dataframe or series. See Also -------- DataFrame.unstack : Unstack prescribed level(s) from index axis onto column axis. DataFrame.pivot : Reshape dataframe from long format to wide format. DataFrame.pivot_table : Create a spreadsheet-style pivot table as a DataFrame. Notes ----- The function is named by analogy with a collection of books being reorganized from being side by side on a horizontal position (the columns of the dataframe) to being stacked vertically on top of each other (in the index of the dataframe). Examples -------- **Single level columns** >>> df_single_level_cols = ks.DataFrame([[0, 1], [2, 3]], ... index=['cat', 'dog'], ... columns=['weight', 'height']) Stacking a dataframe with a single level column axis returns a Series: >>> df_single_level_cols weight height cat 0 1 dog 2 3 >>> df_single_level_cols.stack().sort_index() cat height 1 weight 0 dog height 3 weight 2 dtype: int64 **Multi level columns: simple case** >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('weight', 'pounds')]) >>> df_multi_level_cols1 = ks.DataFrame([[1, 2], [2, 4]], ... index=['cat', 'dog'], ... columns=multicol1) Stacking a dataframe with a multi-level column axis: >>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE weight kg pounds cat 1 2 dog 2 4 >>> df_multi_level_cols1.stack().sort_index() weight cat kg 1 pounds 2 dog kg 2 pounds 4 **Missing values** >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('height', 'm')]) >>> df_multi_level_cols2 = ks.DataFrame([[1.0, 2.0], [3.0, 4.0]], ... index=['cat', 'dog'], ... columns=multicol2) It is common to have missing values when stacking a dataframe with multi-level columns, as the stacked dataframe typically has more values than the original dataframe. 
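        (For instance, stacking the two-level columns ``('weight', 'kg')`` and
        ``('height', 'm')`` yields the inner index values ``kg`` and ``m`` for
        every row, even though each of them exists under only one top-level
        label.)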
Missing values are filled with NaNs: >>> df_multi_level_cols2 weight height kg m cat 1.0 2.0 dog 3.0 4.0 >>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP height weight cat kg NaN 1.0 m 2.0 NaN dog kg NaN 3.0 m 4.0 NaN """ from databricks.koalas.series import first_series if len(self._internal.column_labels) == 0: return DataFrame( self._internal.copy( column_label_names=self._internal.column_label_names[:-1] ).with_filter(F.lit(False)) ) column_labels = defaultdict(dict) # type: Union[defaultdict, OrderedDict] index_values = set() should_returns_series = False for label in self._internal.column_labels: new_label = label[:-1] if len(new_label) == 0: new_label = None should_returns_series = True value = label[-1] scol = self._internal.spark_column_for(label) column_labels[new_label][value] = scol index_values.add(value) column_labels = OrderedDict(sorted(column_labels.items(), key=lambda x: x[0])) index_name = self._internal.column_label_names[-1] column_label_names = self._internal.column_label_names[:-1] if len(column_label_names) == 0: column_label_names = [None] index_column = SPARK_INDEX_NAME_FORMAT(self._internal.index_level) data_columns = [name_like_string(label) for label in column_labels] structs = [ F.struct( [F.lit(value).alias(index_column)] + [ ( column_labels[label][value] if value in column_labels[label] else F.lit(None) ).alias(name) for label, name in zip(column_labels, data_columns) ] ).alias(value) for value in index_values ] pairs = F.explode(F.array(structs)) sdf = self._internal.spark_frame.withColumn("pairs", pairs) sdf = sdf.select( self._internal.index_spark_columns + [sdf["pairs"][index_column].alias(index_column)] + [sdf["pairs"][name].alias(name) for name in data_columns] ) internal = InternalFrame( # TODO: dtypes? spark_frame=sdf, index_spark_columns=[ scol_for(sdf, col) for col in (self._internal.index_spark_column_names + [index_column]) ], index_names=self._internal.index_names + [index_name], index_dtypes=self._internal.index_dtypes + [None], column_labels=list(column_labels), data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, # type: ignore ) kdf = DataFrame(internal) # type: "DataFrame" if should_returns_series: return first_series(kdf) else: return kdf def unstack(self) -> Union["DataFrame", "Series"]: """ Pivot the (necessarily hierarchical) index labels. Returns a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series. .. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and it could cause a serious performance degradation since Spark partitions it row based. Returns ------- Series or DataFrame See Also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack). Examples -------- >>> df = ks.DataFrame({"A": {"0": "a", "1": "b", "2": "c"}, ... "B": {"0": "1", "1": "3", "2": "5"}, ... "C": {"0": "2", "1": "4", "2": "6"}}, ... columns=["A", "B", "C"]) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> df.unstack().sort_index() A 0 a 1 b 2 c B 0 1 1 3 2 5 C 0 2 1 4 2 6 dtype: object >>> df.columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')]) >>> df.unstack().sort_index() X A 0 a 1 b 2 c B 0 1 1 3 2 5 Y C 0 2 1 4 2 6 dtype: object For MultiIndex case: >>> df = ks.DataFrame({"A": ["a", "b", "c"], ... "B": [1, 3, 5], ... "C": [2, 4, 6]}, ... 
columns=["A", "B", "C"]) >>> df = df.set_index('A', append=True) >>> df # doctest: +NORMALIZE_WHITESPACE B C A 0 a 1 2 1 b 3 4 2 c 5 6 >>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE B C A a b c a b c 0 1.0 NaN NaN 2.0 NaN NaN 1 NaN 3.0 NaN NaN 4.0 NaN 2 NaN NaN 5.0 NaN NaN 6.0 """ from databricks.koalas.series import first_series if self._internal.index_level > 1: # The index after `reset_index()` will never be used, so use "distributed" index # as a dummy to avoid overhead. with option_context("compute.default_index_type", "distributed"): df = self.reset_index() index = df._internal.column_labels[: self._internal.index_level - 1] columns = df.columns[self._internal.index_level - 1] df = df.pivot_table( index=index, columns=columns, values=self._internal.column_labels, aggfunc="first" ) internal = df._internal.copy( index_names=self._internal.index_names[:-1], index_dtypes=self._internal.index_dtypes[:-1], column_label_names=( df._internal.column_label_names[:-1] + [ None if self._internal.index_names[-1] is None else df._internal.column_label_names[-1] ] ), ) return DataFrame(internal) # TODO: Codes here are similar with melt. Should we deduplicate? column_labels = self._internal.column_labels ser_name = SPARK_DEFAULT_SERIES_NAME sdf = self._internal.spark_frame new_index_columns = [ SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level) ] new_index_map = list(zip(new_index_columns, self._internal.column_label_names)) pairs = F.explode( F.array( *[ F.struct( *( [F.lit(c).alias(name) for c, name in zip(idx, new_index_columns)] + [self._internal.spark_column_for(idx).alias(ser_name)] ) ) for idx in column_labels ] ) ) columns = [ F.col("pairs.%s" % name) for name in new_index_columns[: self._internal.column_labels_level] ] + [F.col("pairs.%s" % ser_name)] new_index_len = len(new_index_columns) existing_index_columns = [] for i, index_name in enumerate(self._internal.index_names): new_index_map.append((SPARK_INDEX_NAME_FORMAT(i + new_index_len), index_name)) existing_index_columns.append( self._internal.index_spark_columns[i].alias( SPARK_INDEX_NAME_FORMAT(i + new_index_len) ) ) exploded_df = sdf.withColumn("pairs", pairs).select(existing_index_columns + columns) index_spark_column_names, index_names = zip(*new_index_map) return first_series( DataFrame( InternalFrame( # TODO: dtypes? exploded_df, index_spark_columns=[ scol_for(exploded_df, col) for col in index_spark_column_names ], index_names=list(index_names), column_labels=[None], ) ) ) # TODO: axis, skipna, and many arguments should be implemented. def all(self, axis: Union[int, str] = 0) -> "Series": """ Return whether all elements are True. Returns True unless there is at least one element within a series that is False or equivalent (e.g. zero or empty) Parameters ---------- axis : {0 or 'index'}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. Returns ------- Series Examples -------- Create a dataframe from a dictionary. >>> df = ks.DataFrame({ ... 'col1': [True, True, True], ... 'col2': [True, False, False], ... 'col3': [0, 0, 0], ... 'col4': [1, 2, 3], ... 'col5': [True, True, None], ... 'col6': [True, False, None]}, ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6']) Default behaviour checks if column-wise values all return a boolean. 
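        Missing values are ignored: a column holding only ``True`` and ``None``
        still evaluates to ``True`` (``col5`` below), because each value is
        coalesced to ``True`` before taking the column-wise minimum, roughly
        ``F.min(F.coalesce(col.cast('boolean'), F.lit(True)))``.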
>>> df.all() col1 True col2 False col3 False col4 True col5 True col6 False dtype: bool """ from databricks.koalas.series import first_series axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') applied = [] column_labels = self._internal.column_labels for label in column_labels: scol = self._internal.spark_column_for(label) all_col = F.min(F.coalesce(scol.cast("boolean"), F.lit(True))) applied.append(F.when(all_col.isNull(), True).otherwise(all_col)) # TODO: there is a similar logic to transpose in, for instance, # DataFrame.any, Series.quantile. Maybe we should deduplicate it. value_column = "value" cols = [] for label, applied_col in zip(column_labels, applied): cols.append( F.struct( [F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)] + [applied_col.alias(value_column)] ) ) sdf = self._internal.spark_frame.select(F.array(*cols).alias("arrays")).select( F.explode(F.col("arrays")) ) sdf = sdf.selectExpr("col.*") internal = InternalFrame( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i)) for i in range(self._internal.column_labels_level) ], index_names=self._internal.column_label_names, column_labels=[None], data_spark_columns=[scol_for(sdf, value_column)], ) return first_series(DataFrame(internal)) # TODO: axis, skipna, and many arguments should be implemented. def any(self, axis: Union[int, str] = 0) -> "Series": """ Return whether any element is True. Returns False unless there is at least one element within a series that is True or equivalent (e.g. non-zero or non-empty). Parameters ---------- axis : {0 or 'index'}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. Returns ------- Series Examples -------- Create a dataframe from a dictionary. >>> df = ks.DataFrame({ ... 'col1': [False, False, False], ... 'col2': [True, False, False], ... 'col3': [0, 0, 1], ... 'col4': [0, 1, 2], ... 'col5': [False, False, None], ... 'col6': [True, False, None]}, ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6']) Default behaviour checks if column-wise values all return a boolean. >>> df.any() col1 False col2 True col3 True col4 True col5 False col6 True dtype: bool """ from databricks.koalas.series import first_series axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') applied = [] column_labels = self._internal.column_labels for label in column_labels: scol = self._internal.spark_column_for(label) all_col = F.max(F.coalesce(scol.cast("boolean"), F.lit(False))) applied.append(F.when(all_col.isNull(), False).otherwise(all_col)) # TODO: there is a similar logic to transpose in, for instance, # DataFrame.all, Series.quantile. Maybe we should deduplicate it. 
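        # A sketch of the transpose step below: one struct per column label
        # (its label levels plus the aggregated boolean) is packed into an
        # array and exploded, turning the single aggregated row into one row
        # per original column, which then backs the returned Series.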
value_column = "value" cols = [] for label, applied_col in zip(column_labels, applied): cols.append( F.struct( [F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)] + [applied_col.alias(value_column)] ) ) sdf = self._internal.spark_frame.select(F.array(*cols).alias("arrays")).select( F.explode(F.col("arrays")) ) sdf = sdf.selectExpr("col.*") internal = InternalFrame( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i)) for i in range(self._internal.column_labels_level) ], index_names=self._internal.column_label_names, column_labels=[None], data_spark_columns=[scol_for(sdf, value_column)], ) return first_series(DataFrame(internal)) # TODO: add axis, numeric_only, pct, na_option parameter def rank(self, method="average", ascending=True) -> "DataFrame": """ Compute numerical data ranks (1 through n) along axis. Equal values are assigned a rank that is the average of the ranks of those values. .. note:: the current implementation of rank uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'} * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups ascending : boolean, default True False for ranks by high (1) to low (N) Returns ------- ranks : same type as caller Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B']) >>> df A B 0 1 4 1 2 3 2 2 2 3 3 1 >>> df.rank().sort_index() A B 0 1.0 4.0 1 2.5 3.0 2 2.5 2.0 3 4.0 1.0 If method is set to 'min', it use lowest rank in group. >>> df.rank(method='min').sort_index() A B 0 1.0 4.0 1 2.0 3.0 2 2.0 2.0 3 4.0 1.0 If method is set to 'max', it use highest rank in group. >>> df.rank(method='max').sort_index() A B 0 1.0 4.0 1 3.0 3.0 2 3.0 2.0 3 4.0 1.0 If method is set to 'dense', it leaves no gaps in group. >>> df.rank(method='dense').sort_index() A B 0 1.0 4.0 1 2.0 3.0 2 2.0 2.0 3 3.0 1.0 """ return self._apply_series_op( lambda kser: kser._rank(method=method, ascending=ascending), should_resolve=True ) def filter(self, items=None, like=None, regex=None, axis=None) -> "DataFrame": """ Subset rows or columns of dataframe according to labels in the specified index. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : string Keep labels from axis for which "like in label == True". regex : string (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object See Also -------- DataFrame.loc Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = ks.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... 
columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 For a Series, >>> # select rows by name >>> df.one.filter(items=['rabbit']) rabbit 4 Name: one, dtype: int64 >>> # select rows by regular expression >>> df.one.filter(regex='e$') mouse 1 Name: one, dtype: int64 >>> # select rows containing 'bbi' >>> df.one.filter(like='bbi') rabbit 4 Name: one, dtype: int64 """ if sum(x is not None for x in (items, like, regex)) > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) axis = validate_axis(axis, none_axis=1) index_scols = self._internal.index_spark_columns if items is not None: if is_list_like(items): items = list(items) else: raise ValueError("items should be a list-like object.") if axis == 0: if len(index_scols) == 1: col = None for item in items: if col is None: col = index_scols[0] == F.lit(item) else: col = col | (index_scols[0] == F.lit(item)) elif len(index_scols) > 1: # for multi-index col = None for item in items: if not isinstance(item, tuple): raise TypeError("Unsupported type {}".format(type(item).__name__)) if not item: raise ValueError("The item should not be empty.") midx_col = None for i, element in enumerate(item): if midx_col is None: midx_col = index_scols[i] == F.lit(element) else: midx_col = midx_col & (index_scols[i] == F.lit(element)) if col is None: col = midx_col else: col = col | midx_col else: raise ValueError("Single or multi index must be specified.") return DataFrame(self._internal.with_filter(col)) else: return self[items] elif like is not None: if axis == 0: col = None for index_scol in index_scols: if col is None: col = index_scol.contains(like) else: col = col | index_scol.contains(like) return DataFrame(self._internal.with_filter(col)) else: column_labels = self._internal.column_labels output_labels = [label for label in column_labels if any(like in i for i in label)] return self[output_labels] elif regex is not None: if axis == 0: col = None for index_scol in index_scols: if col is None: col = index_scol.rlike(regex) else: col = col | index_scol.rlike(regex) return DataFrame(self._internal.with_filter(col)) else: column_labels = self._internal.column_labels matcher = re.compile(regex) output_labels = [ label for label in column_labels if any(matcher.search(i) is not None for i in label) ] return self[output_labels] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def rename( self, mapper=None, index=None, columns=None, axis="index", inplace=False, level=None, errors="ignore", ) -> Optional["DataFrame"]: """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don’t throw an error. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis’ values. Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index` and `columns`. index : dict-like or function Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper"). columns : dict-like or function Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper"). axis : int or str, default 'index' Axis to target with mapper. 
Can be either the axis name ('index', 'columns') or number (0, 1). inplace : bool, default False Whether to return a new DataFrame. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame with the renamed axis labels. Raises ------ `KeyError` If any of the labels is not found in the selected axis and "errors='raise'". Examples -------- >>> kdf1 = ks.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> kdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE a c 0 1 4 1 2 5 2 3 6 >>> kdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE A B 0 1 4 10 2 5 20 3 6 >>> def str_lower(s) -> str: ... return str.lower(s) >>> kdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE a b 0 1 4 1 2 5 2 3 6 >>> def mul10(x) -> int: ... return x * 10 >>> kdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE A B 0 1 4 10 2 5 20 3 6 >>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]) >>> kdf2 = ks.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx) >>> kdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE x y A B C D 0 1 2 3 4 1 5 6 7 8 >>> kdf3 = ks.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab')) >>> kdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE a b x a 1 2 b 3 4 y c 5 6 d 7 8 """ def gen_mapper_fn(mapper): if isinstance(mapper, dict): if len(mapper) == 0: if errors == "raise": raise KeyError("Index include label which is not in the `mapper`.") else: return DataFrame(self._internal) type_set = set(map(lambda x: type(x), mapper.values())) if len(type_set) > 1: raise ValueError("Mapper dict should have the same value type.") spark_return_type = as_spark_type(list(type_set)[0]) def mapper_fn(x): if x in mapper: return mapper[x] else: if errors == "raise": raise KeyError("Index include value which is not in the `mapper`") return x elif callable(mapper): spark_return_type = infer_return_type(mapper).tpe def mapper_fn(x): return mapper(x) else: raise ValueError( "`mapper` or `index` or `columns` should be " "either dict-like or function type." ) return mapper_fn, spark_return_type index_mapper_fn = None index_mapper_ret_stype = None columns_mapper_fn = None inplace = validate_bool_kwarg(inplace, "inplace") if mapper: axis = validate_axis(axis) if axis == 0: index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(mapper) elif axis == 1: columns_mapper_fn, columns_mapper_ret_stype = gen_mapper_fn(mapper) else: raise ValueError( "argument axis should be either the axis name " "(‘index’, ‘columns’) or number (0, 1)" ) else: if index: index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(index) if columns: columns_mapper_fn, _ = gen_mapper_fn(columns) if not index and not columns: raise ValueError("Either `index` or `columns` should be provided.") kdf = self.copy() if index_mapper_fn: # rename index labels, if `level` is None, rename all index columns, otherwise only # rename the corresponding level index. 
# implement this by transform the underlying spark dataframe, # Example: # suppose the kdf index column in underlying spark dataframe is "index_0", "index_1", # if rename level 0 index labels, will do: # ``kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))`` # if rename all index labels (`level` is None), then will do: # ``` # kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0")) # .withColumn("index_1", mapper_fn_udf(col("index_1")) # ``` index_columns = kdf._internal.index_spark_column_names num_indices = len(index_columns) if level: if level < 0 or level >= num_indices: raise ValueError("level should be an integer between [0, num_indices)") def gen_new_index_column(level): index_col_name = index_columns[level] index_mapper_udf = pandas_udf( lambda s: s.map(index_mapper_fn), returnType=index_mapper_ret_stype ) return index_mapper_udf(scol_for(kdf._internal.spark_frame, index_col_name)) sdf = kdf._internal.resolved_copy.spark_frame index_dtypes = self._internal.index_dtypes.copy() if level is None: for i in range(num_indices): sdf = sdf.withColumn(index_columns[i], gen_new_index_column(i)) index_dtypes[i] = None # TODO: dtype? else: sdf = sdf.withColumn(index_columns[level], gen_new_index_column(level)) index_dtypes[level] = None # TODO: dtype? kdf = DataFrame(kdf._internal.with_new_sdf(sdf, index_dtypes=index_dtypes)) if columns_mapper_fn: # rename column name. # Will modify the `_internal._column_labels` and transform underlying spark dataframe # to the same column name with `_internal._column_labels`. if level: if level < 0 or level >= kdf._internal.column_labels_level: raise ValueError("level should be an integer between [0, column_labels_level)") def gen_new_column_labels_entry(column_labels_entry): if isinstance(column_labels_entry, tuple): if level is None: # rename all level columns return tuple(map(columns_mapper_fn, column_labels_entry)) else: # only rename specified level column entry_list = list(column_labels_entry) entry_list[level] = columns_mapper_fn(entry_list[level]) return tuple(entry_list) else: return columns_mapper_fn(column_labels_entry) new_column_labels = list(map(gen_new_column_labels_entry, kdf._internal.column_labels)) new_data_scols = [ kdf._kser_for(old_label).rename(new_label) for old_label, new_label in zip(kdf._internal.column_labels, new_column_labels) ] kdf = DataFrame(kdf._internal.with_new_columns(new_data_scols)) if inplace: self._update_internal_frame(kdf._internal) return None else: return kdf def rename_axis( self, mapper: Optional[Any] = None, index: Optional[Any] = None, columns: Optional[Any] = None, axis: Optional[Union[int, str]] = 0, inplace: Optional[bool] = False, ) -> Optional["DataFrame"]: """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional A scalar, list-like, dict-like or functions transformations to apply to the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. inplace : bool, default False Modifies the object directly, instead of creating a new DataFrame. Returns ------- DataFrame, or None if `inplace` is True. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. 
Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. The second calling convention will modify the names of the corresponding index specified by axis. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- >>> df = ks.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... index=["dog", "cat", "monkey"], ... columns=["num_legs", "num_arms"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal").sort_index() >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_arms animal cat 4 0 dog 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns").sort_index() >>> df # doctest: +NORMALIZE_WHITESPACE limbs num_legs num_arms animal cat 4 0 dog 4 0 monkey 2 2 **MultiIndex** >>> index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... names=['type', 'name']) >>> df = ks.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... index=index, ... columns=["num_legs", "num_arms"]) >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE num_legs num_arms class name mammal cat 4 0 dog 4 0 monkey 2 2 >>> df.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE num_legs num_arms TYPE NAME mammal cat 4 0 dog 4 0 monkey 2 2 """ def gen_names(v, curnames): if is_scalar(v): newnames = [v] elif is_list_like(v) and not is_dict_like(v): newnames = list(v) elif is_dict_like(v): newnames = [v[name] if name in v else name for name in curnames] elif callable(v): newnames = [v(name) for name in curnames] else: raise ValueError( "`mapper` or `index` or `columns` should be " "either dict-like or function type." ) if len(newnames) != len(curnames): raise ValueError( "Length of new names must be {}, got {}".format(len(curnames), len(newnames)) ) return [name if is_name_like_tuple(name) else (name,) for name in newnames] if mapper is not None and (index is not None or columns is not None): raise TypeError("Cannot specify both 'mapper' and any of 'index' or 'columns'.") if mapper is not None: axis = validate_axis(axis) if axis == 0: index = mapper elif axis == 1: columns = mapper column_label_names = ( gen_names(columns, self.columns.names) if columns is not None else self._internal.column_label_names ) index_names = ( gen_names(index, self.index.names) if index is not None else self._internal.index_names ) internal = self._internal.copy( index_names=index_names, column_label_names=column_label_names ) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def keys(self) -> pd.Index: """ Return alias for columns. Returns ------- Index Columns of the DataFrame. Examples -------- >>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', 'sidewinder'], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 sidewinder 7 8 >>> df.keys() Index(['max_speed', 'shield'], dtype='object') """ return self.columns def pct_change(self, periods=1) -> "DataFrame": """ Percentage change between the current and a prior element. .. 
note:: the current implementation of this API uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. Returns ------- DataFrame Examples -------- Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = ks.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 You can set periods to shift for forming percent change >>> df.pct_change(2) FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 NaN NaN NaN 1980-03-01 0.067912 0.073814 0.06883 """ window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods) def op(kser): prev_row = F.lag(kser.spark.column, periods).over(window) return ((kser.spark.column - prev_row) / prev_row).alias( kser._internal.data_spark_column_names[0] ) return self._apply_series_op(op, should_resolve=True) # TODO: axis = 1 def idxmax(self, axis=0) -> "Series": """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. .. note:: This API collect all rows with maximum value using `to_pandas()` because we suppose the number of rows with max values are usually small in general. Parameters ---------- axis : 0 or 'index' Can only be set to 0 at the moment. Returns ------- Series See Also -------- Series.idxmax Examples -------- >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> kdf a b c 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmax() a 2 b 0 c 2 dtype: int64 For Multi-column Index >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> kdf a b c x y z 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmax() a x 2 b y 0 c z 2 dtype: int64 """ max_cols = map(lambda scol: F.max(scol), self._internal.data_spark_columns) sdf_max = self._internal.spark_frame.select(*max_cols).head() # `sdf_max` looks like below # +------+------+------+ # |(a, x)|(b, y)|(c, z)| # +------+------+------+ # | 3| 4.0| 400| # +------+------+------+ conds = ( scol == max_val for scol, max_val in zip(self._internal.data_spark_columns, sdf_max) ) cond = reduce(lambda x, y: x | y, conds) kdf = DataFrame(self._internal.with_filter(cond)) # type: "DataFrame" return cast(ks.Series, ks.from_pandas(kdf._to_internal_pandas().idxmax())) # TODO: axis = 1 def idxmin(self, axis=0) -> "Series": """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. .. note:: This API collect all rows with minimum value using `to_pandas()` because we suppose the number of rows with min values are usually small in general. Parameters ---------- axis : 0 or 'index' Can only be set to 0 at the moment. Returns ------- Series See Also -------- Series.idxmin Examples -------- >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 
'c': [300, 200, 400, 200]}) >>> kdf a b c 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmin() a 0 b 3 c 1 dtype: int64 For Multi-column Index >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> kdf a b c x y z 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmin() a x 0 b y 3 c z 1 dtype: int64 """ min_cols = map(lambda scol: F.min(scol), self._internal.data_spark_columns) sdf_min = self._internal.spark_frame.select(*min_cols).head() conds = ( scol == min_val for scol, min_val in zip(self._internal.data_spark_columns, sdf_min) ) cond = reduce(lambda x, y: x | y, conds) kdf = DataFrame(self._internal.with_filter(cond)) # type: "DataFrame" return cast(ks.Series, ks.from_pandas(kdf._to_internal_pandas().idxmin())) def info(self, verbose=None, buf=None, max_cols=None, null_counts=None) -> None: """ Print a concise summary of a DataFrame. This method prints information about a DataFrame including the index dtype and column dtypes, non-null values and memory usage. Parameters ---------- verbose : bool, optional Whether to print the full summary. buf : writable buffer, defaults to sys.stdout Where to send the output. By default, the output is printed to sys.stdout. Pass a writable buffer if you need to further process the output. max_cols : int, optional When to switch from the verbose to the truncated output. If the DataFrame has more than `max_cols` columns, the truncated output is used. null_counts : bool, optional Whether to show the non-null counts. Returns ------- None This method prints a summary of a DataFrame and returns None. See Also -------- DataFrame.describe: Generate descriptive statistics of DataFrame columns. Examples -------- >>> int_values = [1, 2, 3, 4, 5] >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] >>> df = ks.DataFrame( ... {"int_col": int_values, "text_col": text_values, "float_col": float_values}, ... columns=['int_col', 'text_col', 'float_col']) >>> df int_col text_col float_col 0 1 alpha 0.00 1 2 beta 0.25 2 3 gamma 0.50 3 4 delta 0.75 4 5 epsilon 1.00 Prints information of all columns: >>> df.info(verbose=True) # doctest: +SKIP <class 'databricks.koalas.frame.DataFrame'> Index: 5 entries, 0 to 4 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 int_col 5 non-null int64 1 text_col 5 non-null object 2 float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) Prints a summary of columns count and its dtypes but not per column information: >>> df.info(verbose=False) # doctest: +SKIP <class 'databricks.koalas.frame.DataFrame'> Index: 5 entries, 0 to 4 Columns: 3 entries, int_col to float_col dtypes: float64(1), int64(1), object(1) Pipe output of DataFrame.info to buffer instead of sys.stdout, get buffer content and writes to a text file: >>> import io >>> buffer = io.StringIO() >>> df.info(buf=buffer) >>> s = buffer.getvalue() >>> with open('%s/info.txt' % path, "w", ... encoding="utf-8") as f: ... _ = f.write(s) >>> with open('%s/info.txt' % path) as f: ... 
f.readlines() # doctest: +SKIP ["<class 'databricks.koalas.frame.DataFrame'>\\n", 'Index: 5 entries, 0 to 4\\n', 'Data columns (total 3 columns):\\n', ' # Column Non-Null Count Dtype \\n', '--- ------ -------------- ----- \\n', ' 0 int_col 5 non-null int64 \\n', ' 1 text_col 5 non-null object \\n', ' 2 float_col 5 non-null float64\\n', 'dtypes: float64(1), int64(1), object(1)'] """ # To avoid pandas' existing config affects Koalas. # TODO: should we have corresponding Koalas configs? with pd.option_context( "display.max_info_columns", sys.maxsize, "display.max_info_rows", sys.maxsize ): try: # hack to use pandas' info as is. object.__setattr__(self, "_data", self) count_func = self.count self.count = lambda: count_func().to_pandas() # type: ignore return pd.DataFrame.info( self, verbose=verbose, buf=buf, max_cols=max_cols, memory_usage=False, null_counts=null_counts, ) finally: del self._data self.count = count_func # type: ignore # TODO: fix parameter 'axis' and 'numeric_only' to work same as pandas' def quantile( self, q: Union[float, Iterable[float]] = 0.5, axis: Union[int, str] = 0, numeric_only: bool = True, accuracy: int = 10000, ) -> Union["DataFrame", "Series"]: """ Return value at the given quantile. .. note:: Unlike pandas', the quantile in Koalas is an approximated quantile based upon approximate percentile computation because computing quantile across a large dataset is extremely expensive. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. axis : int or str, default 0 or 'index' Can only be set to 0 at the moment. numeric_only : bool, default True If False, the quantile of datetime and timedelta data will be computed as well. Can only be set to True at the moment. accuracy : int, optional Default accuracy of approximation. Larger value means better accuracy. The relative error can be deduced by 1.0 / accuracy. Returns ------- Series or DataFrame If q is an array, a DataFrame will be returned where the index is q, the columns are the columns of self, and the values are the quantiles. If q is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. Examples -------- >>> kdf = ks.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]}) >>> kdf a b 0 1 6 1 2 7 2 3 8 3 4 9 4 5 0 >>> kdf.quantile(.5) a 3.0 b 7.0 Name: 0.5, dtype: float64 >>> kdf.quantile([.25, .5, .75]) a b 0.25 2.0 6.0 0.50 3.0 7.0 0.75 4.0 8.0 """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') if not isinstance(accuracy, int): raise ValueError( "accuracy must be an integer; however, got [%s]" % type(accuracy).__name__ ) if isinstance(q, Iterable): q = list(q) for v in q if isinstance(q, list) else [q]: if not isinstance(v, float): raise ValueError( "q must be a float or an array of floats; however, [%s] found." % type(v) ) if v < 0.0 or v > 1.0: raise ValueError("percentiles should all be in the interval [0, 1].") def quantile(spark_column, spark_type): if isinstance(spark_type, (BooleanType, NumericType)): return SF.percentile_approx(spark_column.cast(DoubleType()), q, accuracy) else: raise TypeError( "Could not convert {} ({}) to numeric".format( spark_type_to_pandas_dtype(spark_type), spark_type.simpleString() ) ) if isinstance(q, list): # First calculate the percentiles from all columns and map it to each `quantiles` # by creating each entry as a struct. 
So, it becomes an array of structs as below: # # +-----------------------------------------+ # | arrays| # +-----------------------------------------+ # |[[0.25, 2, 6], [0.5, 3, 7], [0.75, 4, 8]]| # +-----------------------------------------+ percentile_cols = [] percentile_col_names = [] column_labels = [] for label, column in zip( self._internal.column_labels, self._internal.data_spark_column_names ): spark_type = self._internal.spark_type_for(label) is_numeric_or_boolean = isinstance(spark_type, (NumericType, BooleanType)) keep_column = not numeric_only or is_numeric_or_boolean if keep_column: percentile_col = quantile(self._internal.spark_column_for(label), spark_type) percentile_cols.append(percentile_col.alias(column)) percentile_col_names.append(column) column_labels.append(label) if len(percentile_cols) == 0: return DataFrame(index=q) sdf = self._internal.spark_frame.select(percentile_cols) # Here, after select percentile cols, a spark_frame looks like below: # +---------+---------+ # | a| b| # +---------+---------+ # |[2, 3, 4]|[6, 7, 8]| # +---------+---------+ cols_dict = OrderedDict() # type: OrderedDict for column in percentile_col_names: cols_dict[column] = list() for i in range(len(q)): cols_dict[column].append(scol_for(sdf, column).getItem(i).alias(column)) internal_index_column = SPARK_DEFAULT_INDEX_NAME cols = [] for i, col in enumerate(zip(*cols_dict.values())): cols.append(F.struct(F.lit(q[i]).alias(internal_index_column), *col)) sdf = sdf.select(F.array(*cols).alias("arrays")) # And then, explode it and manually set the index. # +-----------------+---+---+ # |__index_level_0__| a| b| # +-----------------+---+---+ # | 0.25| 2| 6| # | 0.5| 3| 7| # | 0.75| 4| 8| # +-----------------+---+---+ sdf = sdf.select(F.explode(F.col("arrays"))).selectExpr("col.*") internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, internal_index_column)], column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in percentile_col_names], ) return DataFrame(internal) else: return self._reduce_for_stat_function( quantile, name="quantile", numeric_only=numeric_only ).rename(q) def query(self, expr, inplace=False) -> Optional["DataFrame"]: """ Query the columns of a DataFrame with a boolean expression. .. note:: Internal columns that starting with a '__' prefix are able to access, however, they are not supposed to be accessed. .. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the pandas specific syntax such as `@` is not supported. If you want the pandas syntax, you can work around with :meth:`DataFrame.koalas.apply_batch`, but you should be aware that `query_func` will be executed at different nodes in a distributed manner. So, for example, to use `@` syntax, make sure the variable is serialized by, for example, putting it within the closure as below. >>> df = ks.DataFrame({'A': range(2000), 'B': range(2000)}) >>> def query_func(pdf): ... num = 1995 ... return pdf.query('A > @num') >>> df.koalas.apply_batch(query_func) A B 1996 1996 1996 1997 1997 1997 1998 1998 1998 1999 1999 1999 Parameters ---------- expr : str The query string to evaluate. You can refer to column names that contain spaces by surrounding them in backticks. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether the query should modify the data in place or return a modified copy. Returns ------- DataFrame DataFrame resulting from the provided query expression. 
Examples -------- >>> df = ks.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ if isinstance(self.columns, pd.MultiIndex): raise ValueError("Doesn't support for MultiIndex columns") if not isinstance(expr, str): raise ValueError( "expr must be a string to be evaluated, {} given".format(type(expr).__name__) ) inplace = validate_bool_kwarg(inplace, "inplace") data_columns = [label[0] for label in self._internal.column_labels] sdf = self._internal.spark_frame.select( self._internal.index_spark_columns + [ scol.alias(col) for scol, col in zip(self._internal.data_spark_columns, data_columns) ] ).filter(expr) internal = self._internal.with_new_sdf(sdf, data_columns=data_columns) if inplace: self._update_internal_frame(internal) return None else: return DataFrame(internal) def explain(self, extended: Optional[bool] = None, mode: Optional[str] = None) -> None: warnings.warn( "DataFrame.explain is deprecated as of DataFrame.spark.explain. " "Please use the API instead.", FutureWarning, ) return self.spark.explain(extended, mode) explain.__doc__ = SparkFrameMethods.explain.__doc__ def take(self, indices, axis=0, **kwargs) -> "DataFrame": """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = ks.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]).sort_index() name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. 
>>> df.take([-1, -2]).sort_index() name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ axis = validate_axis(axis) if not is_list_like(indices) or isinstance(indices, (dict, set)): raise ValueError("`indices` must be a list-like except dict or set") if axis == 0: return cast(DataFrame, self.iloc[indices, :]) else: return cast(DataFrame, self.iloc[:, indices]) def eval(self, expr, inplace=False) -> Optional[Union["DataFrame", "Series"]]: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. Returns ------- The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Examples -------- >>> df = ks.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 """ from databricks.koalas.series import first_series if isinstance(self.columns, pd.MultiIndex): raise ValueError("`eval` is not supported for multi-index columns") inplace = validate_bool_kwarg(inplace, "inplace") should_return_series = False series_name = None should_return_scalar = False # Since `eval_func` doesn't have a type hint, inferring the schema is always preformed # in the `apply_batch`. Hence, the variables `should_return_series`, `series_name`, # and `should_return_scalar` can be updated. def eval_func(pdf): nonlocal should_return_series nonlocal series_name nonlocal should_return_scalar result_inner = pdf.eval(expr, inplace=inplace) if inplace: result_inner = pdf if isinstance(result_inner, pd.Series): should_return_series = True series_name = result_inner.name result_inner = result_inner.to_frame() elif is_scalar(result_inner): should_return_scalar = True result_inner = pd.Series(result_inner).to_frame() return result_inner result = self.koalas.apply_batch(eval_func) if inplace: # Here, the result is always a frame because the error is thrown during schema inference # from pandas. self._update_internal_frame(result._internal, requires_same_anchor=False) return None elif should_return_series: return first_series(result).rename(series_name) elif should_return_scalar: return first_series(result)[0] else: # Returns a frame return result def explode(self, column) -> "DataFrame": """ Transform each element of a list-like to a row, replicating index values. Parameters ---------- column : str or tuple Column to explode. Returns ------- DataFrame Exploded lists to rows of the subset columns; index will be duplicated for these rows. 
See Also -------- DataFrame.unstack : Pivot a level of the (necessarily hierarchical) index labels. DataFrame.melt : Unpivot a DataFrame from wide format to long format. Examples -------- >>> df = ks.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1}) >>> df A B 0 [1, 2, 3] 1 1 [] 1 2 [3, 4] 1 >>> df.explode('A') A B 0 1.0 1 0 2.0 1 0 3.0 1 1 NaN 1 2 3.0 1 2 4.0 1 """ from databricks.koalas.series import Series if not is_name_like_value(column): raise ValueError("column must be a scalar") kdf = DataFrame(self._internal.resolved_copy) # type: "DataFrame" kser = kdf[column] if not isinstance(kser, Series): raise ValueError( "The column %s is not unique. For a multi-index, the label must be a tuple " "with elements corresponding to each level." % name_like_string(column) ) if not isinstance(kser.spark.data_type, ArrayType): return self.copy() sdf = kdf._internal.spark_frame.withColumn( kser._internal.data_spark_column_names[0], F.explode_outer(kser.spark.column) ) data_dtypes = kdf._internal.data_dtypes.copy() data_dtypes[kdf._internal.column_labels.index(kser._column_label)] = None # TODO: dtype? internal = kdf._internal.with_new_sdf(sdf, data_dtypes=data_dtypes) return DataFrame(internal) def mad(self, axis=0) -> "Series": """ Return the mean absolute deviation of values. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]}, ... columns=['a', 'b']) >>> df.mad() a 0.666667 b 0.066667 dtype: float64 >>> df.mad(axis=1) 0 0.45 1 0.90 2 1.35 3 NaN dtype: float64 """ from databricks.koalas.series import first_series axis = validate_axis(axis) if axis == 0: def get_spark_column(kdf, label): scol = kdf._internal.spark_column_for(label) col_type = kdf._internal.spark_type_for(label) if isinstance(col_type, BooleanType): scol = scol.cast("integer") return scol new_column_labels = [] for label in self._internal.column_labels: # Filtering out only columns of numeric and boolean type column. dtype = self._kser_for(label).spark.data_type if isinstance(dtype, (NumericType, BooleanType)): new_column_labels.append(label) new_columns = [ F.avg(get_spark_column(self, label)).alias(name_like_string(label)) for label in new_column_labels ] mean_data = self._internal.spark_frame.select(new_columns).first() new_columns = [ F.avg( F.abs(get_spark_column(self, label) - mean_data[name_like_string(label)]) ).alias(name_like_string(label)) for label in new_column_labels ] sdf = self._internal.spark_frame.select( [F.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] + new_columns ) # The data is expected to be small so it's fine to transpose/use default index. with ks.option_context("compute.max_rows", 1): internal = InternalFrame( spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=new_column_labels, column_label_names=self._internal.column_label_names, ) return first_series(DataFrame(internal).transpose()) else: @pandas_udf(returnType=DoubleType()) def calculate_columns_axis(*cols): return pd.concat(cols, axis=1).mad(axis=1) internal = self._internal.copy( column_labels=[None], data_spark_columns=[ calculate_columns_axis(*self._internal.data_spark_columns).alias( SPARK_DEFAULT_SERIES_NAME ) ], data_dtypes=[None], column_label_names=None, ) return first_series(DataFrame(internal)) def tail(self, n=5) -> "DataFrame": """ Return the last `n` rows. This function returns last `n` rows from the object based on position. 
It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `n` rows, equivalent to ``df[n:]``. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = ks.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() # doctest: +SKIP animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) # doctest: +SKIP animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) # doctest: +SKIP animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if LooseVersion(pyspark.__version__) < LooseVersion("3.0"): raise RuntimeError("tail can be used in PySpark >= 3.0") if not isinstance(n, int): raise TypeError("bad operand type for unary -: '{}'".format(type(n).__name__)) if n < 0: n = len(self) + n if n <= 0: return ks.DataFrame(self._internal.with_filter(F.lit(False))) # Should use `resolved_copy` here for the case like `(kdf + 1).tail()` sdf = self._internal.resolved_copy.spark_frame rows = sdf.tail(n) new_sdf = default_session().createDataFrame(rows, sdf.schema) return DataFrame(self._internal.with_new_sdf(new_sdf)) def align( self, other: Union["DataFrame", "Series"], join: str = "outer", axis: Optional[Union[int, str]] = None, copy: bool = True, ) -> Tuple["DataFrame", Union["DataFrame", "Series"]]: """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. Returns ------- (left, right) : (DataFrame, type of other) Aligned objects. 
Examples -------- >>> ks.set_option("compute.ops_on_diff_frames", True) >>> df1 = ks.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30]) >>> df2 = ks.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12]) Align both axis: >>> aligned_l, aligned_r = df1.align(df2) >>> aligned_l.sort_index() a b c 10 1.0 a NaN 11 NaN None NaN 12 NaN None NaN 20 2.0 b NaN 30 3.0 c NaN >>> aligned_r.sort_index() a b c 10 4.0 NaN d 11 5.0 NaN e 12 6.0 NaN f 20 NaN NaN None 30 NaN NaN None Align only axis=0 (index): >>> aligned_l, aligned_r = df1.align(df2, axis=0) >>> aligned_l.sort_index() a b 10 1.0 a 11 NaN None 12 NaN None 20 2.0 b 30 3.0 c >>> aligned_r.sort_index() a c 10 4.0 d 11 5.0 e 12 6.0 f 20 NaN None 30 NaN None Align only axis=1 (column): >>> aligned_l, aligned_r = df1.align(df2, axis=1) >>> aligned_l.sort_index() a b c 10 1 a NaN 20 2 b NaN 30 3 c NaN >>> aligned_r.sort_index() a b c 10 4 NaN d 11 5 NaN e 12 6 NaN f Align with the join type "inner": >>> aligned_l, aligned_r = df1.align(df2, join="inner") >>> aligned_l.sort_index() a 10 1 >>> aligned_r.sort_index() a 10 4 Align with a Series: >>> s = ks.Series([7, 8, 9], index=[10, 11, 12]) >>> aligned_l, aligned_r = df1.align(s, axis=0) >>> aligned_l.sort_index() a b 10 1.0 a 11 NaN None 12 NaN None 20 2.0 b 30 3.0 c >>> aligned_r.sort_index() 10 7.0 11 8.0 12 9.0 20 NaN 30 NaN dtype: float64 >>> ks.reset_option("compute.ops_on_diff_frames") """ from databricks.koalas.series import Series, first_series if not isinstance(other, (DataFrame, Series)): raise TypeError("unsupported type: {}".format(type(other).__name__)) how = validate_how(join) axis = validate_axis(axis, None) right_is_series = isinstance(other, Series) if right_is_series: if axis is None: raise ValueError("Must specify axis=0 or 1") elif axis != 0: raise NotImplementedError( "align currently only works for axis=0 when right is Series" ) left = self right = other if (axis is None or axis == 0) and not same_anchor(left, right): combined = combine_frames(left, right, how=how) left = combined["this"] right = combined["that"] if right_is_series: right = first_series(right).rename(other.name) if ( axis is None or axis == 1 ) and left._internal.column_labels != right._internal.column_labels: if left._internal.column_labels_level != right._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") left = left.copy() right = right.copy() if how == "full": column_labels = sorted( list(set(left._internal.column_labels) | set(right._internal.column_labels)) ) elif how == "inner": column_labels = sorted( list(set(left._internal.column_labels) & set(right._internal.column_labels)) ) elif how == "left": column_labels = left._internal.column_labels else: column_labels = right._internal.column_labels for label in column_labels: if label not in left._internal.column_labels: left[label] = F.lit(None).cast(DoubleType()) left = left[column_labels] for label in column_labels: if label not in right._internal.column_labels: right[label] = F.lit(None).cast(DoubleType()) right = right[column_labels] return (left.copy(), right.copy()) if copy else (left, right) @staticmethod def from_dict(data, orient="columns", dtype=None, columns=None) -> "DataFrame": """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. 
orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': [10, 20, 30, 40]} >>> ks.DataFrame.from_dict(data) col_1 col_2 0 3 10 1 2 20 2 1 30 3 0 40 Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': [10, 20, 30, 40]} >>> ks.DataFrame.from_dict(data, orient='index').sort_index() 0 1 2 3 row_1 3 2 1 0 row_2 10 20 30 40 When using the 'index' orientation, the column names can be specified manually: >>> ks.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']).sort_index() A B C D row_1 3 2 1 0 row_2 10 20 30 40 """ return DataFrame(pd.DataFrame.from_dict(data, orient=orient, dtype=dtype, columns=columns)) def _to_internal_pandas(self): """ Return a pandas DataFrame directly from _internal to avoid overhead of copy. This method is for internal use only. """ return self._internal.to_pandas_frame def _get_or_create_repr_pandas_cache(self, n): if not hasattr(self, "_repr_pandas_cache") or n not in self._repr_pandas_cache: object.__setattr__( self, "_repr_pandas_cache", {n: self.head(n + 1)._to_internal_pandas()} ) return self._repr_pandas_cache[n] def __repr__(self): max_display_count = get_option("display.max_rows") if max_display_count is None: return self._to_internal_pandas().to_string() pdf = self._get_or_create_repr_pandas_cache(max_display_count) pdf_length = len(pdf) pdf = pdf.iloc[:max_display_count] if pdf_length > max_display_count: repr_string = pdf.to_string(show_dimensions=True) match = REPR_PATTERN.search(repr_string) if match is not None: nrows = match.group("rows") ncols = match.group("columns") footer = "\n\n[Showing only the first {nrows} rows x {ncols} columns]".format( nrows=nrows, ncols=ncols ) return REPR_PATTERN.sub(footer, repr_string) return pdf.to_string() def _repr_html_(self): max_display_count = get_option("display.max_rows") # pandas 0.25.1 has a regression about HTML representation so 'bold_rows' # has to be set as False explicitly. 
See https://github.com/pandas-dev/pandas/issues/28204 bold_rows = not (LooseVersion("0.25.1") == LooseVersion(pd.__version__)) if max_display_count is None: return self._to_internal_pandas().to_html(notebook=True, bold_rows=bold_rows) pdf = self._get_or_create_repr_pandas_cache(max_display_count) pdf_length = len(pdf) pdf = pdf.iloc[:max_display_count] if pdf_length > max_display_count: repr_html = pdf.to_html(show_dimensions=True, notebook=True, bold_rows=bold_rows) match = REPR_HTML_PATTERN.search(repr_html) if match is not None: nrows = match.group("rows") ncols = match.group("columns") by = chr(215) footer = ( "\n<p>Showing only the first {rows} rows " "{by} {cols} columns</p>\n</div>".format(rows=nrows, by=by, cols=ncols) ) return REPR_HTML_PATTERN.sub(footer, repr_html) return pdf.to_html(notebook=True, bold_rows=bold_rows) def __getitem__(self, key): from databricks.koalas.series import Series if key is None: raise KeyError("none key") elif isinstance(key, Series): return self.loc[key.astype(bool)] elif isinstance(key, slice): if any(type(n) == int or None for n in [key.start, key.stop]): # Seems like pandas Frame always uses int as positional search when slicing # with ints. return self.iloc[key] return self.loc[key] elif is_name_like_value(key): return self.loc[:, key] elif is_list_like(key): return self.loc[:, list(key)] raise NotImplementedError(key) def __setitem__(self, key, value): from databricks.koalas.series import Series if isinstance(value, (DataFrame, Series)) and not same_anchor(value, self): # Different Series or DataFrames level = self._internal.column_labels_level key = DataFrame._index_normalized_label(level, key) value = DataFrame._index_normalized_frame(level, value) def assign_columns(kdf, this_column_labels, that_column_labels): assert len(key) == len(that_column_labels) # Note that here intentionally uses `zip_longest` that combine # that_columns. for k, this_label, that_label in zip_longest( key, this_column_labels, that_column_labels ): yield (kdf._kser_for(that_label), tuple(["that", *k])) if this_label is not None and this_label[1:] != k: yield (kdf._kser_for(this_label), this_label) kdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left") elif isinstance(value, list): if len(self) != len(value): raise ValueError("Length of values does not match length of index") # TODO: avoid using default index? with option_context( "compute.default_index_type", "distributed-sequence", "compute.ops_on_diff_frames", True, ): kdf = self.reset_index() kdf[key] = ks.DataFrame(value) kdf = kdf.set_index(kdf.columns[: self._internal.index_level]) kdf.index.names = self.index.names elif isinstance(key, list): assert isinstance(value, DataFrame) # Same DataFrames. field_names = value.columns kdf = self._assign({k: value[c] for k, c in zip(key, field_names)}) else: # Same Series. kdf = self._assign({key: value}) self._update_internal_frame(kdf._internal) @staticmethod def _index_normalized_label(level, labels): """ Returns a label that is normalized against the current column index level. 
For example, the key "abc" can be ("abc", "", "") if the current Frame has a multi-index for its column """ if is_name_like_tuple(labels): labels = [labels] elif is_name_like_value(labels): labels = [(labels,)] else: labels = [k if is_name_like_tuple(k) else (k,) for k in labels] if any(len(label) > level for label in labels): raise KeyError( "Key length ({}) exceeds index depth ({})".format( max(len(label) for label in labels), level ) ) return [tuple(list(label) + ([""] * (level - len(label)))) for label in labels] @staticmethod def _index_normalized_frame(level, kser_or_kdf): """ Returns a frame that is normalized against the current column index level. For example, the name in `pd.Series([...], name="abc")` can be can be ("abc", "", "") if the current DataFrame has a multi-index for its column """ from databricks.koalas.series import Series if isinstance(kser_or_kdf, Series): kdf = kser_or_kdf.to_frame() else: assert isinstance(kser_or_kdf, DataFrame), type(kser_or_kdf) kdf = kser_or_kdf.copy() kdf.columns = pd.MultiIndex.from_tuples( [ tuple([name_like_string(label)] + ([""] * (level - 1))) for label in kdf._internal.column_labels ], ) return kdf def __getattr__(self, key: str) -> Any: if key.startswith("__"): raise AttributeError(key) if hasattr(_MissingPandasLikeDataFrame, key): property_or_func = getattr(_MissingPandasLikeDataFrame, key) if isinstance(property_or_func, property): return property_or_func.fget(self) # type: ignore else: return partial(property_or_func, self) try: return self.loc[:, key] except KeyError: raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__.__name__, key) ) def __setattr__(self, key: str, value) -> None: try: object.__getattribute__(self, key) return object.__setattr__(self, key, value) except AttributeError: pass if (key,) in self._internal.column_labels: self[key] = value else: msg = "Koalas doesn't allow columns to be created via a new attribute name" if is_testing(): raise AssertionError(msg) else: warnings.warn(msg, UserWarning) def __len__(self): return self._internal.resolved_copy.spark_frame.count() def __dir__(self): fields = [ f for f in self._internal.resolved_copy.spark_frame.schema.fieldNames() if " " not in f ] return super().__dir__() + fields def __iter__(self): return iter(self.columns) # NDArray Compat def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any): # TODO: is it possible to deduplicate it with '_map_series_op'? if all(isinstance(inp, DataFrame) for inp in inputs) and any( not same_anchor(inp, inputs[0]) for inp in inputs ): # binary only assert len(inputs) == 2 this = inputs[0] that = inputs[1] if this._internal.column_labels_level != that._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") # Different DataFrames def apply_op(kdf, this_column_labels, that_column_labels): for this_label, that_label in zip(this_column_labels, that_column_labels): yield ( ufunc( kdf._kser_for(this_label), kdf._kser_for(that_label), **kwargs ).rename(this_label), this_label, ) return align_diff_frames(apply_op, this, that, fillna=True, how="full") else: # DataFrame and Series applied = [] this = inputs[0] assert all(inp is this for inp in inputs if isinstance(inp, DataFrame)) for label in this._internal.column_labels: arguments = [] for inp in inputs: arguments.append(inp[label] if isinstance(inp, DataFrame) else inp) # both binary and unary. 
applied.append(ufunc(*arguments, **kwargs).rename(label)) internal = this._internal.with_new_columns(applied) return DataFrame(internal) if sys.version_info >= (3, 7): def __class_getitem__(cls, params): # This is a workaround to support variadic generic in DataFrame in Python 3.7. # See https://github.com/python/typing/issues/193 # we always wraps the given type hints by a tuple to mimic the variadic generic. return _create_tuple_for_frame_type(params) elif (3, 5) <= sys.version_info < (3, 7): # This is a workaround to support variadic generic in DataFrame in Python 3.5+ # The implementation is in its metaclass so this flag is needed to distinguish # Koalas DataFrame. is_dataframe = None def _reduce_spark_multi(sdf, aggs): """ Performs a reduction on a spark DataFrame, the functions being known sql aggregate functions. """ assert isinstance(sdf, spark.DataFrame) sdf0 = sdf.agg(*aggs) l = sdf0.limit(2).toPandas() assert len(l) == 1, (sdf, l) row = l.iloc[0] l2 = list(row) assert len(l2) == len(aggs), (row, l2) return l2 class CachedDataFrame(DataFrame): """ Cached Koalas DataFrame, which corresponds to pandas DataFrame logically, but internally it caches the corresponding Spark DataFrame. """ def __init__(self, internal, storage_level=None): if storage_level is None: object.__setattr__(self, "_cached", internal.spark_frame.cache()) elif isinstance(storage_level, StorageLevel): object.__setattr__(self, "_cached", internal.spark_frame.persist(storage_level)) else: raise TypeError( "Only a valid pyspark.StorageLevel type is acceptable for the `storage_level`" ) super().__init__(internal) def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.spark.unpersist() # create accessor for Spark related methods. spark = CachedAccessor("spark", CachedSparkFrameMethods) @property def storage_level(self) -> StorageLevel: warnings.warn( "DataFrame.storage_level is deprecated as of DataFrame.spark.storage_level. " "Please use the API instead.", FutureWarning, ) return self.spark.storage_level storage_level.__doc__ = CachedSparkFrameMethods.storage_level.__doc__ def unpersist(self) -> None: warnings.warn( "DataFrame.unpersist is deprecated as of DataFrame.spark.unpersist. " "Please use the API instead.", FutureWarning, ) return self.spark.unpersist() unpersist.__doc__ = CachedSparkFrameMethods.unpersist.__doc__
1
18,017
nit: Could you add an empty line between `import datetime` (built-in library block) and `import numpy as np` (third-party library block)?
databricks-koalas
py
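The review message in this row asks for the standard PEP 8 import grouping: standard-library imports first, then a blank line, then third-party imports. A minimal sketch of the layout being requested, using only the two modules named in the comment (any surrounding koalas imports are omitted, so this is illustrative rather than the actual diff):

# Standard-library block.
import datetime

# The blank line above separates the third-party block, per PEP 8 import grouping.
import numpy as np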
@@ -1284,6 +1284,8 @@ const std::map<llvm::StringRef, hipCounter> CUDA_DRIVER_TYPE_NAME_MAP{ {"CUDA_ERROR_INVALID_PC", {"hipErrorInvalidPc", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 718 // cudaErrorLaunchFailure {"CUDA_ERROR_LAUNCH_FAILED", {"hipErrorLaunchFailure", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 719 + // cudaErrorCooperativeLaunchTooLarge + {"CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE", {"hipErrorCooperativeLaunchTooLarge", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 720 // cudaErrorNotPermitted {"CUDA_ERROR_NOT_PERMITTED", {"hipErrorNotPermitted", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 800 // cudaErrorNotSupported
1
/* Copyright (c) 2015 - present Advanced Micro Devices, Inc. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "CUDA2HIP.h" // Maps the names of CUDA DRIVER API types to the corresponding HIP types const std::map<llvm::StringRef, hipCounter> CUDA_DRIVER_TYPE_NAME_MAP{ // 1. Structs {"CUDA_ARRAY3D_DESCRIPTOR_st", {"HIP_ARRAY3D_DESCRIPTOR", "", CONV_TYPE, API_DRIVER}}, {"CUDA_ARRAY3D_DESCRIPTOR", {"HIP_ARRAY3D_DESCRIPTOR", "", CONV_TYPE, API_DRIVER}}, {"CUDA_ARRAY_DESCRIPTOR_st", {"HIP_ARRAY_DESCRIPTOR", "", CONV_TYPE, API_DRIVER}}, {"CUDA_ARRAY_DESCRIPTOR", {"HIP_ARRAY_DESCRIPTOR", "", CONV_TYPE, API_DRIVER}}, // cudaExternalMemoryBufferDesc {"CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st", {"HIP_EXTERNAL_MEMORY_BUFFER_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_EXTERNAL_MEMORY_BUFFER_DESC", {"HIP_EXTERNAL_MEMORY_BUFFER_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaExternalMemoryHandleDesc {"CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st", {"HIP_EXTERNAL_MEMORY_HANDLE_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_EXTERNAL_MEMORY_HANDLE_DESC", {"HIP_EXTERNAL_MEMORY_HANDLE_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaExternalMemoryMipmappedArrayDesc {"CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st", {"HIP_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC", {"HIP_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaExternalSemaphoreHandleDesc {"CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st", {"HIP_EXTERNAL_SEMAPHORE_HANDLE_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC", {"HIP_EXTERNAL_SEMAPHORE_HANDLE_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaExternalSemaphoreSignalParams {"CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st", {"HIP_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS", {"HIP_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaExternalSemaphoreWaitParams {"CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st", {"HIP_EXTERNAL_SEMAPHORE_WAIT_PARAMS", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS", {"HIP_EXTERNAL_SEMAPHORE_WAIT_PARAMS", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaHostNodeParams {"CUDA_HOST_NODE_PARAMS_st", {"hipHostNodeParams", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_HOST_NODE_PARAMS", {"hipHostNodeParams", "", CONV_TYPE, API_DRIVER, 
HIP_UNSUPPORTED}}, // cudaKernelNodeParams {"CUDA_KERNEL_NODE_PARAMS_st", {"hipKernelNodeParams", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_KERNEL_NODE_PARAMS", {"hipKernelNodeParams", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue // NOTE: cudaLaunchParams struct differs {"CUDA_LAUNCH_PARAMS_st", {"hipLaunchParams", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_LAUNCH_PARAMS", {"hipLaunchParams", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_MEMCPY2D_st", {"hip_Memcpy2D", "", CONV_TYPE, API_DRIVER}}, {"CUDA_MEMCPY2D", {"hip_Memcpy2D", "", CONV_TYPE, API_DRIVER}}, // no analogue {"CUDA_MEMCPY3D_st", {"hip_Memcpy3D", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_MEMCPY3D", {"hip_Memcpy3D", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_MEMCPY3D_PEER_st", {"hip_Memcpy3D_Peer", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_MEMCPY3D_PEER", {"hip_Memcpy3D_Peer", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaMemsetParams {"CUDA_MEMSET_NODE_PARAMS_st", {"hipMemsetParams", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_MEMSET_NODE_PARAMS", {"hipMemsetParams", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st", {"HIP_POINTER_ATTRIBUTE_P2P_TOKENS", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_POINTER_ATTRIBUTE_P2P_TOKENS", {"HIP_POINTER_ATTRIBUTE_P2P_TOKENS", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue // NOTE: cudaResourceDesc struct differs {"CUDA_RESOURCE_DESC_st", {"HIP_RESOURCE_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_RESOURCE_DESC", {"HIP_RESOURCE_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaResourceViewDesc // NOTE: cudaResourceViewDesc hasn't reserved bytes in the end {"CUDA_RESOURCE_VIEW_DESC_st", {"HIP_RESOURCE_VIEW_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_RESOURCE_VIEW_DESC", {"HIP_RESOURCE_VIEW_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue // NOTE: cudaTextureDesc differs {"CUDA_TEXTURE_DESC_st", {"HIP_TEXTURE_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUDA_TEXTURE_DESC", {"HIP_TEXTURE_DESC", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue // NOTE: cudaDeviceProp differs {"CUdevprop_st", {"hipDeviceProp_t", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUdevprop", {"hipDeviceProp_t", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaIpcEventHandle_st {"CUipcEventHandle_st", {"ihipIpcEventHandle_t", "", CONV_TYPE, API_DRIVER}}, // cudaIpcEventHandle_t {"CUipcEventHandle", {"ihipIpcEventHandle_t", "", CONV_TYPE, API_DRIVER}}, // cudaIpcMemHandle_st {"CUipcMemHandle_st", {"hipIpcMemHandle_st", "", CONV_TYPE, API_DRIVER}}, // cudaIpcMemHandle_t {"CUipcMemHandle", {"hipIpcMemHandle_t", "", CONV_TYPE, API_DRIVER}}, // CUDA: "The types CUarray and cudaArray * represent the same data type and may be used interchangeably by casting the two types between each other." 
// cudaArray {"CUarray_st", {"hipArray", "", CONV_TYPE, API_DRIVER}}, // cudaArray_t {"CUarray", {"hipArray *", "", CONV_TYPE, API_DRIVER}}, // no analogue {"CUctx_st", {"ihipCtx_t", "", CONV_TYPE, API_DRIVER}}, {"CUcontext", {"hipCtx_t", "", CONV_TYPE, API_DRIVER}}, // CUeglStreamConnection_st {"CUeglStreamConnection_st", {"hipEglStreamConnection", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaEglStreamConnection {"CUeglStreamConnection", {"hipEglStreamConnection *", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // the same - CUevent_st {"CUevent_st", {"ihipEvent_t", "", CONV_TYPE, API_DRIVER}}, // cudaEvent_t {"CUevent", {"hipEvent_t", "", CONV_TYPE, API_DRIVER}}, // CUexternalMemory_st {"CUextMemory_st", {"hipExtMemory_st", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaExternalMemory_t {"CUexternalMemory", {"hipExternalMemory", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUexternalSemaphore_st {"CUextSemaphore_st", {"hipExtSemaphore_st", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaExternalSemaphore_t {"CUexternalSemaphore", {"hipExternalSemaphore", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue {"CUfunc_st", {"ihipModuleSymbol_t", "", CONV_TYPE, API_DRIVER}}, {"CUfunction", {"hipFunction_t", "", CONV_TYPE, API_DRIVER}}, // the same - CUgraph_st {"CUgraph_st", {"hipGraph_st", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaGraph_t {"CUgraph", {"hipGraph", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // the same -CUgraphExec_st {"CUgraphExec_st", {"hipGraphExec_st", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaGraphExec_t {"CUgraphExec", {"hipGraphExec", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaGraphicsResource {"CUgraphicsResource_st", {"hipGraphicsResource_st", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaGraphicsResource_t {"CUgraphicsResource", {"hipGraphicsResource_t", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // the same - CUgraphNode_st {"CUgraphNode_st", {"hipGraphNode_st", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaGraphNode_t {"CUgraphNode", {"hipGraphNode", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaMipmappedArray {"CUmipmappedArray_st", {"hipMipmappedArray_st", "", CONV_TYPE, API_DRIVER}}, // cudaMipmappedArray_t {"CUmipmappedArray", {"hipMipmappedArray_t", "", CONV_TYPE, API_DRIVER}}, // no analogue {"CUmod_st", {"ihipModule_t", "", CONV_TYPE, API_DRIVER}}, {"CUmodule", {"hipModule_t", "", CONV_TYPE, API_DRIVER}}, // the same - CUstream_st {"CUstream_st", {"ihipStream_t", "", CONV_TYPE, API_DRIVER}}, // cudaStream_t {"CUstream", {"hipStream_t", "", CONV_TYPE, API_DRIVER}}, // NOTE: possibly surfaceReference is analogue {"CUsurfref_st", {"ihipSurfaceReference_t", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUsurfref", {"hipSurfaceReference_t", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // textureReference {"CUtexref_st", {"textureReference", "", CONV_TYPE, API_DRIVER}}, {"CUtexref", {"hipTextureReference_t", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUuuid_st // NOTE: the same struct and its name {"CUuuid_st", {"hipUUID", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUuuid", {"hipUUID", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue {"CUmemLocation_st", {"hipMemoryLocation", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUmemLocation", {"hipMemoryLocation", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue {"CUmemAllocationProp_st", {"hipMemoryAllocationProperties", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUmemAllocationProp", 
{"hipMemoryAllocationProperties", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue {"CUmemAccessDesc_st", {"hipMemoryAccessDescription", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUmemAccessDesc", {"hipMemoryAccessDescription", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // 2. Unions {"CUstreamBatchMemOpParams", {"hipStreamBatchMemOpParams", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUstreamBatchMemOpParams_union", {"hipStreamBatchMemOpParams", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // 3. Enums {"CUaddress_mode", {"hipTextureAddressMode", "", CONV_TYPE, API_DRIVER}}, {"CUaddress_mode_enum", {"hipTextureAddressMode", "", CONV_TYPE, API_DRIVER}}, // CUaddress_mode enum values {"CU_TR_ADDRESS_MODE_WRAP", {"hipAddressModeWrap", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0 {"CU_TR_ADDRESS_MODE_CLAMP", {"hipAddressModeClamp", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 1 {"CU_TR_ADDRESS_MODE_MIRROR", {"hipAddressModeMirror", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 2 {"CU_TR_ADDRESS_MODE_BORDER", {"hipAddressModeBorder", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 3 {"CUarray_cubemap_face", {"hipGraphicsCubeFace", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUarray_cubemap_face_enum", {"hipGraphicsCubeFace", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUarray_cubemap_face enum values // cudaGraphicsCubeFacePositiveX {"CU_CUBEMAP_FACE_POSITIVE_X", {"hipGraphicsCubeFacePositiveX", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 // cudaGraphicsCubeFaceNegativeX {"CU_CUBEMAP_FACE_NEGATIVE_X", {"hipGraphicsCubeFaceNegativeX", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaGraphicsCubeFacePositiveY {"CU_CUBEMAP_FACE_POSITIVE_Y", {"hipGraphicsCubeFacePositiveY", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaGraphicsCubeFaceNegativeY {"CU_CUBEMAP_FACE_NEGATIVE_Y", {"hipGraphicsCubeFaceNegativeY", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x03 // cudaGraphicsCubeFacePositiveZ {"CU_CUBEMAP_FACE_POSITIVE_Z", {"hipGraphicsCubeFacePositiveZ", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x04 // cudaGraphicsCubeFaceNegativeZ {"CU_CUBEMAP_FACE_NEGATIVE_Z", {"hipGraphicsCubeFaceNegativeZ", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x05 {"CUarray_format", {"hipArray_format", "", CONV_TYPE, API_DRIVER}}, {"CUarray_format_enum", {"hipArray_format", "", CONV_TYPE, API_DRIVER}}, // CUarray_format enum values {"CU_AD_FORMAT_UNSIGNED_INT8", {"HIP_AD_FORMAT_UNSIGNED_INT8", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x01 {"CU_AD_FORMAT_UNSIGNED_INT16", {"HIP_AD_FORMAT_UNSIGNED_INT16", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x02 {"CU_AD_FORMAT_UNSIGNED_INT32", {"HIP_AD_FORMAT_UNSIGNED_INT32", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x03 {"CU_AD_FORMAT_SIGNED_INT8", {"HIP_AD_FORMAT_SIGNED_INT8", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x08 {"CU_AD_FORMAT_SIGNED_INT16", {"HIP_AD_FORMAT_SIGNED_INT16", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x09 {"CU_AD_FORMAT_SIGNED_INT32", {"HIP_AD_FORMAT_SIGNED_INT32", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x0a {"CU_AD_FORMAT_HALF", {"HIP_AD_FORMAT_HALF", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x10 {"CU_AD_FORMAT_FLOAT", {"HIP_AD_FORMAT_FLOAT", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x20 // cudaComputeMode {"CUcomputemode", {"hipComputeMode", "", CONV_TYPE, API_DRIVER}}, {"CUcomputemode_enum", {"hipComputeMode", "", CONV_TYPE, API_DRIVER}}, // CUcomputemode enum values // cudaComputeModeDefault 
{"CU_COMPUTEMODE_DEFAULT", {"hipComputeModeDefault", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0 // cudaComputeModeExclusive // NOTE: Deprecated since CUDA 10.0 {"CU_COMPUTEMODE_EXCLUSIVE", {"hipComputeModeExclusive", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 1 // cudaComputeModeProhibited {"CU_COMPUTEMODE_PROHIBITED", {"hipComputeModeProhibited", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 2 // cudaComputeModeExclusiveProcess {"CU_COMPUTEMODE_EXCLUSIVE_PROCESS", {"hipComputeModeExclusiveProcess", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 3 {"CUctx_flags", {"hipCctx_flags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUctx_flags_enum", {"hipCctx_flags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUctx_flags enum values // cudaDeviceScheduleAuto {"CU_CTX_SCHED_AUTO", {"hipDeviceScheduleAuto", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x00 // cudaDeviceScheduleSpin {"CU_CTX_SCHED_SPIN", {"hipDeviceScheduleSpin", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x01 // cudaDeviceScheduleYield {"CU_CTX_SCHED_YIELD", {"hipDeviceScheduleYield", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x02 // cudaDeviceScheduleBlockingSync {"CU_CTX_SCHED_BLOCKING_SYNC", {"hipDeviceScheduleBlockingSync", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x04 // cudaDeviceBlockingSync // NOTE: Deprecated since CUDA 4.0 and replaced with CU_CTX_SCHED_BLOCKING_SYNC {"CU_CTX_BLOCKING_SYNC", {"hipDeviceScheduleBlockingSync", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x04 // cudaDeviceScheduleMask {"CU_CTX_SCHED_MASK", {"hipDeviceScheduleMask", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x07 // cudaDeviceMapHost {"CU_CTX_MAP_HOST", {"hipDeviceMapHost", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x08 // cudaDeviceLmemResizeToMax {"CU_CTX_LMEM_RESIZE_TO_MAX", {"hipDeviceLmemResizeToMax", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x10 // cudaDeviceMask {"CU_CTX_FLAGS_MASK", {"hipDeviceMask", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1f // cudaDeviceAttr {"CUdevice_attribute", {"hipDeviceAttribute_t", "", CONV_TYPE, API_DRIVER}}, {"CUdevice_attribute_enum", {"hipDeviceAttribute_t", "", CONV_TYPE, API_DRIVER}}, // CUdevice_attribute enum values // cudaDevAttrMaxThreadsPerBlock {"CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK", {"hipDeviceAttributeMaxThreadsPerBlock", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 1 // cudaDevAttrMaxBlockDimX {"CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X", {"hipDeviceAttributeMaxBlockDimX", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 2 // cudaDevAttrMaxBlockDimY {"CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y", {"hipDeviceAttributeMaxBlockDimY", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 3 // cudaDevAttrMaxBlockDimZ {"CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z", {"hipDeviceAttributeMaxBlockDimZ", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 4 // cudaDevAttrMaxGridDimX {"CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X", {"hipDeviceAttributeMaxGridDimX", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 5 // cudaDevAttrMaxGridDimY {"CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y", {"hipDeviceAttributeMaxGridDimY", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 6 // cudaDevAttrMaxGridDimZ {"CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z", {"hipDeviceAttributeMaxGridDimZ", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 7 // cudaDevAttrMaxSharedMemoryPerBlock {"CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK", {"hipDeviceAttributeMaxSharedMemoryPerBlock", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 8 // no analogue // NOTE: Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK {"CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK", 
{"hipDeviceAttributeMaxSharedMemoryPerBlock", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 8 // cudaDevAttrTotalConstantMemory {"CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY", {"hipDeviceAttributeTotalConstantMemory", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 9 // cudaDevAttrWarpSize {"CU_DEVICE_ATTRIBUTE_WARP_SIZE", {"hipDeviceAttributeWarpSize", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 10 // cudaDevAttrMaxPitch {"CU_DEVICE_ATTRIBUTE_MAX_PITCH", {"hipDeviceAttributeMaxPitch", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 11 // cudaDevAttrMaxRegistersPerBlock {"CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK", {"hipDeviceAttributeMaxRegistersPerBlock", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 12 // no analogue {"CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK", {"hipDeviceAttributeMaxRegistersPerBlock", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 12 // cudaDevAttrClockRate {"CU_DEVICE_ATTRIBUTE_CLOCK_RATE", {"hipDeviceAttributeClockRate", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 13 // cudaDevAttrTextureAlignment {"CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT", {"hipDeviceAttributeTextureAlignment", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 14 // cudaDevAttrGpuOverlap // NOTE: Deprecated, use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT {"CU_DEVICE_ATTRIBUTE_GPU_OVERLAP", {"hipDeviceAttributeAsyncEngineCount", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 15 // cudaDevAttrMultiProcessorCount {"CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT", {"hipDeviceAttributeMultiprocessorCount", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 16 // cudaDevAttrKernelExecTimeout {"CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT", {"hipDeviceAttributeKernelExecTimeout", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 17 // cudaDevAttrIntegrated {"CU_DEVICE_ATTRIBUTE_INTEGRATED", {"hipDeviceAttributeIntegrated", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 18 // cudaDevAttrCanMapHostMemory {"CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY", {"hipDeviceAttributeCanMapHostMemory", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 19 // cudaDevAttrComputeMode {"CU_DEVICE_ATTRIBUTE_COMPUTE_MODE", {"hipDeviceAttributeComputeMode", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 20 // cudaDevAttrMaxTexture1DWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH", {"hipDeviceAttributeMaxTexture1DWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 21 // cudaDevAttrMaxTexture2DWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH", {"hipDeviceAttributeMaxTexture2DWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 22 // cudaDevAttrMaxTexture2DHeight {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT", {"hipDeviceAttributeMaxTexture2DHeight", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 23 // cudaDevAttrMaxTexture3DWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH", {"hipDeviceAttributeMaxTexture3DWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 24 // cudaDevAttrMaxTexture3DHeight {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT", {"hipDeviceAttributeMaxTexture3DHeight", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 25 // cudaDevAttrMaxTexture3DDepth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH", {"hipDeviceAttributeMaxTexture3DDepth", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 26 // cudaDevAttrMaxTexture2DLayeredWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH", {"hipDeviceAttributeMaxTexture2DLayeredWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 27 // cudaDevAttrMaxTexture2DLayeredHeight {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT", {"hipDeviceAttributeMaxTexture2DLayeredHeight", "", CONV_NUMERIC_LITERAL, API_DRIVER, 
HIP_UNSUPPORTED}}, // 28 // cudaDevAttrMaxTexture2DLayeredLayers {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS", {"hipDeviceAttributeMaxTexture2DLayeredLayers", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 29 // cudaDevAttrMaxTexture2DLayeredWidth // NOTE: Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH", {"hipDeviceAttributeMaxTexture2DLayeredWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 27 // cudaDevAttrMaxTexture2DLayeredHeight // NOTE: Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT", {"hipDeviceAttributeMaxTexture2DLayeredHeight", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 28 // cudaDevAttrMaxTexture2DLayeredLayers // NOTE: Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES", {"hipDeviceAttributeMaxTexture2DLayeredLayers", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 29 // cudaDevAttrSurfaceAlignment {"CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT", {"hipDeviceAttributeSurfaceAlignment", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 30 // cudaDevAttrConcurrentKernels {"CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS", {"hipDeviceAttributeConcurrentKernels", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 31 // cudaDevAttrEccEnabled {"CU_DEVICE_ATTRIBUTE_ECC_ENABLED", {"hipDeviceAttributeEccEnabled", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 32 // cudaDevAttrPciBusId {"CU_DEVICE_ATTRIBUTE_PCI_BUS_ID", {"hipDeviceAttributePciBusId", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 33 // cudaDevAttrPciDeviceId {"CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID", {"hipDeviceAttributePciDeviceId", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 34 // cudaDevAttrTccDriver {"CU_DEVICE_ATTRIBUTE_TCC_DRIVER", {"hipDeviceAttributeTccDriver", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 35 // cudaDevAttrMemoryClockRate {"CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE", {"hipDeviceAttributeMemoryClockRate", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 36 // cudaDevAttrGlobalMemoryBusWidth {"CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH", {"hipDeviceAttributeMemoryBusWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 37 // cudaDevAttrL2CacheSize {"CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE", {"hipDeviceAttributeL2CacheSize", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 38 // cudaDevAttrMaxThreadsPerMultiProcessor {"CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR", {"hipDeviceAttributeMaxThreadsPerMultiProcessor", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 39 // cudaDevAttrAsyncEngineCount {"CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT", {"hipDeviceAttributeAsyncEngineCount", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 40 // cudaDevAttrUnifiedAddressing {"CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING", {"hipDeviceAttributeUnifiedAddressing", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 41 // cudaDevAttrMaxTexture1DLayeredWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH", {"hipDeviceAttributeMaxTexture1DLayeredWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 42 // cudaDevAttrMaxTexture1DLayeredLayers {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS", {"hipDeviceAttributeMaxTexture1DLayeredLayers", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 43 // no analogue // NOTE: Deprecated, do not use {"CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER", {"hipDeviceAttributeCanTex2DGather", "", 
CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 44 // cudaDevAttrMaxTexture2DGatherWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH", {"hipDeviceAttributeMaxTexture2DGatherWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 45 // cudaDevAttrMaxTexture2DGatherHeight {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT", {"hipDeviceAttributeMaxTexture2DGatherHeight", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 46 // cudaDevAttrMaxTexture3DWidthAlt {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE", {"hipDeviceAttributeMaxTexture3DWidthAlternate", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 47 // cudaDevAttrMaxTexture3DHeightAlt {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE", {"hipDeviceAttributeMaxTexture3DHeightAlternate", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 48 // cudaDevAttrMaxTexture3DDepthAlt {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE", {"hipDeviceAttributeMaxTexture3DDepthAlternate", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 49 // cudaDevAttrPciDomainId {"CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID", {"hipDeviceAttributePciDomainId", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 50 // cudaDevAttrTexturePitchAlignment {"CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT", {"hipDeviceAttributeTexturePitchAlignment", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 51 // cudaDevAttrMaxTextureCubemapWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH", {"hipDeviceAttributeMaxTextureCubemapWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 52 // cudaDevAttrMaxTextureCubemapLayeredWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH", {"hipDeviceAttributeMaxTextureCubemapLayeredWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 53 // cudaDevAttrMaxTextureCubemapLayeredLayers {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS", {"hipDeviceAttributeMaxTextureCubemapLayeredLayers", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 54 // cudaDevAttrMaxSurface1DWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH", {"hipDeviceAttributeMaxSurface1DWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 55 // cudaDevAttrMaxSurface2DWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH", {"hipDeviceAttributeMaxSurface2DWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 56 // cudaDevAttrMaxSurface2DHeight {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT", {"hipDeviceAttributeMaxSurface2DHeight", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 57 // cudaDevAttrMaxSurface3DWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH", {"hipDeviceAttributeMaxSurface3DWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 58 // cudaDevAttrMaxSurface3DHeight {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT", {"hipDeviceAttributeMaxSurface3DHeight", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 59 // cudaDevAttrMaxSurface3DDepth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH", {"hipDeviceAttributeMaxSurface3DDepth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 60 // cudaDevAttrMaxSurface1DLayeredWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH", {"hipDeviceAttributeMaxSurface1DLayeredWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 61 // cudaDevAttrMaxSurface1DLayeredLayers {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS", {"hipDeviceAttributeMaxSurface1DLayeredLayers", "", 
CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 62 // cudaDevAttrMaxSurface2DLayeredWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH", {"hipDeviceAttributeMaxSurface2DLayeredWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 63 // cudaDevAttrMaxSurface2DLayeredHeight {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT", {"hipDeviceAttributeMaxSurface2DLayeredHeight", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 64 // cudaDevAttrMaxSurface2DLayeredLayers {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS", {"hipDeviceAttributeMaxSurface2DLayeredLayers", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 65 // cudaDevAttrMaxSurfaceCubemapWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH", {"hipDeviceAttributeMaxSurfaceCubemapWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 66 // cudaDevAttrMaxSurfaceCubemapLayeredWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH", {"hipDeviceAttributeMaxSurfaceCubemapLayeredWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 67 // cudaDevAttrMaxSurfaceCubemapLayeredLayers {"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS", {"hipDeviceAttributeMaxSurfaceCubemapLayeredLayers", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 68 // cudaDevAttrMaxTexture1DLinearWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH", {"hipDeviceAttributeMaxTexture1DLinearWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 69 // cudaDevAttrMaxTexture2DLinearWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH", {"hipDeviceAttributeMaxTexture2DLinearWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 70 // cudaDevAttrMaxTexture2DLinearHeight {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT", {"hipDeviceAttributeMaxTexture2DLinearHeight", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 71 // cudaDevAttrMaxTexture2DLinearPitch {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH", {"hipDeviceAttributeMaxTexture2DLinearPitch", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 72 // cudaDevAttrMaxTexture2DMipmappedWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH", {"hipDeviceAttributeMaxTexture2DMipmappedWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 73 // cudaDevAttrMaxTexture2DMipmappedHeight {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT", {"hipDeviceAttributeMaxTexture2DMipmappedHeight", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 74 // cudaDevAttrComputeCapabilityMajor {"CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR", {"hipDeviceAttributeComputeCapabilityMajor", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 75 // cudaDevAttrComputeCapabilityMinor {"CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR", {"hipDeviceAttributeComputeCapabilityMinor", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 76 // cudaDevAttrMaxTexture1DMipmappedWidth {"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH", {"hipDeviceAttributeMaxTexture1DMipmappedWidth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 77 // cudaDevAttrStreamPrioritiesSupported {"CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED", {"hipDeviceAttributeStreamPrioritiesSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 78 // cudaDevAttrGlobalL1CacheSupported {"CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED", {"hipDeviceAttributeGlobalL1CacheSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 79 // 
cudaDevAttrLocalL1CacheSupported {"CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED", {"hipDeviceAttributeLocalL1CacheSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 80 // cudaDevAttrMaxSharedMemoryPerMultiprocessor {"CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR", {"hipDeviceAttributeMaxSharedMemoryPerMultiprocessor", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 81 // cudaDevAttrMaxRegistersPerMultiprocessor {"CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR", {"hipDeviceAttributeMaxRegistersPerMultiprocessor", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 82 // cudaDevAttrManagedMemory {"CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY", {"hipDeviceAttributeManagedMemory", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 83 // cudaDevAttrIsMultiGpuBoard {"CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD", {"hipDeviceAttributeIsMultiGpuBoard", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 84 // cudaDevAttrMultiGpuBoardGroupID {"CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID", {"hipDeviceAttributeMultiGpuBoardGroupId", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 85 // cudaDevAttrHostNativeAtomicSupported {"CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED", {"hipDeviceAttributeHostNativeAtomicSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 86 // cudaDevAttrSingleToDoublePrecisionPerfRatio {"CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO", {"hipDeviceAttributeSingleToDoublePrecisionPerfRatio", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 87 // cudaDevAttrPageableMemoryAccess {"CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS", {"hipDeviceAttributePageableMemoryAccess", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 88 // cudaDevAttrConcurrentManagedAccess {"CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS", {"hipDeviceAttributeConcurrentManagedAccess", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 89 // cudaDevAttrComputePreemptionSupported {"CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED", {"hipDeviceAttributeComputePreemptionSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 90 // cudaDevAttrCanUseHostPointerForRegisteredMem {"CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM", {"hipDeviceAttributeCanUseHostPointerForRegisteredMem", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 91 // no analogue: cudaDevAttrReserved92 {"CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS", {"hipDeviceAttributeCanUseStreamMemOps", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 92 // no analogue: cudaDevAttrReserved93 {"CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS", {"hipDeviceAttributeCanUse64BitStreamMemOps", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 93 // no analogue: cudaDevAttrReserved94 {"CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR", {"hipDeviceAttributeCanUseStreamWaitValueNor", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 94 // cudaDevAttrCooperativeLaunch {"CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH", {"hipDeviceAttributeCooperativeLaunch", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 95 // cudaDevAttrCooperativeMultiDeviceLaunch {"CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH", {"hipDeviceAttributeCooperativeMultiDeviceLaunch", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 96 // cudaDevAttrMaxSharedMemoryPerBlockOptin {"CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN", {"hipDeviceAttributeMaxSharedMemoryPerBlockOptin", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 97 // 
cudaDevAttrCanFlushRemoteWrites {"CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES", {"hipDeviceAttributeCanFlushRemoteWrites", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 98 // cudaDevAttrHostRegisterSupported {"CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED", {"hipDeviceAttributeHostRegisterSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 99 // cudaDevAttrPageableMemoryAccessUsesHostPageTables {"CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES", {"hipDeviceAttributePageableMemoryAccessUsesHostPageTables", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 100 // cudaDevAttrDirectManagedMemAccessFromHost {"CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST", {"hipDeviceAttributeDirectManagedMemAccessFromHost", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 101 // no analogue {"CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED", {"hipDeviceAttributeVirtualAddressManagementSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 102 // no analogue {"CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED", {"hipDeviceAttributeHandleTypePosixFileDescriptorSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 103 // no analogue {"CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED", {"hipDeviceAttributeHandleTypeWin32HandleSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 104 // no analogue {"CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED", {"hipDeviceAttributeHandleTypeWin32KmtHandleSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 105 // no analogue {"CU_DEVICE_ATTRIBUTE_MAX", {"hipDeviceAttributeMax", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 106 // cudaDeviceP2PAttr {"CUdevice_P2PAttribute", {"hipDeviceP2PAttribute", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUdevice_P2PAttribute_enum", {"hipDeviceP2PAttribute", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUdevice_P2PAttribute enum values // cudaDevP2PAttrPerformanceRank = 1 {"CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK", {"hipDeviceP2PAttributePerformanceRank", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaDevP2PAttrAccessSupported = 2 {"CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED", {"hipDeviceP2PAttributeAccessSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaDevP2PAttrNativeAtomicSupported = 3 {"CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED", {"hipDeviceP2PAttributeNativeAtomicSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x03 // cudaDevP2PAttrCudaArrayAccessSupported = 4 // NOTE" deprecated, use CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED instead {"CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED", {"hipDevP2PAttributeCudaArrayAccessSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x04 // NOTE" deprecated, use CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED instead {"CU_DEVICE_P2P_ATTRIBUTE_ARRAY_ACCESS_ACCESS_SUPPORTED", {"hipDevP2PAttributeCudaArrayAccessSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x04 // cudaDevP2PAttrCudaArrayAccessSupported = 4 {"CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED", {"hipDevP2PAttributeCudaArrayAccessSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x04 // cudaEGL.h - presented only on Linux in nvidia-cuda-dev package // cudaEglColorFormat {"CUeglColorFormat", {"hipEglColorFormat", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, 
{"CUeglColorFormate_enum", {"hipEglColorFormat", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUeglColorFormat enum values // cudaEglColorFormatYUV420Planar = 0 {"CU_EGL_COLOR_FORMAT_YUV420_PLANAR", {"hipEglColorFormatYUV420Planar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 // cudaEglColorFormatYUV420SemiPlanar = 1 {"CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR", {"hipEglColorFormatYUV420SemiPlanar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaEglColorFormatYUV422Planar = 2 {"CU_EGL_COLOR_FORMAT_YUV422_PLANAR", {"hipEglColorFormatYUV422Planar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaEglColorFormatYUV422SemiPlanar = 3 {"CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR", {"hipEglColorFormatYUV422SemiPlanar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x03 // cudaEglColorFormatRGB = 4 {"CU_EGL_COLOR_FORMAT_RGB", {"hipEglColorFormatRGB", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x04 // cudaEglColorFormatBGR = 5 {"CU_EGL_COLOR_FORMAT_BGR", {"hipEglColorFormatBGR", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x05 // cudaEglColorFormatARGB = 6 {"CU_EGL_COLOR_FORMAT_ARGB", {"hipEglColorFormatARGB", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x06 // cudaEglColorFormatRGBA = 7 {"CU_EGL_COLOR_FORMAT_RGBA", {"hipEglColorFormatRGBA", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x07 // cudaEglColorFormatL = 8 {"CU_EGL_COLOR_FORMAT_L", {"hipEglColorFormatL", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x08 // cudaEglColorFormatR = 9 {"CU_EGL_COLOR_FORMAT_R", {"hipEglColorFormatR", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x09 // cudaEglColorFormatYUV444Planar = 10 {"CU_EGL_COLOR_FORMAT_YUV444_PLANAR", {"hipEglColorFormatYUV444Planar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x0A // cudaEglColorFormatYUV444SemiPlanar = 11 {"CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR", {"hipEglColorFormatYUV444SemiPlanar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x0B // cudaEglColorFormatYUYV422 = 12 {"CU_EGL_COLOR_FORMAT_YUYV_422", {"hipEglColorFormatYUYV422", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x0C // cudaEglColorFormatUYVY422 = 13 {"CU_EGL_COLOR_FORMAT_UYVY_422", {"hipEglColorFormatUYVY422", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x0D // cudaEglColorFormatABGR = 14 {"CU_EGL_COLOR_FORMAT_ABGR", {"hipEglColorFormatABGR", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x0E // cudaEglColorFormatBGRA = 15 {"CU_EGL_COLOR_FORMAT_BGRA", {"hipEglColorFormatBGRA", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x0F // cudaEglColorFormatA = 16 {"CU_EGL_COLOR_FORMAT_A", {"hipEglColorFormatA", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x10 // cudaEglColorFormatRG = 17 {"CU_EGL_COLOR_FORMAT_RG", {"hipEglColorFormatRG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x11 // cudaEglColorFormatAYUV = 18 {"CU_EGL_COLOR_FORMAT_AYUV", {"hipEglColorFormatAYUV", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x12 // cudaEglColorFormatYVU444SemiPlanar = 19 {"CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR", {"hipEglColorFormatYVU444SemiPlanar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x13 // cudaEglColorFormatYVU422SemiPlanar = 20 {"CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR", {"hipEglColorFormatYVU422SemiPlanar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x14 // 
cudaEglColorFormatYVU420SemiPlanar = 21 {"CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR", {"hipEglColorFormatYVU420SemiPlanar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x15 // cudaEglColorFormatYVU420SemiPlanar = 22 {"CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR", {"hipEglColorFormatY10V10U10_444SemiPlanar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x16 // cudaEglColorFormatY10V10U10_420SemiPlanar = 23 {"CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR", {"hipEglColorFormatY10V10U10_420SemiPlanar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x17 // cudaEglColorFormatY12V12U12_444SemiPlanar = 24 {"CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR", {"hipEglColorFormatY12V12U12_444SemiPlanar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x18 // cudaEglColorFormatY12V12U12_420SemiPlanar = 25 {"CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR", {"hipEglColorFormatY12V12U12_420SemiPlanar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x19 // cudaEglColorFormatVYUY_ER = 26 {"CU_EGL_COLOR_FORMAT_VYUY_ER", {"hipEglColorFormatVYUY_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1A // cudaEglColorFormatUYVY_ER = 27 {"CU_EGL_COLOR_FORMAT_UYVY_ER", {"hipEglColorFormatUYVY_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1B // cudaEglColorFormatYUYV_ER = 28 {"CU_EGL_COLOR_FORMAT_YUYV_ER", {"hipEglColorFormatYUYV_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1C // cudaEglColorFormatYVYU_ER = 29 {"CU_EGL_COLOR_FORMAT_YVYU_ER", {"hipEglColorFormatYVYU_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1D // cudaEglColorFormatYUV_ER = 30 {"CU_EGL_COLOR_FORMAT_YUV_ER", {"hipEglColorFormatYUV_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1E // cudaEglColorFormatYUVA_ER = 31 {"CU_EGL_COLOR_FORMAT_YUVA_ER", {"hipEglColorFormatYUVA_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1F // cudaEglColorFormatAYUV_ER = 32 {"CU_EGL_COLOR_FORMAT_AYUV_ER", {"hipEglColorFormatAYUV_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x20 // cudaEglColorFormatYUV444Planar_ER = 33 {"CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER", {"hipEglColorFormatYUV444Planar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x21 // cudaEglColorFormatYUV422Planar_ER = 34 {"CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER", {"hipEglColorFormatYUV422Planar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x22 // cudaEglColorFormatYUV420Planar_ER = 35 {"CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER", {"hipEglColorFormatYUV420Planar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x23 // cudaEglColorFormatYUV444SemiPlanar_ER = 36 {"CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER", {"hipEglColorFormatYUV444SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x24 // cudaEglColorFormatYUV422SemiPlanar_ER = 37 {"CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER", {"hipEglColorFormatYUV422SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x25 // cudaEglColorFormatYUV420SemiPlanar_ER = 38 {"CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER", {"hipEglColorFormatYUV420SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x26 // cudaEglColorFormatYVU444Planar_ER = 39 {"CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER", {"hipEglColorFormatYVU444Planar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x27 // cudaEglColorFormatYVU422Planar_ER = 40 {"CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER", 
{"hipEglColorFormatYVU422Planar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x28 // cudaEglColorFormatYVU420Planar_ER = 41 {"CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER", {"hipEglColorFormatYVU420Planar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x29 // cudaEglColorFormatYVU444SemiPlanar_ER = 42 {"CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER", {"hipEglColorFormatYVU444SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x2A // cudaEglColorFormatYVU422SemiPlanar_ER = 43 {"CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER", {"hipEglColorFormatYVU422SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x2B // cudaEglColorFormatYVU420SemiPlanar_ER = 44 {"CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER", {"hipEglColorFormatYVU420SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x2C // cudaEglColorFormatBayerRGGB = 45 {"CU_EGL_COLOR_FORMAT_BAYER_RGGB", {"hipEglColorFormatBayerRGGB", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x2D // cudaEglColorFormatBayerBGGR = 46 {"CU_EGL_COLOR_FORMAT_BAYER_BGGR", {"hipEglColorFormatBayerBGGR", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x2E // cudaEglColorFormatBayerGRBG = 47 {"CU_EGL_COLOR_FORMAT_BAYER_GRBG", {"hipEglColorFormatBayerGRBG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x2F // cudaEglColorFormatBayerGBRG = 48 {"CU_EGL_COLOR_FORMAT_BAYER_GBRG", {"hipEglColorFormatBayerGBRG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x30 // cudaEglColorFormatBayer10RGGB = 49 {"CU_EGL_COLOR_FORMAT_BAYER10_RGGB", {"hipEglColorFormatBayer10RGGB", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x31 // cudaEglColorFormatBayer10BGGR = 50 {"CU_EGL_COLOR_FORMAT_BAYER10_BGGR", {"hipEglColorFormatBayer10BGGR", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x32 // cudaEglColorFormatBayer10GRBG = 51 {"CU_EGL_COLOR_FORMAT_BAYER10_GRBG", {"hipEglColorFormatBayer10GRBG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x33 // cudaEglColorFormatBayer10GBRG = 52 {"CU_EGL_COLOR_FORMAT_BAYER10_GBRG", {"hipEglColorFormatBayer10GBRG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x34 // cudaEglColorFormatBayer12RGGB = 53 {"CU_EGL_COLOR_FORMAT_BAYER12_RGGB", {"hipEglColorFormatBayer12RGGB", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x35 // cudaEglColorFormatBayer12BGGR = 54 {"CU_EGL_COLOR_FORMAT_BAYER12_BGGR", {"hipEglColorFormatBayer12BGGR", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x36 // cudaEglColorFormatBayer12GRBG = 55 {"CU_EGL_COLOR_FORMAT_BAYER12_GRBG", {"hipEglColorFormatBayer12GRBG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x37 // cudaEglColorFormatBayer12GBRG = 56 {"CU_EGL_COLOR_FORMAT_BAYER12_GBRG", {"hipEglColorFormatBayer12GBRG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x38 // cudaEglColorFormatBayer14RGGB = 57 {"CU_EGL_COLOR_FORMAT_BAYER14_RGGB", {"hipEglColorFormatBayer14RGGB", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x39 // cudaEglColorFormatBayer14BGGR = 58 {"CU_EGL_COLOR_FORMAT_BAYER14_BGGR", {"hipEglColorFormatBayer14BGGR", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x3A // cudaEglColorFormatBayer14GRBG = 59 {"CU_EGL_COLOR_FORMAT_BAYER14_GRBG", {"hipEglColorFormatBayer14GRBG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x3B // cudaEglColorFormatBayer14GBRG = 60 {"CU_EGL_COLOR_FORMAT_BAYER14_GBRG", 
{"hipEglColorFormatBayer14GBRG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x3C // cudaEglColorFormatBayer20RGGB = 61 {"CU_EGL_COLOR_FORMAT_BAYER20_RGGB", {"hipEglColorFormatBayer20RGGB", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x3D // cudaEglColorFormatBayer20BGGR = 62 {"CU_EGL_COLOR_FORMAT_BAYER20_BGGR", {"hipEglColorFormatBayer20BGGR", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x3E // cudaEglColorFormatBayer20GRBG = 63 {"CU_EGL_COLOR_FORMAT_BAYER20_GRBG", {"hipEglColorFormatBayer20GRBG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x3F // cudaEglColorFormatBayer20GBRG = 64 {"CU_EGL_COLOR_FORMAT_BAYER20_GBRG", {"hipEglColorFormatBayer20GBRG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x40 // cudaEglColorFormatYVU444Planar = 65 {"CU_EGL_COLOR_FORMAT_YVU444_PLANAR", {"hipEglColorFormatYVU444Planar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x41 // cudaEglColorFormatYVU422Planar = 66 {"CU_EGL_COLOR_FORMAT_YVU422_PLANAR", {"hipEglColorFormatYVU422Planar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x42 // cudaEglColorFormatYVU420Planar = 67 {"CU_EGL_COLOR_FORMAT_YVU420_PLANAR", {"hipEglColorFormatYVU420Planar", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x43 // cudaEglColorFormatBayerIspRGGB = 68 {"CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB", {"hipEglColorFormatBayerIspRGGB", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x44 // cudaEglColorFormatBayerIspBGGR = 69 {"CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR", {"hipEglColorFormatBayerIspBGGR", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x45 // cudaEglColorFormatBayerIspGRBG = 70 {"CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG", {"hipEglColorFormatBayerIspGRBG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x46 // cudaEglColorFormatBayerIspGBRG = 71 {"CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG", {"hipEglColorFormatBayerIspGBRG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x47 // no analogue {"CU_EGL_COLOR_FORMAT_MAX", {"hipEglColorFormatMax", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x48 // cudaEglFrameType {"CUeglFrameType", {"hipEglFrameType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUeglFrameType_enum", {"hipEglFrameType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUeglFrameType enum values // cudaEglFrameTypeArray {"CU_EGL_FRAME_TYPE_ARRAY", {"hipEglFrameTypeArray", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0 // cudaEglFrameTypePitch {"CU_EGL_FRAME_TYPE_PITCH", {"hipEglFrameTypePitch", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 1 // cudaEglResourceLocationFlags {"CUeglResourceLocationFlags", {"hipEglResourceLocationFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUeglResourceLocationFlags_enum", {"hipEglResourceLocationFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUeglResourceLocationFlags enum values // cudaEglResourceLocationSysmem {"CU_EGL_RESOURCE_LOCATION_SYSMEM", {"hipEglResourceLocationSysmem", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 // cudaEglResourceLocationVidmem {"CU_EGL_RESOURCE_LOCATION_VIDMEM", {"hipEglResourceLocationVidmem", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // no analogue {"CUevent_flags", {"hipEventFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUevent_flags_enum", {"hipEventFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUevent_flags enum values // cudaEventDefault {"CU_EVENT_DEFAULT", 
{"hipEventDefault", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x00 // cudaEventBlockingSync {"CU_EVENT_BLOCKING_SYNC", {"hipEventBlockingSync", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x01 // cudaEventDisableTiming {"CU_EVENT_DISABLE_TIMING", {"hipEventDisableTiming", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x02 // cudaEventInterprocess {"CU_EVENT_INTERPROCESS", {"hipEventInterprocess", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x04 // cudaExternalMemoryHandleType {"CUexternalMemoryHandleType", {"hipExternalMemoryHandleType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUexternalMemoryHandleType_enum", {"hipExternalMemoryHandleType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUexternalMemoryHandleType enum values // cudaExternalMemoryHandleTypeOpaqueFd {"CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD", {"hipExternalMemoryHandleTypeOpaqueFD", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 1 // cudaExternalMemoryHandleTypeOpaqueWin32 {"CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32", {"hipExternalMemoryHandleTypeOpaqueWin32", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 2 // cudaExternalMemoryHandleTypeOpaqueWin32Kmt {"CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT", {"hipExternalMemoryHandleTypeOpaqueWin32KMT", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 3 // cudaExternalMemoryHandleTypeD3D12Heap {"CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP", {"hipExternalMemoryHandleTypeD3D12Heap", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 4 // cudaExternalMemoryHandleTypeD3D12Resource {"CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE", {"hipExternalMemoryHandleTypeD3D12Resource", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 5 // cudaExternalMemoryHandleTypeD3D11Resource {"CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE", {"hipExternalMemoryHandleTypeD3D11Resource", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 6 // cudaExternalMemoryHandleTypeD3D11ResourceKmt {"CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT", {"hipExternalMemoryHandleTypeD3D11ResourceKmt", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 7 // cudaExternalMemoryHandleTypeNvSciBuf {"CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF", {"hipExternalMemoryHandleTypeNvSciBuf", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 8 // cudaExternalSemaphoreHandleType {"CUexternalSemaphoreHandleType", {"hipExternalSemaphoreHandleType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUexternalSemaphoreHandleType_enum", {"hipExternalSemaphoreHandleType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUexternalSemaphoreHandleType enum values // cudaExternalSemaphoreHandleTypeOpaqueFd {"CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD", {"hipExternalSemaphoreHandleTypeOpaqueFD", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 1 // cudaExternalSemaphoreHandleTypeOpaqueWin32 {"CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32", {"hipExternalSemaphoreHandleTypeOpaqueWin32", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 2 // cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt {"CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT", {"hipExternalSemaphoreHandleTypeOpaqueWin32KMT", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 3 // cudaExternalSemaphoreHandleTypeD3D12Fence {"CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE", {"hipExternalSemaphoreHandleTypeD3D12Fence", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 4 // cudaExternalSemaphoreHandleTypeD3D11Fence {"CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE", 
{"hipExternalSemaphoreHandleTypeD3D11Fence", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 5 // cudaExternalSemaphoreHandleTypeNvSciSync {"CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC", {"hipExternalSemaphoreHandleTypeNvSciSync", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 6 // cudaExternalSemaphoreHandleTypeKeyedMutex {"CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX", {"hipExternalSemaphoreHandleTypeKeyedMutex", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 7 // cudaExternalSemaphoreHandleTypeKeyedMutexKmt {"CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT", {"hipExternalSemaphoreHandleTypeKeyedMutexKmt", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 8 // cudaTextureFilterMode {"CUfilter_mode", {"hipTextureFilterMode", "", CONV_TYPE, API_DRIVER}}, {"CUfilter_mode_enum", {"hipTextureFilterMode", "", CONV_TYPE, API_DRIVER}}, // CUfilter_mode enum values // cudaFilterModePoint {"CU_TR_FILTER_MODE_POINT", {"hipFilterModePoint", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0 // cudaFilterModeLinear {"CU_TR_FILTER_MODE_LINEAR", {"hipFilterModeLinear", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 1 // cudaFuncCache {"CUfunc_cache", {"hipFuncCache_t", "", CONV_TYPE, API_DRIVER}}, {"CUfunc_cache_enum", {"hipFuncCache_t", "", CONV_TYPE, API_DRIVER}}, // CUfunc_cache enum values // cudaFilterModePoint = 0 {"CU_FUNC_CACHE_PREFER_NONE", {"hipFuncCachePreferNone", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x00 // cudaFuncCachePreferShared = 1 {"CU_FUNC_CACHE_PREFER_SHARED", {"hipFuncCachePreferShared", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x01 // cudaFuncCachePreferL1 = 2 {"CU_FUNC_CACHE_PREFER_L1", {"hipFuncCachePreferL1", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x02 // cudaFuncCachePreferEqual = 3 {"CU_FUNC_CACHE_PREFER_EQUAL", {"hipFuncCachePreferEqual", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x03 // cudaFuncAttribute {"CUfunction_attribute", {"hipFunction_attribute", "", CONV_TYPE, API_DRIVER}}, {"CUfunction_attribute_enum", {"hipFunction_attribute", "", CONV_TYPE, API_DRIVER}}, // CUfunction_attribute enum values // no analogue {"CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK", {"HIP_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0 // no analogue {"CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES", {"HIP_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 1 // no analogue {"CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES", {"HIP_FUNC_ATTRIBUTE_CONST_SIZE_BYTES", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 2 // no analogue {"CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES", {"HIP_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 3 // no analogue {"CU_FUNC_ATTRIBUTE_NUM_REGS", {"HIP_FUNC_ATTRIBUTE_NUM_REGS", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 4 // no analogue {"CU_FUNC_ATTRIBUTE_PTX_VERSION", {"HIP_FUNC_ATTRIBUTE_PTX_VERSION", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 5 // no analogue {"CU_FUNC_ATTRIBUTE_BINARY_VERSION", {"HIP_FUNC_ATTRIBUTE_BINARY_VERSION", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 6 // no analogue {"CU_FUNC_ATTRIBUTE_CACHE_MODE_CA", {"HIP_FUNC_ATTRIBUTE_CACHE_MODE_CA", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 7 // cudaFuncAttributeMaxDynamicSharedMemorySize {"CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES", {"HIP_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 8 // cudaFuncAttributePreferredSharedMemoryCarveout {"CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT", 
{"HIP_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 9 // cudaFuncAttributeMax {"CU_FUNC_ATTRIBUTE_MAX", {"HIP_FUNC_ATTRIBUTE_MAX", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 10 //cudaGraphicsMapFlags {"CUgraphicsMapResourceFlags", {"hipGraphicsMapFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUgraphicsMapResourceFlags_enum", {"hipGraphicsMapFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUgraphicsMapResourceFlags enum values // cudaGraphicsMapFlagsNone = 0 {"CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE", {"hipGraphicsMapFlagsNone", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 // cudaGraphicsMapFlagsReadOnly = 1 {"CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY", {"hipGraphicsMapFlagsReadOnly", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaGraphicsMapFlagsWriteDiscard = 2 {"CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD", {"hipGraphicsMapFlagsWriteDiscard", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaGraphicsRegisterFlags {"CUgraphicsRegisterFlags", {"hipGraphicsRegisterFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUgraphicsRegisterFlags_enum", {"hipGraphicsRegisterFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaGraphicsRegisterFlags enum values //cudaGraphicsRegisterFlagsNone = 0 {"CU_GRAPHICS_REGISTER_FLAGS_NONE", {"hipGraphicsRegisterFlagsNone", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 // cudaGraphicsRegisterFlagsReadOnly = 1 {"CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY", {"hipGraphicsRegisterFlagsReadOnly", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 //cudaGraphicsRegisterFlagsWriteDiscard = 2 {"CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD", {"hipGraphicsRegisterFlagsWriteDiscard", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaGraphicsRegisterFlagsSurfaceLoadStore = 4 {"CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST", {"hipGraphicsRegisterFlagsSurfaceLoadStore", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x04 // cudaGraphicsRegisterFlagsTextureGather = 8 {"CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER", {"hipGraphicsRegisterFlagsTextureGather", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x08 // cudaGraphNodeType {"CUgraphNodeType", {"hipGraphNodeType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUgraphNodeType_enum", {"hipGraphNodeType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaGraphNodeType enum values // cudaGraphNodeTypeKernel = 0x00 {"CU_GRAPH_NODE_TYPE_KERNEL", {"hipGraphNodeTypeKernel", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0 // cudaGraphNodeTypeMemcpy = 0x01 {"CU_GRAPH_NODE_TYPE_MEMCPY", {"hipGraphNodeTypeMemcpy", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 1 // cudaGraphNodeTypeMemset = 0x02 {"CU_GRAPH_NODE_TYPE_MEMSET", {"hipGraphNodeTypeMemset", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 2 // cudaGraphNodeTypeHost = 0x03 {"CU_GRAPH_NODE_TYPE_HOST", {"hipGraphNodeTypeHost", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 3 // cudaGraphNodeTypeGraph = 0x04 {"CU_GRAPH_NODE_TYPE_GRAPH", {"hipGraphNodeTypeGraph", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 4 // cudaGraphNodeTypeEmpty = 0x05 {"CU_GRAPH_NODE_TYPE_EMPTY", {"hipGraphNodeTypeEmpty", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 5 // cudaGraphNodeTypeCount {"CU_GRAPH_NODE_TYPE_COUNT", {"hipGraphNodeTypeCount", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 6 // 
cudaGraphExecUpdateResult {"CUgraphExecUpdateResult", {"hipGraphExecUpdateResult", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUgraphExecUpdateResult_enum", {"hipGraphExecUpdateResult", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUgraphExecUpdateResult enum values // cudaGraphExecUpdateSuccess {"CU_GRAPH_EXEC_UPDATE_SUCCESS", {"hipGraphExecUpdateSuccess", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x0 // cudaGraphExecUpdateError {"CU_GRAPH_EXEC_UPDATE_ERROR", {"hipGraphExecUpdateError", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1 // cudaGraphExecUpdateErrorTopologyChanged {"CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED", {"hipGraphExecUpdateErrorTopologyChanged", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x2 // cudaGraphExecUpdateErrorNodeTypeChanged {"CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED", {"hipGraphExecUpdateErrorNodeTypeChanged", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x3 // cudaGraphExecUpdateErrorFunctionChanged {"CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED", {"hipGraphExecUpdateErrorFunctionChanged", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x4 // cudaGraphExecUpdateErrorParametersChanged {"CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED", {"hipGraphExecUpdateErrorParametersChanged", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x5 // cudaGraphExecUpdateErrorNotSupported {"CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED", {"hipGraphExecUpdateErrorNotSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x6 // no analogue {"CUipcMem_flags", {"hipIpcMemFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUipcMem_flags_enum", {"hipIpcMemFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUipcMem_flags enum values // cudaIpcMemLazyEnablePeerAccess {"CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS", {"hipIpcMemLazyEnablePeerAccess", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x1 // no analogue {"CUjit_cacheMode", {"hipJitCacheMode", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUjit_cacheMode_enum", {"hipJitCacheMode", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUjit_cacheMode enum values // no analogue {"CU_JIT_CACHE_OPTION_NONE", {"hipJitCacheModeOptionNone", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0 // no analogue {"CU_JIT_CACHE_OPTION_CG", {"hipJitCacheModeOptionCG", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue {"CU_JIT_CACHE_OPTION_CA", {"hipJitCacheModeOptionCA", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue {"CUjit_fallback", {"hipJitFallback", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUjit_fallback_enum", {"hipJitFallback", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUjit_fallback enum values {"CU_PREFER_PTX", {"hipJitFallbackPreferPtx", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0 {"CU_PREFER_BINARY", {"hipJitFallbackPreferBinary", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue // NOTE: is not used by HIP, as it has no JIT, thus just a dummy enum {"CUjit_option", {"hipJitOption", "", CONV_TYPE, API_DRIVER}}, {"CUjit_option_enum", {"hipJitOption", "", CONV_TYPE, API_DRIVER}}, // CUjit_option enum values {"CU_JIT_MAX_REGISTERS", {"hipJitOptionMaxRegisters", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0 {"CU_JIT_THREADS_PER_BLOCK", {"hipJitOptionThreadsPerBlock", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_WALL_TIME", {"hipJitOptionWallTime", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_INFO_LOG_BUFFER", 
{"hipJitOptionInfoLogBuffer", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES", {"hipJitOptionInfoLogBufferSizeBytes", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_ERROR_LOG_BUFFER", {"hipJitOptionErrorLogBuffer", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES", {"hipJitOptionErrorLogBufferSizeBytes", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_OPTIMIZATION_LEVEL", {"hipJitOptionOptimizationLevel", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_TARGET_FROM_CUCONTEXT", {"hipJitOptionTargetFromContext", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_TARGET", {"hipJitOptionTarget", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_FALLBACK_STRATEGY", {"hipJitOptionFallbackStrategy", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_GENERATE_DEBUG_INFO", {"hipJitOptionGenerateDebugInfo", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_LOG_VERBOSE", {"hipJitOptionLogVerbose", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_GENERATE_LINE_INFO", {"hipJitOptionGenerateLineInfo", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_CACHE_MODE", {"hipJitOptionCacheMode", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_NEW_SM3X_OPT", {"hipJitOptionSm3xOpt", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_FAST_COMPILE", {"hipJitOptionFastCompile", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_GLOBAL_SYMBOL_NAMES", {"hipJitGlobalSymbolNames", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_GLOBAL_SYMBOL_ADDRESSES", {"hipJitGlobalSymbolAddresses", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_GLOBAL_SYMBOL_COUNT", {"hipJitGlobalSymbolCount", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, {"CU_JIT_NUM_OPTIONS", {"hipJitOptionNumOptions", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // no analogue {"CUjit_target", {"hipJitTarget", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUjit_target_enum", {"hipJitTarget", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUjit_target enum values // NOTE: Deprecated {"CU_TARGET_COMPUTE_10", {"hipJitTargetCompute10", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 10 // NOTE: Deprecated {"CU_TARGET_COMPUTE_11", {"hipJitTargetCompute11", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 11 // NOTE: Deprecated {"CU_TARGET_COMPUTE_12", {"hipJitTargetCompute12", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 12 // NOTE: Deprecated {"CU_TARGET_COMPUTE_13", {"hipJitTargetCompute13", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 13 {"CU_TARGET_COMPUTE_20", {"hipJitTargetCompute20", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 20 {"CU_TARGET_COMPUTE_21", {"hipJitTargetCompute21", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 21 {"CU_TARGET_COMPUTE_30", {"hipJitTargetCompute30", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 20 {"CU_TARGET_COMPUTE_32", {"hipJitTargetCompute32", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 32 {"CU_TARGET_COMPUTE_35", {"hipJitTargetCompute35", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 35 {"CU_TARGET_COMPUTE_37", {"hipJitTargetCompute37", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 37 {"CU_TARGET_COMPUTE_50", {"hipJitTargetCompute50", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 50 {"CU_TARGET_COMPUTE_52", {"hipJitTargetCompute52", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 52 {"CU_TARGET_COMPUTE_53", {"hipJitTargetCompute53", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 53 {"CU_TARGET_COMPUTE_60", 
{"hipJitTargetCompute60", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 60 {"CU_TARGET_COMPUTE_61", {"hipJitTargetCompute61", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 61 {"CU_TARGET_COMPUTE_62", {"hipJitTargetCompute62", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 62 {"CU_TARGET_COMPUTE_70", {"hipJitTargetCompute70", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 70 {"CU_TARGET_COMPUTE_72", {"hipJitTargetCompute72", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 70 // NOTE: Deprecated {"CU_TARGET_COMPUTE_73", {"hipJitTargetCompute73", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 73 {"CU_TARGET_COMPUTE_75", {"hipJitTargetCompute75", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 75 // no analogue {"CUjitInputType", {"hipJitInputType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUjitInputType_enum", {"hipJitInputType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUjitInputType enum values {"CU_JIT_INPUT_CUBIN", {"hipJitInputTypeBin", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0 {"CU_JIT_INPUT_PTX", {"hipJitInputTypePtx", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, {"CU_JIT_INPUT_FATBINARY", {"hipJitInputTypeFatBinary", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, {"CU_JIT_INPUT_OBJECT", {"hipJitInputTypeObject", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, {"CU_JIT_INPUT_LIBRARY", {"hipJitInputTypeLibrary", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, {"CU_JIT_NUM_INPUT_TYPES", {"hipJitInputTypeNumInputTypes", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // cudaLimit {"CUlimit", {"hipLimit_t", "", CONV_TYPE, API_DRIVER}}, {"CUlimit_enum", {"hipLimit_t", "", CONV_TYPE, API_DRIVER}}, // CUlimit enum values // cudaLimitStackSize {"CU_LIMIT_STACK_SIZE", {"hipLimitStackSize", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 //cudaLimitPrintfFifoSize {"CU_LIMIT_PRINTF_FIFO_SIZE", {"hipLimitPrintfFifoSize", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 //cudaLimitMallocHeapSize {"CU_LIMIT_MALLOC_HEAP_SIZE", {"hipLimitMallocHeapSize", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x02 // cudaLimitDevRuntimeSyncDepth {"CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH", {"hipLimitDevRuntimeSyncDepth", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x03 // cudaLimitDevRuntimePendingLaunchCount {"CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT", {"hipLimitDevRuntimePendingLaunchCount", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x04 // cudaLimitMaxL2FetchGranularity {"CU_LIMIT_MAX_L2_FETCH_GRANULARITY", {"hipLimitMaxL2FetchGranularity", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x05 // no analogue {"CU_LIMIT_MAX", {"hipLimitMax", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // cudaMemoryAdvise {"CUmem_advise", {"hipMemAdvise", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUmem_advise_enum", {"hipMemAdvise", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUmem_advise enum values // cudaMemAdviseSetReadMostly {"CU_MEM_ADVISE_SET_READ_MOSTLY", {"hipMemAdviseSetReadMostly", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 1 // cudaMemAdviseUnsetReadMostly {"CU_MEM_ADVISE_UNSET_READ_MOSTLY", {"hipMemAdviseUnsetReadMostly", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 2 // cudaMemAdviseSetPreferredLocation {"CU_MEM_ADVISE_SET_PREFERRED_LOCATION", {"hipMemAdviseSetPreferredLocation", "", CONV_NUMERIC_LITERAL, API_DRIVER, 
HIP_UNSUPPORTED}}, // 3 // cudaMemAdviseUnsetPreferredLocation {"CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION", {"hipMemAdviseUnsetPreferredLocation", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 4 // cudaMemAdviseSetAccessedBy {"CU_MEM_ADVISE_SET_ACCESSED_BY", {"hipMemAdviseSetAccessedBy", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 5 // cudaMemAdviseUnsetAccessedBy {"CU_MEM_ADVISE_UNSET_ACCESSED_BY", {"hipMemAdviseUnsetAccessedBy", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 6 // no analogue {"CUmemAttach_flags", {"hipMemAttachFlags_t", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUmemAttach_flags_enum", {"hipMemAttachFlags_t", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUmemAttach_flags enum values // cudaMemAttachGlobal {"CU_MEM_ATTACH_GLOBAL", {"hipMemAttachGlobal", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x1 // cudaMemAttachHost {"CU_MEM_ATTACH_HOST", {"hipMemAttachHost", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x2 // cudaMemAttachSingle {"CU_MEM_ATTACH_SINGLE", {"hipMemAttachSingle", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x4 // no analogue // NOTE: cudaMemoryType is partial analogue {"CUmemorytype", {"hipMemoryType", "", CONV_TYPE, API_DRIVER}}, {"CUmemorytype_enum", {"hipMemoryType", "", CONV_TYPE, API_DRIVER}}, // CUmemorytype enum values {"CU_MEMORYTYPE_HOST", {"hipMemoryTypeHost", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x01 {"CU_MEMORYTYPE_DEVICE", {"hipMemoryTypeDevice", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x02 {"CU_MEMORYTYPE_ARRAY", {"hipMemoryTypeArray", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x03 {"CU_MEMORYTYPE_UNIFIED", {"hipMemoryTypeUnified", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x04 // cudaMemRangeAttribute {"CUmem_range_attribute", {"hipMemRangeAttribute", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUmem_range_attribute_enum", {"hipMemRangeAttribute", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUmem_range_attribute enum values // cudaMemRangeAttributeReadMostly {"CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY", {"hipMemRangeAttributeReadMostly", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 1 // cudaMemRangeAttributePreferredLocation {"CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION", {"hipMemRangeAttributePreferredLocation", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 2 // cudaMemRangeAttributeAccessedBy {"CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY", {"hipMemRangeAttributeAccessedBy", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 3 // cudaMemRangeAttributeLastPrefetchLocation {"CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION", {"hipMemRangeAttributeLastPrefetchLocation", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 4 //no analogue {"CUoccupancy_flags", {"hipOccupancyFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUoccupancy_flags_enum", {"hipOccupancyFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUoccupancy_flags enum values // cudaOccupancyDefault {"CU_OCCUPANCY_DEFAULT", {"hipOccupancyDefault", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 // cudaOccupancyDisableCachingOverride {"CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE", {"hipOccupancyDisableCachingOverride", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 //no analogue // TODO: Analogous enum is needed in HIP. Couldn't map enum to struct hipPointerAttribute_t. // TODO: Do the same for Pointer Attributes as for Device Attributes. 
{"CUpointer_attribute", {"hipPointerAttribute", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUpointer_attribute_enum", {"hipPointerAttribute", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUpointer_attribute enum values {"CU_POINTER_ATTRIBUTE_CONTEXT", {"hipPointerAttributeContext", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 1 {"CU_POINTER_ATTRIBUTE_MEMORY_TYPE", {"hipPointerAttributeMemoryType", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 2 {"CU_POINTER_ATTRIBUTE_DEVICE_POINTER", {"hipPointerAttributeDevicePointer", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 3 {"CU_POINTER_ATTRIBUTE_HOST_POINTER", {"hipPointerAttributeHostPointer", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 4 {"CU_POINTER_ATTRIBUTE_P2P_TOKENS", {"hipPointerAttributeP2pTokens", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 5 {"CU_POINTER_ATTRIBUTE_SYNC_MEMOPS", {"hipPointerAttributeSyncMemops", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 6 {"CU_POINTER_ATTRIBUTE_BUFFER_ID", {"hipPointerAttributeBufferId", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 7 {"CU_POINTER_ATTRIBUTE_IS_MANAGED", {"hipPointerAttributeIsManaged", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 8 {"CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL", {"hipPointerAttributeDeviceOrdinal", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 9 {"CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE", {"hipPointerAttributeIsLegacyCudaIpcCapable", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 10 {"CU_POINTER_ATTRIBUTE_RANGE_START_ADDR", {"hipPointerAttributeRangeStartAddress", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 11 {"CU_POINTER_ATTRIBUTE_RANGE_SIZE", {"hipPointerAttributeRangeSize", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 12 {"CU_POINTER_ATTRIBUTE_MAPPED", {"hipPointerAttributeMapped", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 13 {"CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES", {"hipPointerAttributeAllowedHandleTypes", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 14 // cudaResourceType {"CUresourcetype", {"hipResourceType", "", CONV_TYPE, API_DRIVER}}, {"CUresourcetype_enum", {"hipResourceType", "", CONV_TYPE, API_DRIVER}}, // CUresourcetype enum values // cudaResourceTypeArray {"CU_RESOURCE_TYPE_ARRAY", {"hipResourceTypeArray", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x00 //cudaResourceTypeMipmappedArray {"CU_RESOURCE_TYPE_MIPMAPPED_ARRAY", {"hipResourceTypeMipmappedArray", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x01 //cudaResourceTypeLinear {"CU_RESOURCE_TYPE_LINEAR", {"hipResourceTypeLinear", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x02 //cudaResourceTypePitch2D {"CU_RESOURCE_TYPE_PITCH2D", {"hipResourceTypePitch2D", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x03 // cudaResourceViewFormat {"CUresourceViewFormat", {"hipResourceViewFormat", "", CONV_TYPE, API_DRIVER}}, {"CUresourceViewFormat_enum", {"hipResourceViewFormat", "", CONV_TYPE, API_DRIVER}}, // CUresourceViewFormat enum values // cudaResViewFormatNone {"CU_RES_VIEW_FORMAT_NONE", {"hipResViewFormatNone", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x00 // cudaResViewFormatUnsignedChar1 {"CU_RES_VIEW_FORMAT_UINT_1X8", {"hipResViewFormatUnsignedChar1", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x01 // cudaResViewFormatUnsignedChar2 {"CU_RES_VIEW_FORMAT_UINT_2X8", {"hipResViewFormatUnsignedChar2", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x02 // cudaResViewFormatUnsignedChar4 
{"CU_RES_VIEW_FORMAT_UINT_4X8", {"hipResViewFormatUnsignedChar4", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x03 // cudaResViewFormatSignedChar1 {"CU_RES_VIEW_FORMAT_SINT_1X8", {"hipResViewFormatSignedChar1", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x04 // cudaResViewFormatSignedChar2 {"CU_RES_VIEW_FORMAT_SINT_2X8", {"hipResViewFormatSignedChar2", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x05 // cudaResViewFormatSignedChar4 {"CU_RES_VIEW_FORMAT_SINT_4X8", {"hipResViewFormatSignedChar4", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x06 // cudaResViewFormatUnsignedShort1 {"CU_RES_VIEW_FORMAT_UINT_1X16", {"hipResViewFormatUnsignedShort1", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x07 // cudaResViewFormatUnsignedShort2 {"CU_RES_VIEW_FORMAT_UINT_2X16", {"hipResViewFormatUnsignedShort2", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x08 // cudaResViewFormatUnsignedShort4 {"CU_RES_VIEW_FORMAT_UINT_4X16", {"hipResViewFormatUnsignedShort4", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x09 // cudaResViewFormatSignedShort1 {"CU_RES_VIEW_FORMAT_SINT_1X16", {"hipResViewFormatSignedShort1", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x0a // cudaResViewFormatSignedShort2 {"CU_RES_VIEW_FORMAT_SINT_2X16", {"hipResViewFormatSignedShort2", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x0b // cudaResViewFormatSignedShort4 {"CU_RES_VIEW_FORMAT_SINT_4X16", {"hipResViewFormatSignedShort4", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x0c // cudaResViewFormatUnsignedInt1 {"CU_RES_VIEW_FORMAT_UINT_1X32", {"hipResViewFormatUnsignedInt1", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x0d // cudaResViewFormatUnsignedInt2 {"CU_RES_VIEW_FORMAT_UINT_2X32", {"hipResViewFormatUnsignedInt2", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x0e // cudaResViewFormatUnsignedInt4 {"CU_RES_VIEW_FORMAT_UINT_4X32", {"hipResViewFormatUnsignedInt4", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x0f // cudaResViewFormatSignedInt1 {"CU_RES_VIEW_FORMAT_SINT_1X32", {"hipResViewFormatSignedInt1", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x10 // cudaResViewFormatSignedInt2 {"CU_RES_VIEW_FORMAT_SINT_2X32", {"hipResViewFormatSignedInt2", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x11 // cudaResViewFormatSignedInt4 {"CU_RES_VIEW_FORMAT_SINT_4X32", {"hipResViewFormatSignedInt4", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x12 // cudaResViewFormatHalf1 {"CU_RES_VIEW_FORMAT_FLOAT_1X16", {"hipResViewFormatHalf1", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x13 // cudaResViewFormatHalf2 {"CU_RES_VIEW_FORMAT_FLOAT_2X16", {"hipResViewFormatHalf2", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x14 // cudaResViewFormatHalf4 {"CU_RES_VIEW_FORMAT_FLOAT_4X16", {"hipResViewFormatHalf4", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x15 // cudaResViewFormatFloat1 {"CU_RES_VIEW_FORMAT_FLOAT_1X32", {"hipResViewFormatFloat1", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x16 // cudaResViewFormatFloat2 {"CU_RES_VIEW_FORMAT_FLOAT_2X32", {"hipResViewFormatFloat2", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x17 // cudaResViewFormatFloat4 {"CU_RES_VIEW_FORMAT_FLOAT_4X32", {"hipResViewFormatFloat4", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x18 // cudaResViewFormatUnsignedBlockCompressed1 {"CU_RES_VIEW_FORMAT_UNSIGNED_BC1", {"hipResViewFormatUnsignedBlockCompressed1", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x19 // cudaResViewFormatUnsignedBlockCompressed2 {"CU_RES_VIEW_FORMAT_UNSIGNED_BC2", {"hipResViewFormatUnsignedBlockCompressed2", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x1a // cudaResViewFormatUnsignedBlockCompressed3 {"CU_RES_VIEW_FORMAT_UNSIGNED_BC3", 
{"hipResViewFormatUnsignedBlockCompressed3", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x1b // cudaResViewFormatUnsignedBlockCompressed4 {"CU_RES_VIEW_FORMAT_UNSIGNED_BC4", {"hipResViewFormatUnsignedBlockCompressed4", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x1c // cudaResViewFormatSignedBlockCompressed4 {"CU_RES_VIEW_FORMAT_SIGNED_BC4", {"hipResViewFormatSignedBlockCompressed4", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x1d // cudaResViewFormatUnsignedBlockCompressed5 {"CU_RES_VIEW_FORMAT_UNSIGNED_BC5", {"hipResViewFormatUnsignedBlockCompressed5", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x1e // cudaResViewFormatSignedBlockCompressed5 {"CU_RES_VIEW_FORMAT_SIGNED_BC5", {"hipResViewFormatSignedBlockCompressed5", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x1f // cudaResViewFormatUnsignedBlockCompressed6H {"CU_RES_VIEW_FORMAT_UNSIGNED_BC6H", {"hipResViewFormatUnsignedBlockCompressed6H", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x20 // cudaResViewFormatSignedBlockCompressed6H {"CU_RES_VIEW_FORMAT_SIGNED_BC6H", {"hipResViewFormatSignedBlockCompressed6H", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x21 // cudaResViewFormatUnsignedBlockCompressed7 {"CU_RES_VIEW_FORMAT_UNSIGNED_BC7", {"hipResViewFormatUnsignedBlockCompressed7", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x22 // cudaError {"CUresult", {"hipError_t", "", CONV_TYPE, API_DRIVER}}, {"cudaError_enum", {"hipError_t", "", CONV_TYPE, API_DRIVER}}, // CUresult enum values // cudaSuccess {"CUDA_SUCCESS", {"hipSuccess", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0 // cudaErrorInvalidValue {"CUDA_ERROR_INVALID_VALUE", {"hipErrorInvalidValue", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 1 // cudaErrorMemoryAllocation {"CUDA_ERROR_OUT_OF_MEMORY", {"hipErrorOutOfMemory", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 2 // cudaErrorInitializationError {"CUDA_ERROR_NOT_INITIALIZED", {"hipErrorNotInitialized", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 3 // cudaErrorCudartUnloading {"CUDA_ERROR_DEINITIALIZED", {"hipErrorDeinitialized", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 4 // cudaErrorProfilerDisabled {"CUDA_ERROR_PROFILER_DISABLED", {"hipErrorProfilerDisabled", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 5 // cudaErrorProfilerNotInitialized // NOTE: Deprecated since CUDA 5.0 {"CUDA_ERROR_PROFILER_NOT_INITIALIZED", {"hipErrorProfilerNotInitialized", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 6 // NOTE: Deprecated since CUDA 5.0 // cudaErrorProfilerAlreadyStarted {"CUDA_ERROR_PROFILER_ALREADY_STARTED", {"hipErrorProfilerAlreadyStarted", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 7 // cudaErrorProfilerAlreadyStopped // NOTE: Deprecated since CUDA 5.0 {"CUDA_ERROR_PROFILER_ALREADY_STOPPED", {"hipErrorProfilerAlreadyStopped", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 8 // cudaErrorNoDevice {"CUDA_ERROR_NO_DEVICE", {"hipErrorNoDevice", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 100 // cudaErrorInvalidDevice {"CUDA_ERROR_INVALID_DEVICE", {"hipErrorInvalidDevice", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 101 // cudaErrorInvalidKernelImage {"CUDA_ERROR_INVALID_IMAGE", {"hipErrorInvalidImage", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 200 // cudaErrorDeviceUninitilialized {"CUDA_ERROR_INVALID_CONTEXT", {"hipErrorInvalidContext", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 201 // no analogue // NOTE: Deprecated since CUDA 3.2 {"CUDA_ERROR_CONTEXT_ALREADY_CURRENT", {"hipErrorContextAlreadyCurrent", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 202 // cudaErrorMapBufferObjectFailed {"CUDA_ERROR_MAP_FAILED", {"hipErrorMapFailed", "", 
CONV_NUMERIC_LITERAL, API_DRIVER}}, // 205 // cudaErrorUnmapBufferObjectFailed {"CUDA_ERROR_UNMAP_FAILED", {"hipErrorUnmapFailed", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 206 // cudaErrorArrayIsMapped {"CUDA_ERROR_ARRAY_IS_MAPPED", {"hipErrorArrayIsMapped", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 207 // cudaErrorAlreadyMapped {"CUDA_ERROR_ALREADY_MAPPED", {"hipErrorAlreadyMapped", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 208 // cudaErrorNoKernelImageForDevice {"CUDA_ERROR_NO_BINARY_FOR_GPU", {"hipErrorNoBinaryForGpu", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 209 // cudaErrorAlreadyAcquired {"CUDA_ERROR_ALREADY_ACQUIRED", {"hipErrorAlreadyAcquired", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 210 // cudaErrorNotMapped {"CUDA_ERROR_NOT_MAPPED", {"hipErrorNotMapped", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 211 // cudaErrorNotMappedAsArray {"CUDA_ERROR_NOT_MAPPED_AS_ARRAY", {"hipErrorNotMappedAsArray", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 212 // cudaErrorNotMappedAsPointer {"CUDA_ERROR_NOT_MAPPED_AS_POINTER", {"hipErrorNotMappedAsPointer", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 213 // cudaErrorECCUncorrectable {"CUDA_ERROR_ECC_UNCORRECTABLE", {"hipErrorECCNotCorrectable", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 214 // cudaErrorUnsupportedLimit {"CUDA_ERROR_UNSUPPORTED_LIMIT", {"hipErrorUnsupportedLimit", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 215 // cudaErrorDeviceAlreadyInUse {"CUDA_ERROR_CONTEXT_ALREADY_IN_USE", {"hipErrorContextAlreadyInUse", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 216 // cudaErrorPeerAccessUnsupported {"CUDA_ERROR_PEER_ACCESS_UNSUPPORTED", {"hipErrorPeerAccessUnsupported", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 217 // cudaErrorInvalidPtx {"CUDA_ERROR_INVALID_PTX", {"hipErrorInvalidKernelFile", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 218 // cudaErrorInvalidGraphicsContext {"CUDA_ERROR_INVALID_GRAPHICS_CONTEXT", {"hipErrorInvalidGraphicsContext", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 219 // cudaErrorNvlinkUncorrectable {"CUDA_ERROR_NVLINK_UNCORRECTABLE", {"hipErrorNvlinkUncorrectable", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 220 // cudaErrorJitCompilerNotFound {"CUDA_ERROR_JIT_COMPILER_NOT_FOUND", {"hipErrorJitCompilerNotFound", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 221 // cudaErrorInvalidSource {"CUDA_ERROR_INVALID_SOURCE", {"hipErrorInvalidSource", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 300 // cudaErrorFileNotFound {"CUDA_ERROR_FILE_NOT_FOUND", {"hipErrorFileNotFound", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 301 // cudaErrorSharedObjectSymbolNotFound {"CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND", {"hipErrorSharedObjectSymbolNotFound", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 302 // cudaErrorSharedObjectInitFailed {"CUDA_ERROR_SHARED_OBJECT_INIT_FAILED", {"hipErrorSharedObjectInitFailed", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 303 // cudaErrorOperatingSystem {"CUDA_ERROR_OPERATING_SYSTEM", {"hipErrorOperatingSystem", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 304 // cudaErrorInvalidResourceHandle {"CUDA_ERROR_INVALID_HANDLE", {"hipErrorInvalidHandle", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 400 // cudaErrorIllegalState {"CUDA_ERROR_ILLEGAL_STATE", {"hipErrorIllegalState", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 401 // cudaErrorSymbolNotFound {"CUDA_ERROR_NOT_FOUND", {"hipErrorNotFound", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 500 // cudaErrorNotReady {"CUDA_ERROR_NOT_READY", {"hipErrorNotReady", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 600 // 
cudaErrorIllegalAddress {"CUDA_ERROR_ILLEGAL_ADDRESS", {"hipErrorIllegalAddress", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 700 // cudaErrorLaunchOutOfResources {"CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES", {"hipErrorLaunchOutOfResources", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 701 // cudaErrorLaunchTimeout {"CUDA_ERROR_LAUNCH_TIMEOUT", {"hipErrorLaunchTimeOut", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 702 // cudaErrorLaunchIncompatibleTexturing {"CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING", {"hipErrorLaunchIncompatibleTexturing", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 703 // cudaErrorPeerAccessAlreadyEnabled {"CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED", {"hipErrorPeerAccessAlreadyEnabled", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 704 // cudaErrorPeerAccessNotEnabled {"CUDA_ERROR_PEER_ACCESS_NOT_ENABLED", {"hipErrorPeerAccessNotEnabled", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 705 // cudaErrorSetOnActiveProcess {"CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE", {"hipErrorSetOnActiveProcess", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 708 // cudaErrorContextIsDestroyed {"CUDA_ERROR_CONTEXT_IS_DESTROYED", {"hipErrorContextIsDestroyed", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 709 // cudaErrorAssert {"CUDA_ERROR_ASSERT", {"hipErrorAssert", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 710 // cudaErrorTooManyPeers {"CUDA_ERROR_TOO_MANY_PEERS", {"hipErrorTooManyPeers", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 711 // cudaErrorHostMemoryAlreadyRegistered {"CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED", {"hipErrorHostMemoryAlreadyRegistered", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 712 // cudaErrorHostMemoryNotRegistered {"CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED", {"hipErrorHostMemoryNotRegistered", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 713 // cudaErrorHardwareStackError {"CUDA_ERROR_HARDWARE_STACK_ERROR", {"hipErrorHardwareStackError", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 714 // cudaErrorIllegalInstruction {"CUDA_ERROR_ILLEGAL_INSTRUCTION", {"hipErrorIllegalInstruction", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 715 // cudaErrorMisalignedAddress {"CUDA_ERROR_MISALIGNED_ADDRESS", {"hipErrorMisalignedAddress", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 716 // cudaErrorInvalidAddressSpace {"CUDA_ERROR_INVALID_ADDRESS_SPACE", {"hipErrorInvalidAddressSpace", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 717 // cudaErrorInvalidPc {"CUDA_ERROR_INVALID_PC", {"hipErrorInvalidPc", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 718 // cudaErrorLaunchFailure {"CUDA_ERROR_LAUNCH_FAILED", {"hipErrorLaunchFailure", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 719 // cudaErrorNotPermitted {"CUDA_ERROR_NOT_PERMITTED", {"hipErrorNotPermitted", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 800 // cudaErrorNotSupported {"CUDA_ERROR_NOT_SUPPORTED", {"hipErrorNotSupported", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 801 // cudaErrorSystemNotReady {"CUDA_ERROR_SYSTEM_NOT_READY", {"hipErrorSystemNotReady", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 802 // cudaErrorSystemDriverMismatch {"CUDA_ERROR_SYSTEM_DRIVER_MISMATCH", {"hipErrorSystemDriverMismatch", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 803 // cudaErrorCompatNotSupportedOnDevice {"CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE", {"hipErrorCompatNotSupportedOnDevice", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 804 // cudaErrorStreamCaptureUnsupported 
{"CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED", {"hipErrorStreamCaptureUnsupported", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 900 // cudaErrorStreamCaptureInvalidated {"CUDA_ERROR_STREAM_CAPTURE_INVALIDATED", {"hipErrorStreamCaptureInvalidated", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 901 // cudaErrorStreamCaptureMerge {"CUDA_ERROR_STREAM_CAPTURE_MERGE", {"hipErrorStreamCaptureMerge", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 902 // cudaErrorStreamCaptureUnmatched {"CUDA_ERROR_STREAM_CAPTURE_UNMATCHED", {"hipErrorStreamCaptureUnmatched", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 903 // cudaErrorStreamCaptureUnjoined {"CUDA_ERROR_STREAM_CAPTURE_UNJOINED", {"hipErrorStreamCaptureUnjoined", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 904 // cudaErrorStreamCaptureIsolation {"CUDA_ERROR_STREAM_CAPTURE_ISOLATION", {"hipErrorStreamCaptureIsolation", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 905 // cudaErrorStreamCaptureImplicit {"CUDA_ERROR_STREAM_CAPTURE_IMPLICIT", {"hipErrorStreamCaptureImplicit", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 906 // cudaErrorCapturedEvent {"CUDA_ERROR_CAPTURED_EVENT", {"hipErrorCapturedEvent", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 907 // cudaErrorStreamCaptureWrongThread {"CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD", {"hipErrorStreamCaptureWrongThread", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 908 // cudaErrorTimeout {"CUDA_ERROR_TIMEOUT", {"hipErrorTimeout", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 909 // cudaErrorGraphExecUpdateFailure {"CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE", {"hipErrorGraphExecUpdateFailure", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 910 // cudaErrorUnknown {"CUDA_ERROR_UNKNOWN", {"hipErrorUnknown", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 999 // cudaSharedMemConfig {"CUsharedconfig", {"hipSharedMemConfig", "", CONV_TYPE, API_DRIVER}}, {"CUsharedconfig_enum", {"hipSharedMemConfig", "", CONV_TYPE, API_DRIVER}}, // CUsharedconfig enum values // cudaSharedMemBankSizeDefault = 0 {"CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE", {"hipSharedMemBankSizeDefault", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x00 // cudaSharedMemBankSizeFourByte = 1 {"CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE", {"hipSharedMemBankSizeFourByte", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x01 // cudaSharedMemBankSizeEightByte = 2 {"CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE", {"hipSharedMemBankSizeEightByte", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x02 // cudaSharedCarveout {"CUshared_carveout", {"hipSharedCarveout", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUshared_carveout_enum", {"hipSharedCarveout", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUshared_carveout enum values // cudaSharedmemCarveoutDefault {"CU_SHAREDMEM_CARVEOUT_DEFAULT", {"hipSharedmemCarveoutDefault", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // -1 // cudaSharedmemCarveoutMaxShared {"CU_SHAREDMEM_CARVEOUT_MAX_SHARED", {"hipSharedmemCarveoutMaxShared", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 100 // cudaSharedmemCarveoutMaxShared {"CU_SHAREDMEM_CARVEOUT_MAX_L1", {"hipSharedmemCarveoutMaxL1", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0 // no analogue {"CUstream_flags", {"hipStreamFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUstream_flags_enum", {"hipStreamFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUstream_flags enum values // 
cudaStreamDefault = 0x00 {"CU_STREAM_DEFAULT", {"hipStreamDefault", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x0 // cudaStreamNonBlocking = 0x01 {"CU_STREAM_NON_BLOCKING", {"hipStreamNonBlocking", "", CONV_NUMERIC_LITERAL, API_DRIVER}}, // 0x1 // no analogue {"CUstreamBatchMemOpType", {"hipStreamBatchMemOpType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUstreamBatchMemOpType_enum", {"hipStreamBatchMemOpType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUstreamBatchMemOpType enum values {"CU_STREAM_MEM_OP_WAIT_VALUE_32", {"hipStreamBatchMemOpWaitValue32", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 1 {"CU_STREAM_MEM_OP_WRITE_VALUE_32", {"hipStreamBatchMemOpWriteValue32", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 2 {"CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES", {"hipStreamBatchMemOpFlushRemoteWrites", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 3 {"CU_STREAM_MEM_OP_WAIT_VALUE_64", {"hipStreamBatchMemOpWaitValue64", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 4 {"CU_STREAM_MEM_OP_WRITE_VALUE_64", {"hipStreamBatchMemOpWriteValue64", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 5 // cudaStreamCaptureStatus {"CUstreamCaptureStatus", {"hipStreamCaptureStatus", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUstreamCaptureStatus_enum", {"hipStreamCaptureStatus", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUstreamCaptureStatus enum values // cudaStreamCaptureStatusNone {"CU_STREAM_CAPTURE_STATUS_NONE", {"hipStreamCaptureStatusNone", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0 // cudaStreamCaptureStatusActive {"CU_STREAM_CAPTURE_STATUS_ACTIVE", {"hipStreamCaptureStatusActive", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 1 // cudaStreamCaptureStatusInvalidated {"CU_STREAM_CAPTURE_STATUS_INVALIDATED", {"hipStreamCaptureStatusInvalidated", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 2 // cudaStreamCaptureMode {"CUstreamCaptureMode", {"hipStreamCaptureMode", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUstreamCaptureMode_enum", {"hipStreamCaptureMode", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUstreamCaptureMode enum values // cudaStreamCaptureModeGlobal {"CU_STREAM_CAPTURE_MODE_GLOBAL", {"hipStreamCaptureModeGlobal", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0 // cudaStreamCaptureModeThreadLocal {"CU_STREAM_CAPTURE_MODE_THREAD_LOCAL", {"hipStreamCaptureModeThreadLocal", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 1 // cudaStreamCaptureModeRelaxed {"CU_STREAM_CAPTURE_MODE_RELAXED", {"hipStreamCaptureModeRelaxed", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 2 // no analogue {"CUstreamWaitValue_flags", {"hipStreamWaitValueFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUstreamWaitValue_flags_enum", {"hipStreamWaitValueFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUstreamWaitValue_flags enum values {"CU_STREAM_WAIT_VALUE_GEQ", {"hipStreamWaitValueGeq", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x0 {"CU_STREAM_WAIT_VALUE_EQ", {"hipStreamWaitValueEq", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1 {"CU_STREAM_WAIT_VALUE_AND", {"hipStreamWaitValueAnd", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x2 {"CU_STREAM_WAIT_VALUE_FLUSH", {"hipStreamWaitValueFlush", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 1<<30 // no analogue {"CUstreamWriteValue_flags", {"hipStreamWriteValueFlags", "", CONV_TYPE, API_DRIVER, 
HIP_UNSUPPORTED}}, {"CUstreamWriteValue_flags_enum", {"hipStreamWriteValueFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUstreamWriteValue_flags enum values {"CU_STREAM_WRITE_VALUE_DEFAULT", {"hipStreamWriteValueDefault", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x0 {"CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER", {"hipStreamWriteValueNoMemoryBarrier", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1 // cudaGLDeviceList {"CUGLDeviceList", {"hipGLDeviceList", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUGLDeviceList_enum", {"hipGLDeviceList", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUGLDeviceList enum values // cudaGLDeviceListAll = 1 {"CU_GL_DEVICE_LIST_ALL", {"hipGLDeviceListAll", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaGLDeviceListCurrentFrame = 2 {"CU_GL_DEVICE_LIST_CURRENT_FRAME", {"hipGLDeviceListCurrentFrame", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaGLDeviceListNextFrame = 3 {"CU_GL_DEVICE_LIST_NEXT_FRAME", {"hipGLDeviceListNextFrame", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x03 // cudaGLMapFlags {"CUGLmap_flags", {"hipGLMapFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUGLmap_flags_enum", {"hipGLMapFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUGLmap_flags enum values // cudaGLMapFlagsNone = 0 {"CU_GL_MAP_RESOURCE_FLAGS_NONE", {"hipGLMapFlagsNone", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 // cudaGLMapFlagsReadOnly = 1 {"CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY", {"hipGLMapFlagsReadOnly", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaGLMapFlagsWriteDiscard = 2 {"CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", {"hipGLMapFlagsWriteDiscard", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaD3D9DeviceList {"CUd3d9DeviceList", {"hipD3D9DeviceList", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUd3d9DeviceList_enum", {"hipD3D9DeviceList", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUd3d9DeviceList enum values // cudaD3D9DeviceListAll = 1 {"CU_D3D9_DEVICE_LIST_ALL", {"HIP_D3D9_DEVICE_LIST_ALL", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaD3D9DeviceListCurrentFrame = 2 {"CU_D3D9_DEVICE_LIST_CURRENT_FRAME", {"HIP_D3D9_DEVICE_LIST_CURRENT_FRAME", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaD3D9DeviceListNextFrame = 3 {"CU_D3D9_DEVICE_LIST_NEXT_FRAME", {"HIP_D3D9_DEVICE_LIST_NEXT_FRAME", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x03 // cudaD3D9MapFlags // NOTE: Deprecated {"CUd3d9map_flags", {"hipD3D9MapFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUd3d9map_flags_enum", {"hipD3D9MapFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUd3d9map_flags enum values // cudaD3D9MapFlagsNone = 0 {"CU_D3D9_MAPRESOURCE_FLAGS_NONE", {"HIP_D3D9_MAPRESOURCE_FLAGS_NONE", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 // cudaD3D9MapFlagsReadOnly = 1 {"CU_D3D9_MAPRESOURCE_FLAGS_READONLY", {"HIP_D3D9_MAPRESOURCE_FLAGS_READONLY", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaD3D9MapFlagsWriteDiscard = 2 {"CU_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", {"HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaD3D9RegisterFlags {"CUd3d9register_flags", {"hipD3D9RegisterFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUd3d9register_flags_enum", {"hipD3D9RegisterFlags", "", CONV_TYPE, API_DRIVER, 
HIP_UNSUPPORTED}}, // CUd3d9register_flags enum values // cudaD3D9RegisterFlagsNone = 0 {"CU_D3D9_REGISTER_FLAGS_NONE", {"HIP_D3D9_REGISTER_FLAGS_NONE", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 // cudaD3D9RegisterFlagsArray = 1 {"CU_D3D9_REGISTER_FLAGS_ARRAY", {"HIP_D3D9_REGISTER_FLAGS_ARRAY", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaD3D10DeviceList {"CUd3d10DeviceList", {"hipd3d10DeviceList", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUd3d10DeviceList_enum", {"hipD3D10DeviceList", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUd3d10DeviceList enum values // cudaD3D10DeviceListAll = 1 {"CU_D3D10_DEVICE_LIST_ALL", {"HIP_D3D10_DEVICE_LIST_ALL", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaD3D10DeviceListCurrentFrame = 2 {"CU_D3D10_DEVICE_LIST_CURRENT_FRAME", {"HIP_D3D10_DEVICE_LIST_CURRENT_FRAME", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaD3D10DeviceListNextFrame = 3 {"CU_D3D10_DEVICE_LIST_NEXT_FRAME", {"HIP_D3D10_DEVICE_LIST_NEXT_FRAME", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x03 // cudaD3D10MapFlags {"CUd3d10map_flags", {"hipD3D10MapFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUd3d10map_flags_enum", {"hipD3D10MapFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUd3d10map_flags enum values // cudaD3D10MapFlagsNone = 0 {"CU_D3D10_MAPRESOURCE_FLAGS_NONE", {"HIP_D3D10_MAPRESOURCE_FLAGS_NONE", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 // cudaD3D10MapFlagsReadOnly = 1 {"CU_D3D10_MAPRESOURCE_FLAGS_READONLY", {"HIP_D3D10_MAPRESOURCE_FLAGS_READONLY", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaD3D10MapFlagsWriteDiscard = 2 {"CU_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", {"HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaD3D10RegisterFlags {"CUd3d10register_flags", {"hipD3D10RegisterFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUd3d10register_flags_enum", {"hipD3D10RegisterFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUd3d10register_flags enum values // cudaD3D10RegisterFlagsNone = 0 {"CU_D3D10_REGISTER_FLAGS_NONE", {"HIP_D3D10_REGISTER_FLAGS_NONE", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x00 // cudaD3D10RegisterFlagsArray = 1 {"CU_D3D10_REGISTER_FLAGS_ARRAY", {"HIP_D3D10_REGISTER_FLAGS_ARRAY", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaD3D11DeviceList {"CUd3d11DeviceList", {"hipd3d11DeviceList", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUd3d11DeviceList_enum", {"hipD3D11DeviceList", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUd3d11DeviceList enum values // cudaD3D11DeviceListAll = 1 {"CU_D3D11_DEVICE_LIST_ALL", {"HIP_D3D11_DEVICE_LIST_ALL", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaD3D11DeviceListCurrentFrame = 2 {"CU_D3D11_DEVICE_LIST_CURRENT_FRAME", {"HIP_D3D11_DEVICE_LIST_CURRENT_FRAME", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaD3D11DeviceListNextFrame = 3 {"CU_D3D11_DEVICE_LIST_NEXT_FRAME", {"HIP_D3D11_DEVICE_LIST_NEXT_FRAME", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x03 // no analogue {"CUmemAllocationHandleType", {"hipMemoryAllocationHandleType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUmemAllocationHandleType_enum", {"hipMemoryAllocationHandleType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUmemAllocationHandleType enum values 
{"CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR", {"HIP_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1 {"CU_MEM_HANDLE_TYPE_WIN32", {"HIP_MEM_HANDLE_TYPE_WIN32", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x2 {"CU_MEM_HANDLE_TYPE_WIN32_KMT", {"HIP_MEM_HANDLE_TYPE_WIN32_KMT", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x4 {"CU_MEM_HANDLE_TYPE_MAX", {"HIP_MEM_HANDLE_TYPE_MAX", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0xFFFFFFFF // no analogue {"CUmemAccess_flags", {"hipMemoryAccessFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUmemAccess_flags_enum", {"hipMemoryAccessFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUmemAccess_flags enum values {"CU_MEM_ACCESS_FLAGS_PROT_NONE", {"HIP_MEM_ACCESS_FLAGS_PROT_NONE", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1 {"CU_MEM_ACCESS_FLAGS_PROT_READ", {"HIP_MEM_ACCESS_FLAGS_PROT_READ", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x2 {"CU_MEM_ACCESS_FLAGS_PROT_READWRITE", {"HIP_MEM_ACCESS_FLAGS_PROT_READWRITE", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x3 {"CU_MEM_ACCESS_FLAGS_PROT_MAX", {"HIP_MEM_ACCESS_FLAGS_PROT_MAX", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0xFFFFFFFF // no analogue {"CUmemLocationType", {"hipMemoryLocationType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUmemLocationType_enum", {"hipMemoryLocationType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUmemLocationType enum values {"CU_MEM_LOCATION_TYPE_INVALID", {"HIP_MEM_LOCATION_TYPE_INVALID", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x0 {"CU_MEM_LOCATION_TYPE_DEVICE", {"HIP_MEM_LOCATION_TYPE_DEVICE", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1 {"CU_MEM_LOCATION_TYPE_MAX", {"HIP_MEM_LOCATION_TYPE_MAX", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0xFFFFFFFF // no analogue {"CUmemAllocationGranularity_flags", {"hipMemoryAllocationGranularityFlags", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, {"CUmemAllocationGranularity_flags_enum", {"hipMemoryLocationType", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // CUmemAllocationGranularity_flags enum values {"CU_MEM_ALLOC_GRANULARITY_MINIMUM", {"HIP_MEM_ALLOC_GRANULARITY_MINIMUM", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x0 {"CU_MEM_ALLOC_GRANULARITY_RECOMMENDED", {"HIP_MEM_ALLOC_GRANULARITY_RECOMMENDED", "", CONV_NUMERIC_LITERAL, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1 // 4. Typedefs // no analogue {"CUdevice", {"hipDevice_t", "", CONV_TYPE, API_DRIVER}}, {"CUdeviceptr", {"hipDeviceptr_t", "", CONV_TYPE, API_DRIVER}}, // cudaHostFn_t {"CUhostFn", {"hipHostFn", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // no analogue {"CUoccupancyB2DSize", {"hipOccupancyB2DSize", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaStreamCallback_t {"CUstreamCallback", {"hipStreamCallback_t", "", CONV_TYPE, API_DRIVER}}, // cudaSurfaceObject_t {"CUsurfObject", {"hipSurfaceObject", "", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaTextureObject_t {"CUtexObject", {"hipTextureObject_t", "", CONV_TYPE, API_DRIVER}}, // 5. 
Defines {"__CUDACC__", {"__HIPCC__", "", CONV_DEFINE, API_DRIVER}}, {"CUDA_CB", {"HIP_CB", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // cudaCpuDeviceId ((int)-1) {"CU_DEVICE_CPU", {"hipCpuDeviceId", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // ((CUdevice)-1) // cudaInvalidDeviceId ((int)-1) {"CU_DEVICE_INVALID", {"hipInvalidDeviceId", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // ((CUdevice)-2) // CUDA_IPC_HANDLE_SIZE {"CU_IPC_HANDLE_SIZE", {"HIP_IPC_HANDLE_SIZE", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 64 {"CU_LAUNCH_PARAM_BUFFER_POINTER", {"HIP_LAUNCH_PARAM_BUFFER_POINTER", "", CONV_DEFINE, API_DRIVER}}, // ((void*)0x01) {"CU_LAUNCH_PARAM_BUFFER_SIZE", {"HIP_LAUNCH_PARAM_BUFFER_SIZE", "", CONV_DEFINE, API_DRIVER}}, // ((void*)0x02) {"CU_LAUNCH_PARAM_END", {"HIP_LAUNCH_PARAM_END", "", CONV_DEFINE, API_DRIVER}}, // ((void*)0x00) // cudaHostAllocPortable {"CU_MEMHOSTALLOC_PORTABLE", {"hipHostMallocPortable", "", CONV_DEFINE, API_DRIVER}}, // 0x01 // cudaHostAllocMapped {"CU_MEMHOSTALLOC_DEVICEMAP", {"hipHostMallocMapped", "", CONV_DEFINE, API_DRIVER}}, // 0x02 // cudaHostAllocWriteCombined {"CU_MEMHOSTALLOC_WRITECOMBINED", {"hipHostMallocWriteCombined", "", CONV_DEFINE, API_DRIVER}}, // 0x04 // cudaHostRegisterPortable {"CU_MEMHOSTREGISTER_PORTABLE", {"hipHostRegisterPortable", "", CONV_DEFINE, API_DRIVER}}, // 0x01 // cudaHostRegisterMapped {"CU_MEMHOSTREGISTER_DEVICEMAP", {"hipHostRegisterMapped", "", CONV_DEFINE, API_DRIVER}}, // 0x02 // cudaHostRegisterIoMemory {"CU_MEMHOSTREGISTER_IOMEMORY", {"hipHostRegisterIoMemory", "", CONV_DEFINE, API_DRIVER}}, // 0x04 {"CU_PARAM_TR_DEFAULT", {"HIP_PARAM_TR_DEFAULT", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // -1 // cudaStreamLegacy ((cudaStream_t)0x1) {"CU_STREAM_LEGACY", {"hipStreamLegacy", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // ((CUstream)0x1) // cudaStreamPerThread ((cudaStream_t)0x2) {"CU_STREAM_PER_THREAD", {"hipStreamPerThread", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // ((CUstream)0x2) {"CU_TRSA_OVERRIDE_FORMAT", {"HIP_TRSA_OVERRIDE_FORMAT", "", CONV_DEFINE, API_DRIVER}}, // 0x01 {"CU_TRSF_NORMALIZED_COORDINATES", {"HIP_TRSF_NORMALIZED_COORDINATES", "", CONV_DEFINE, API_DRIVER}}, // 0x02 {"CU_TRSF_READ_AS_INTEGER", {"HIP_TRSF_READ_AS_INTEGER", "", CONV_DEFINE, API_DRIVER}}, // 0x01 {"CU_TRSF_SRGB", {"HIP_TRSF_SRGB", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 0x10 // no analogue // NOTE: Deprecated, use CUDA_ARRAY3D_LAYERED {"CUDA_ARRAY3D_2DARRAY", {"HIP_ARRAY3D_2DARRAY", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaArrayLayered {"CUDA_ARRAY3D_LAYERED", {"hipArrayLayered", "", CONV_DEFINE, API_DRIVER}}, // 0x01 // cudaArraySurfaceLoadStore {"CUDA_ARRAY3D_SURFACE_LDST", {"hipArraySurfaceLoadStore", "", CONV_DEFINE, API_DRIVER}}, // 0x02 // cudaArrayCubemap {"CUDA_ARRAY3D_CUBEMAP", {"hipArrayCubemap", "", CONV_DEFINE, API_DRIVER}}, // 0x04 // cudaArrayTextureGather {"CUDA_ARRAY3D_TEXTURE_GATHER", {"hipArrayTextureGather", "", CONV_DEFINE, API_DRIVER}}, // 0x08 // no analogue {"CUDA_ARRAY3D_DEPTH_TEXTURE", {"hipArrayDepthTexture", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 0x10 // cudaArrayColorAttachment {"CUDA_ARRAY3D_COLOR_ATTACHMENT", {"hipArrayColorAttachment", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 0x20 // cudaCooperativeLaunchMultiDeviceNoPreSync {"CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC", {"hipCooperativeLaunchMultiDeviceNoPreSync", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // 
cudaCooperativeLaunchMultiDeviceNoPostSync {"CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC", {"hipCooperativeLaunchMultiDeviceNoPostSync", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaExternalMemoryDedicated {"CUDA_EXTERNAL_MEMORY_DEDICATED", {"hipExternalMemoryDedicated", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1 // cudaExternalSemaphoreSignalSkipNvSciBufMemSync {"CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC", {"hipExternalSemaphoreSignalSkipNvSciBufMemSync", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 0x01 // cudaExternalSemaphoreWaitSkipNvSciBufMemSync {"CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC", {"hipExternalSemaphoreWaitSkipNvSciBufMemSync", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 0x02 // cudaNvSciSyncAttrSignal {"CUDA_NVSCISYNC_ATTR_SIGNAL", {"hipNvSciSyncAttrSignal", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1 // cudaNvSciSyncAttrWait {"CUDA_NVSCISYNC_ATTR_WAIT", {"hipNvSciSyncAttrWait", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 0x1 {"CUDA_VERSION", {"HIP_VERSION", "", CONV_DEFINE, API_DRIVER, HIP_UNSUPPORTED}}, // 10000 };
1
8,903
Please remove `HIP_UNSUPPORTED`
ROCm-Developer-Tools-HIP
cpp
@@ -30,13 +30,17 @@ import { STORE_NAME as CORE_USER } from '../googlesitekit/datastore/user/constan /** * Gets the current dateRange string. * - * @param {string} [dateRange] Optional. The date range slug. + * @param {string} [dateRange] Optional. The date range slug. + * @param {boolean} [returnNumber] Optional. If true, returns the number only. * @return {string} the date range string. */ -export function getCurrentDateRange( dateRange = getCurrentDateRangeSlug() ) { +export function getCurrentDateRange( dateRange = getCurrentDateRangeSlug(), returnNumber = false ) { const daysMatch = dateRange.match( /last-(\d+)-days/ ); if ( daysMatch && daysMatch[ 1 ] ) { + if ( returnNumber ) { + return parseInt( daysMatch[ 1 ], 10 ); + } return sprintf( /* translators: %s: Number of days matched. */ _n( '%s day', '%s days', parseInt( daysMatch[ 1 ], 10 ), 'google-site-kit' ),
1
/** * Utility functions. * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * WordPress dependencies */ import { __, _n, sprintf } from '@wordpress/i18n'; /** * Internal dependencies */ import Data from 'googlesitekit-data'; import { STORE_NAME as CORE_USER } from '../googlesitekit/datastore/user/constants'; /** * Gets the current dateRange string. * * @param {string} [dateRange] Optional. The date range slug. * @return {string} the date range string. */ export function getCurrentDateRange( dateRange = getCurrentDateRangeSlug() ) { const daysMatch = dateRange.match( /last-(\d+)-days/ ); if ( daysMatch && daysMatch[ 1 ] ) { return sprintf( /* translators: %s: Number of days matched. */ _n( '%s day', '%s days', parseInt( daysMatch[ 1 ], 10 ), 'google-site-kit' ), daysMatch[ 1 ] ); } throw new Error( 'Unrecognized date range slug.' ); } /** * Gets the current dateRange slug. * * @return {string} the date range slug. */ export function getCurrentDateRangeSlug() { return Data.select( CORE_USER ).getDateRange(); } /** * Gets the hash of available date ranges. * * @since 1.12.0 * * @return {Object} The object hash where every key is a date range slug, and the value is an object with the date range slug and its translation. */ export function getAvailableDateRanges() { /* translators: %s: Number of days to request data. */ const format = __( 'Last %s days', 'google-site-kit' ); return { 'last-7-days': { slug: 'last-7-days', label: sprintf( format, 7 ), }, 'last-14-days': { slug: 'last-14-days', label: sprintf( format, 14 ), }, 'last-28-days': { slug: 'last-28-days', label: sprintf( format, 28 ), }, 'last-90-days': { slug: 'last-90-days', label: sprintf( format, 90 ), }, }; }
1
32,122
Having a boolean parameter change a function's behavior is not good practice: it violates the principle that a function should be responsible for a single task, so needing one here indicates we should split something out instead. We can introduce a function like `getCurrentDateRangeDayCount` or similar that does this (sketched after this entry).
google-site-kit-wp
js
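For illustration, a minimal sketch of the helper the reviewer suggests. The name `getCurrentDateRangeDayCount` comes from the review comment itself, the body reuses the slug-matching logic already present in the utility file above, and it assumes it would live next to `getCurrentDateRangeSlug`; this is a sketch of the suggestion, not the actual Site Kit implementation.

```js
/**
 * Gets the number of days in the current date range (hypothetical helper).
 *
 * @param {string} [dateRange] Optional. The date range slug, e.g. 'last-28-days'.
 * @return {number} The day count parsed from the slug.
 */
export function getCurrentDateRangeDayCount( dateRange = getCurrentDateRangeSlug() ) {
	const daysMatch = dateRange.match( /last-(\d+)-days/ );

	if ( daysMatch && daysMatch[ 1 ] ) {
		return parseInt( daysMatch[ 1 ], 10 );
	}

	throw new Error( 'Unrecognized date range slug.' );
}
```

With a helper like this, `getCurrentDateRange` could keep returning only the formatted string, and callers that need the raw day count would call the new function instead of passing a `returnNumber` flag.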
@@ -156,7 +156,6 @@ test.suite( await driver.get(fileServer.Pages.basicAuth) let source = await driver.getPageSource() assert.strictEqual(source.includes('Access granted!'), true) - await server.stop() }) })
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 'use strict' const assert = require('assert') const fs = require('fs') const path = require('path') const chrome = require('../../chrome') const error = require('../../lib/error') const fileServer = require('../../lib/test/fileserver') const io = require('../../io') const test = require('../../lib/test') const until = require('../../lib/until') test.suite( function (env) { let driver before(async function () { driver = await env .builder() .setChromeOptions(new chrome.Options().headless()) .build() }) after(() => driver.quit()) it('can send commands to devtools', async function () { await driver.get(test.Pages.ajaxyPage) assert.strictEqual(await driver.getCurrentUrl(), test.Pages.ajaxyPage) await driver.sendDevToolsCommand('Page.navigate', { url: test.Pages.echoPage, }) assert.strictEqual(await driver.getCurrentUrl(), test.Pages.echoPage) }) it('can send commands to devtools and get return', async function () { await driver.get(test.Pages.ajaxyPage) assert.strictEqual(await driver.getCurrentUrl(), test.Pages.ajaxyPage) await driver.get(test.Pages.echoPage) assert.strictEqual(await driver.getCurrentUrl(), test.Pages.echoPage) let history = await driver.sendAndGetDevToolsCommand( 'Page.getNavigationHistory' ) assert(history) assert(history.currentIndex >= 2) assert.strictEqual( history.entries[history.currentIndex].url, test.Pages.echoPage ) assert.strictEqual( history.entries[history.currentIndex - 1].url, test.Pages.ajaxyPage ) }) it('sends Page.enable command using devtools', async function () { const cdpConnection = await driver.createCDPConnection('page') cdpConnection.execute('Page.enable', 1, {}, function (_res, err) { assert(!err) }) }) it('sends Network and Page command using devtools', async function () { const cdpConnection = await driver.createCDPConnection('page') cdpConnection.execute('Network.enable', 1, {}, function (_res, err) { assert(!err) }) cdpConnection.execute( 'Page.navigate', 1, { url: 'chrome://newtab/' }, function (_res, err) { assert(!err) } ) }) describe('JS CDP events', function () { it('calls the event listener for console.log', async function () { const cdpConnection = await driver.createCDPConnection('page') await driver.onLogEvent(cdpConnection, function (event) { assert.strictEqual(event['args'][0]['value'], 'here') }) await driver.executeScript('console.log("here")') }) it('calls the event listener for js exceptions', async function () { const cdpConnection = await driver.createCDPConnection('page') await driver.onLogException(cdpConnection, function (event) { assert.strictEqual( event['exceptionDetails']['stackTrace']['callFrames'][0][ 'functionName' ], 'onmouseover' ) }) await driver.get(test.Pages.javascriptPage) let element = driver.findElement({ id: 'throwing-mouseover' 
}) await element.click() }) }) describe('JS DOM events', function () { it('calls the event listener on dom mutations', async function () { const cdpConnection = await driver.createCDPConnection('page') await driver.logMutationEvents(cdpConnection, function (event) { assert.strictEqual(event['attribute_name'], 'style') assert.strictEqual(event['current_value'], '') assert.strictEqual(event['old_value'], 'display:none;') }) await driver.get(fileServer.Pages.dynamicPage) let element = driver.findElement({ id: 'reveal' }) await element.click() let revealed = driver.findElement({ id: 'revealed' }) await driver.wait(until.elementIsVisible(revealed), 5000) }) }) describe('Basic Auth Injection', function () { it('denies entry if username and password do not match', async function () { const pageCdpConnection = await driver.createCDPConnection('page') await driver.register('random', 'random', pageCdpConnection) await driver.get(fileServer.Pages.basicAuth) let source = await driver.getPageSource() assert.strictEqual(source.includes('Access granted!'), false) }) it('grants access if username and password are a match', async function () { const pageCdpConnection = await driver.createCDPConnection('page') await driver.register('genie', 'bottle', pageCdpConnection) await driver.get(fileServer.Pages.basicAuth) let source = await driver.getPageSource() assert.strictEqual(source.includes('Access granted!'), true) await server.stop() }) }) describe('setDownloadPath', function () { it('can enable downloads in headless mode', async function () { const dir = await io.tmpDir() await driver.setDownloadPath(dir) const url = fileServer.whereIs('/data/firefox/webextension.xpi') await driver.get(`data:text/html,<!DOCTYPE html> <div><a download="" href="${url}">Go!</a></div>`) await driver.findElement({ css: 'a' }).click() const downloadPath = path.join(dir, 'webextension.xpi') await driver.wait(() => io.exists(downloadPath), 5000) const goldenPath = path.join( __dirname, '../../lib/test/data/firefox/webextension.xpi' ) assert.strictEqual( fs.readFileSync(downloadPath, 'binary'), fs.readFileSync(goldenPath, 'binary') ) }) it('throws if path is not a directory', async function () { await assertInvalidArgumentError(() => driver.setDownloadPath()) await assertInvalidArgumentError(() => driver.setDownloadPath(null)) await assertInvalidArgumentError(() => driver.setDownloadPath('')) await assertInvalidArgumentError(() => driver.setDownloadPath(1234)) const file = await io.tmpFile() await assertInvalidArgumentError(() => driver.setDownloadPath(file)) async function assertInvalidArgumentError(fn) { try { await fn() return Promise.reject(Error('should have failed')) } catch (err) { if (err instanceof error.InvalidArgumentError) { return } throw err } } }) }) }, { browsers: ['chrome'] } )
1
18,850
Is this not required?
SeleniumHQ-selenium
java
@@ -39,17 +39,9 @@ type ( GetContractState(hash.PKHash, hash.Hash32B) (hash.Hash32B, error) SetContractState(hash.PKHash, hash.Hash32B, hash.Hash32B) error // Accounts - Balance(string) (*big.Int, error) - AccountState(string) (*Account, error) RootHash() hash.Hash32B Version() uint64 Height() uint64 - WorkingCandidates() map[hash.PKHash]*Candidate - GetCandidates(height uint64) (CandidateList, error) - - State(hash.PKHash, State) (State, error) - CachedState(hash.PKHash, State) (State, error) - PutState(hash.PKHash, State) error } // workingSet implements Workingset interface, tracks pending changes to account/contract in local cache
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package state import ( "context" "fmt" "math/big" "reflect" "sort" "github.com/pkg/errors" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/db" "github.com/iotexproject/iotex-core/iotxaddress" "github.com/iotexproject/iotex-core/pkg/hash" "github.com/iotexproject/iotex-core/pkg/util/byteutil" "github.com/iotexproject/iotex-core/trie" ) type ( // WorkingSet defines an interface for working set of states changes WorkingSet interface { // states and actions LoadOrCreateAccountState(string, *big.Int) (*Account, error) Nonce(string) (uint64, error) // Note that Nonce starts with 1. CachedAccountState(string) (*Account, error) RunActions(uint64, []*action.Transfer, []*action.Vote, []*action.Execution, []action.Action) (hash.Hash32B, error) Commit() error // contracts GetCodeHash(hash.PKHash) (hash.Hash32B, error) GetCode(hash.PKHash) ([]byte, error) SetCode(hash.PKHash, []byte) error GetContractState(hash.PKHash, hash.Hash32B) (hash.Hash32B, error) SetContractState(hash.PKHash, hash.Hash32B, hash.Hash32B) error // Accounts Balance(string) (*big.Int, error) AccountState(string) (*Account, error) RootHash() hash.Hash32B Version() uint64 Height() uint64 WorkingCandidates() map[hash.PKHash]*Candidate GetCandidates(height uint64) (CandidateList, error) State(hash.PKHash, State) (State, error) CachedState(hash.PKHash, State) (State, error) PutState(hash.PKHash, State) error } // workingSet implements Workingset interface, tracks pending changes to account/contract in local cache workingSet struct { ver uint64 blkHeight uint64 cachedCandidates map[hash.PKHash]*Candidate savedStates map[hash.PKHash]State // saved states before being modified in this block cachedStates map[hash.PKHash]State // states being modified in this block cachedContract map[hash.PKHash]Contract // contracts being modified in this block accountTrie trie.Trie // global account state trie cb db.CachedBatch // cached batch for pending writes dao db.KVStore // the underlying DB for account/contract storage actionHandlers []ActionHandler } ) // NewWorkingSet creates a new working set func NewWorkingSet( version uint64, kv db.KVStore, root hash.Hash32B, actionHandlers []ActionHandler, ) (WorkingSet, error) { ws := &workingSet{ ver: version, cachedCandidates: make(map[hash.PKHash]*Candidate), savedStates: make(map[hash.PKHash]State), cachedStates: make(map[hash.PKHash]State), cachedContract: make(map[hash.PKHash]Contract), cb: db.NewCachedBatch(), dao: kv, actionHandlers: actionHandlers, } tr, err := trie.NewTrieSharedBatch(ws.dao, ws.cb, trie.AccountKVNameSpace, root) if err != nil { return nil, errors.Wrap(err, "failed to generate state trie from config") } ws.accountTrie = tr if err := ws.accountTrie.Start(context.Background()); err != nil { return nil, errors.Wrapf(err, "failed to load state trie from root = %x", root) } return ws, nil } func (ws *workingSet) WorkingCandidates() map[hash.PKHash]*Candidate { return ws.cachedCandidates } //====================================== // account functions //====================================== // LoadOrCreateAccountState loads existing or 
adds a new account state with initial balance to the factory // addr should be a bech32 properly-encoded string func (ws *workingSet) LoadOrCreateAccountState(addr string, init *big.Int) (*Account, error) { addrHash, err := addressToPKHash(addr) if err != nil { return nil, err } state, err := ws.CachedState(addrHash, &Account{}) switch { case errors.Cause(err) == ErrAccountNotExist: account := Account{ Balance: init, VotingWeight: big.NewInt(0), } ws.cachedStates[addrHash] = &account return &account, nil case err != nil: return nil, errors.Wrapf(err, "failed to get account of %x from cached account", addrHash) } account, err := stateToAccountState(state) if err != nil { return nil, err } return account, nil } // Balance returns balance func (ws *workingSet) Balance(addr string) (*big.Int, error) { state, err := ws.AccountState(addr) if err != nil { return nil, errors.Wrapf(err, "failed to get account state of %s", addr) } return state.Balance, nil } // Nonce returns the Nonce if the account exists func (ws *workingSet) Nonce(addr string) (uint64, error) { state, err := ws.AccountState(addr) if err != nil { return 0, errors.Wrapf(err, "failed to get account state of %s", addr) } return state.Nonce, nil } // account returns the confirmed account state on the chain func (ws *workingSet) AccountState(addr string) (*Account, error) { addrHash, err := addressToPKHash(addr) if err != nil { return nil, err } state, ok := ws.savedStates[addrHash] if !ok { state, err = ws.State(addrHash, &Account{}) if err != nil { return nil, err } } account, err := stateToAccountState(state) if err != nil { return nil, err } return account, nil } // CachedAccountState returns the cached account state if the address exists in local cache func (ws *workingSet) CachedAccountState(addr string) (*Account, error) { addrHash, err := addressToPKHash(addr) if err != nil { return nil, err } if contract, ok := ws.cachedContract[addrHash]; ok { return contract.SelfState(), nil } state, err := ws.CachedState(addrHash, &Account{}) if err != nil { return nil, err } account, err := stateToAccountState(state) if err != nil { return nil, err } return account, nil } // RootHash returns the hash of the root node of the accountTrie func (ws *workingSet) RootHash() hash.Hash32B { return ws.accountTrie.RootHash() } // Version returns the Version of this working set func (ws *workingSet) Version() uint64 { return ws.ver } // Height returns the Height of the block being worked on func (ws *workingSet) Height() uint64 { return ws.blkHeight } // RunActions runs actions in the block and track pending changes in working set func (ws *workingSet) RunActions( blockHeight uint64, tsf []*action.Transfer, vote []*action.Vote, executions []*action.Execution, actions []action.Action) (hash.Hash32B, error) { ws.blkHeight = blockHeight // Recover cachedCandidates after restart factory if blockHeight > 0 && len(ws.cachedCandidates) == 0 { candidates, err := ws.GetCandidates(blockHeight - 1) if err != nil { return hash.ZeroHash32B, errors.Wrapf(err, "failed to get previous Candidates on Height %d", blockHeight-1) } if ws.cachedCandidates, err = CandidatesToMap(candidates); err != nil { return hash.ZeroHash32B, errors.Wrap(err, "failed to convert candidate list to map of cached Candidates") } } if err := ws.handleTsf(tsf); err != nil { return hash.ZeroHash32B, errors.Wrap(err, "failed to handle transfers") } if err := ws.handleVote(blockHeight, vote); err != nil { return hash.ZeroHash32B, errors.Wrap(err, "failed to handle votes") } // update pending 
account changes to trie for addr, state := range ws.cachedStates { if err := ws.PutState(addr, state); err != nil { return hash.ZeroHash32B, errors.Wrap(err, "failed to update pending account changes to trie") } account, err := stateToAccountState(state) if err != nil { return hash.ZeroHash32B, err } // Perform vote update operation on candidate and delegate pools if !account.IsCandidate { // remove the candidate if the person is not a candidate anymore if _, ok := ws.cachedCandidates[addr]; ok { delete(ws.cachedCandidates, addr) } continue } totalWeight := big.NewInt(0) totalWeight.Add(totalWeight, account.VotingWeight) voteePKHash, err := addressToPKHash(account.Votee) if err != nil { return hash.ZeroHash32B, err } if addr == voteePKHash { totalWeight.Add(totalWeight, account.Balance) } ws.updateCandidate(addr, totalWeight, blockHeight) } // update pending contract changes for addr, contract := range ws.cachedContract { if err := contract.Commit(); err != nil { return hash.ZeroHash32B, errors.Wrap(err, "failed to update pending contract changes") } state := contract.SelfState() // store the account (with new storage trie root) into account trie if err := ws.PutState(addr, state); err != nil { return hash.ZeroHash32B, errors.Wrap(err, "failed to update pending contract account changes to trie") } } // increase Executor's Nonce for every execution in this block for _, e := range executions { executorPKHash, err := addressToPKHash(e.Executor()) if err != nil { return hash.ZeroHash32B, err } state, err := ws.CachedState(executorPKHash, &Account{}) if err != nil { return hash.ZeroHash32B, errors.Wrap(err, "executor does not exist") } account, err := stateToAccountState(state) if err != nil { return hash.ZeroHash32B, err } // save account before modifying ws.saveAccount(executorPKHash, account) if e.Nonce() > account.Nonce { account.Nonce = e.Nonce() } if err := ws.PutState(executorPKHash, state); err != nil { return hash.ZeroHash32B, errors.Wrap(err, "failed to update pending account changes to trie") } } for _, act := range actions { for _, actionHandler := range ws.actionHandlers { if err := actionHandler.Handle(act, ws); err != nil { return hash.ZeroHash32B, errors.Wrapf(err, "error when action %x mutates states", act.Hash()) } } } // Persist accountTrie's root hash rootHash := ws.accountTrie.RootHash() if err := ws.dao.Put(trie.AccountKVNameSpace, []byte(AccountTrieRootKey), rootHash[:]); err != nil { return hash.ZeroHash32B, errors.Wrap(err, "failed to store accountTrie's root hash") } // Persist new list of Candidates candidates, err := MapToCandidates(ws.cachedCandidates) if err != nil { return hash.ZeroHash32B, errors.Wrap(err, "failed to convert map of cached Candidates to candidate list") } sort.Sort(candidates) candidatesBytes, err := candidates.Serialize() if err != nil { return hash.ZeroHash32B, errors.Wrap(err, "failed to serialize Candidates") } h := byteutil.Uint64ToBytes(blockHeight) if err := ws.dao.Put(trie.CandidateKVNameSpace, h, candidatesBytes); err != nil { return hash.ZeroHash32B, errors.Wrapf(err, "failed to store Candidates on Height %d", blockHeight) } // Persist current chain Height if err := ws.dao.Put(trie.AccountKVNameSpace, []byte(CurrentHeightKey), h); err != nil { return hash.ZeroHash32B, errors.Wrap(err, "failed to store accountTrie's current Height") } return ws.RootHash(), nil } // Commit persists all changes in RunActions() into the DB func (ws *workingSet) Commit() error { // Commit all changes in a batch if err := ws.accountTrie.Commit(); err != nil { 
return errors.Wrap(err, "failed to Commit all changes to underlying DB in a batch") } ws.clearCache() return nil } //====================================== // Contract functions //====================================== // GetCodeHash returns contract's code hash func (ws *workingSet) GetCodeHash(addr hash.PKHash) (hash.Hash32B, error) { if contract, ok := ws.cachedContract[addr]; ok { return byteutil.BytesTo32B(contract.SelfState().CodeHash), nil } state, err := ws.CachedState(addr, &Account{}) if err != nil { return hash.ZeroHash32B, errors.Wrapf(err, "failed to GetCodeHash for contract %x", addr) } account, err := stateToAccountState(state) if err != nil { return hash.ZeroHash32B, err } return byteutil.BytesTo32B(account.CodeHash), nil } // GetCode returns contract's code func (ws *workingSet) GetCode(addr hash.PKHash) ([]byte, error) { if contract, ok := ws.cachedContract[addr]; ok { return contract.GetCode() } state, err := ws.CachedState(addr, &Account{}) if err != nil { return nil, errors.Wrapf(err, "failed to GetCode for contract %x", addr) } account, err := stateToAccountState(state) if err != nil { return nil, err } return ws.dao.Get(trie.CodeKVNameSpace, account.CodeHash[:]) } // SetCode sets contract's code func (ws *workingSet) SetCode(addr hash.PKHash, code []byte) error { if contract, ok := ws.cachedContract[addr]; ok { contract.SetCode(byteutil.BytesTo32B(hash.Hash256b(code)), code) return nil } contract, err := ws.getContract(addr) if err != nil { return errors.Wrapf(err, "failed to SetCode for contract %x", addr) } contract.SetCode(byteutil.BytesTo32B(hash.Hash256b(code)), code) return nil } // GetContractState returns contract's storage value func (ws *workingSet) GetContractState(addr hash.PKHash, key hash.Hash32B) (hash.Hash32B, error) { if contract, ok := ws.cachedContract[addr]; ok { v, err := contract.GetState(key) return byteutil.BytesTo32B(v), err } contract, err := ws.getContract(addr) if err != nil { return hash.ZeroHash32B, errors.Wrapf(err, "failed to GetContractState for contract %x", addr) } v, err := contract.GetState(key) return byteutil.BytesTo32B(v), err } // SetContractState writes contract's storage value func (ws *workingSet) SetContractState(addr hash.PKHash, key, value hash.Hash32B) error { if contract, ok := ws.cachedContract[addr]; ok { return contract.SetState(key, value[:]) } contract, err := ws.getContract(addr) if err != nil { return errors.Wrapf(err, "failed to SetContractState for contract %x", addr) } return contract.SetState(key, value[:]) } // State pulls a state from DB func (ws *workingSet) State(hash hash.PKHash, s State) (State, error) { mstate, err := ws.accountTrie.Get(hash[:]) if errors.Cause(err) == trie.ErrNotExist { return nil, errors.Wrapf(ErrAccountNotExist, "addrHash = %x", hash[:]) } if err != nil { return nil, errors.Wrapf(err, "failed to get account of %x", hash) } if err := s.Deserialize(mstate); err != nil { return nil, err } return s, nil } // CachedState pulls a state from cache first. 
If missing, it will hit DB func (ws *workingSet) CachedState(hash hash.PKHash, s State) (State, error) { if state, ok := ws.cachedStates[hash]; ok { return state, nil } // add to local cache state, err := ws.State(hash, s) if state != nil { ws.cachedStates[hash] = state } return state, err } // PutState put a state into DB func (ws *workingSet) PutState(pkHash hash.PKHash, state State) error { ss, err := state.Serialize() if err != nil { return errors.Wrapf(err, "failed to convert account %v to bytes", state) } return ws.accountTrie.Upsert(pkHash[:], ss) } //====================================== // private account/account functions //====================================== func (ws *workingSet) saveAccount(hash hash.PKHash, account *Account) { if _, ok := ws.savedStates[hash]; !ok { ws.savedStates[hash] = account.clone() } } func (ws *workingSet) getContract(addr hash.PKHash) (Contract, error) { state, err := ws.CachedState(addr, &Account{}) if err != nil { return nil, errors.Wrapf(err, "failed to get the cached account of %x", addr) } account, err := stateToAccountState(state) if err != nil { return nil, err } delete(ws.cachedStates, addr) if account.Root == hash.ZeroHash32B { account.Root = trie.EmptyRoot } tr, err := trie.NewTrieSharedBatch(ws.dao, ws.cb, trie.ContractKVNameSpace, account.Root) if err != nil { return nil, errors.Wrapf(err, "failed to create storage trie for new contract %x", addr) } // add to contract cache contract := newContract(account, tr) ws.cachedContract[addr] = contract return contract, nil } // clearCache removes all local changes after committing to trie func (ws *workingSet) clearCache() { ws.savedStates = nil ws.cachedStates = nil ws.cachedContract = nil ws.savedStates = make(map[hash.PKHash]State) ws.cachedStates = make(map[hash.PKHash]State) ws.cachedContract = make(map[hash.PKHash]Contract) } //====================================== // private candidate functions //====================================== func (ws *workingSet) updateCandidate(pkHash hash.PKHash, totalWeight *big.Int, blockHeight uint64) { // Candidate was added when self-nomination, always exist in cachedCandidates candidate := ws.cachedCandidates[pkHash] candidate.Votes = totalWeight candidate.LastUpdateHeight = blockHeight } func (ws *workingSet) GetCandidates(height uint64) (CandidateList, error) { candidatesBytes, err := ws.dao.Get(trie.CandidateKVNameSpace, byteutil.Uint64ToBytes(height)) if err != nil { return []*Candidate{}, errors.Wrapf(err, "failed to get Candidates on Height %d", height) } return CandidateList{}.Deserialize(candidatesBytes) } //====================================== // private transfer/vote functions //====================================== func (ws *workingSet) handleTsf(tsf []*action.Transfer) error { for _, tx := range tsf { if tx.IsContract() { continue } if !tx.IsCoinbase() { // check sender sender, err := ws.LoadOrCreateAccountState(tx.Sender(), big.NewInt(0)) if err != nil { return errors.Wrapf(err, "failed to load or create the account of sender %s", tx.Sender()) } // save account before modifying senderPKHash, err := addressToPKHash(tx.Sender()) if err != nil { return err } ws.saveAccount(senderPKHash, sender) if tx.Amount().Cmp(sender.Balance) == 1 { return errors.Wrapf(ErrNotEnoughBalance, "failed to verify the Balance of sender %s", tx.Sender()) } // update sender Balance if err := sender.SubBalance(tx.Amount()); err != nil { return errors.Wrapf(err, "failed to update the Balance of sender %s", tx.Sender()) } // update sender Nonce if tx.Nonce() > 
sender.Nonce { sender.Nonce = tx.Nonce() } // Update sender votes if len(sender.Votee) > 0 && sender.Votee != tx.Sender() { // sender already voted to a different person voteeOfSender, err := ws.LoadOrCreateAccountState(sender.Votee, big.NewInt(0)) if err != nil { return errors.Wrapf(err, "failed to load or create the account of sender's votee %s", sender.Votee) } // save account before modifying voteePKHash, err := addressToPKHash(sender.Votee) if err != nil { return err } ws.saveAccount(voteePKHash, voteeOfSender) voteeOfSender.VotingWeight.Sub(voteeOfSender.VotingWeight, tx.Amount()) } } // check recipient recipient, err := ws.LoadOrCreateAccountState(tx.Recipient(), big.NewInt(0)) if err != nil { return errors.Wrapf(err, "failed to laod or create the account of recipient %s", tx.Recipient()) } // save account before modifying recipientPKHash, err := addressToPKHash(tx.Recipient()) if err != nil { return err } ws.saveAccount(recipientPKHash, recipient) // update recipient Balance if err := recipient.AddBalance(tx.Amount()); err != nil { return errors.Wrapf(err, "failed to update the Balance of recipient %s", tx.Recipient()) } // Update recipient votes if len(recipient.Votee) > 0 && recipient.Votee != tx.Recipient() { // recipient already voted to a different person voteeOfRecipient, err := ws.LoadOrCreateAccountState(recipient.Votee, big.NewInt(0)) if err != nil { return errors.Wrapf(err, "failed to load or create the account of recipient's votee %s", recipient.Votee) } // save account before modifying voteePKHash, err := addressToPKHash(recipient.Votee) if err != nil { return err } ws.saveAccount(voteePKHash, voteeOfRecipient) voteeOfRecipient.VotingWeight.Add(voteeOfRecipient.VotingWeight, tx.Amount()) } } return nil } func (ws *workingSet) handleVote(blockHeight uint64, vote []*action.Vote) error { for _, v := range vote { voteFrom, err := ws.LoadOrCreateAccountState(v.Voter(), big.NewInt(0)) if err != nil { return errors.Wrapf(err, "failed to load or create the account of voter %s", v.Voter()) } // save account before modifying voterPKHash, err := addressToPKHash(v.Voter()) if err != nil { return err } ws.saveAccount(voterPKHash, voteFrom) // update voteFrom Nonce if v.Nonce() > voteFrom.Nonce { voteFrom.Nonce = v.Nonce() } // Update old votee's weight if len(voteFrom.Votee) > 0 && voteFrom.Votee != v.Voter() { // voter already voted oldVotee, err := ws.LoadOrCreateAccountState(voteFrom.Votee, big.NewInt(0)) if err != nil { return errors.Wrapf(err, "failed to load or create the account of voter's old votee %s", voteFrom.Votee) } // save account before modifying voteePKHash, err := addressToPKHash(voteFrom.Votee) if err != nil { return err } ws.saveAccount(voteePKHash, oldVotee) oldVotee.VotingWeight.Sub(oldVotee.VotingWeight, voteFrom.Balance) voteFrom.Votee = "" } if v.Votee() == "" { // unvote operation voteFrom.IsCandidate = false continue } voteTo, err := ws.LoadOrCreateAccountState(v.Votee(), big.NewInt(0)) if err != nil { return errors.Wrapf(err, "failed to load or create the account of votee %s", v.Votee()) } // save account before modifying voteePKHash, err := addressToPKHash(v.Votee()) if err != nil { return err } ws.saveAccount(voteePKHash, voteTo) if v.Voter() != v.Votee() { // Voter votes to a different person voteTo.VotingWeight.Add(voteTo.VotingWeight, voteFrom.Balance) voteFrom.Votee = v.Votee() } else { // Vote to self: self-nomination or cancel the previous vote case voteFrom.Votee = v.Voter() voteFrom.IsCandidate = true votePubkey := v.VoterPublicKey() if _, ok := 
ws.cachedCandidates[voterPKHash]; !ok { ws.cachedCandidates[voterPKHash] = &Candidate{ Address: v.Voter(), PublicKey: votePubkey, CreationHeight: blockHeight, } } } } return nil } func addressToPKHash(addr string) (hash.PKHash, error) { var pkHash hash.PKHash senderPKHashBytes, err := iotxaddress.GetPubkeyHash(addr) if err != nil { return pkHash, errors.Wrap(err, "cannot get the hash of the address") } return byteutil.BytesTo20B(senderPKHashBytes), nil } func stateToAccountState(state State) (*Account, error) { account, ok := state.(*Account) if !ok { return nil, fmt.Errorf("error when casting state of %s into account state", reflect.TypeOf(state).String()) } return account, nil }
1
12,698
savedStates is for confirmed states, not needed in working set
iotexproject-iotex-core
go
@@ -23,6 +23,9 @@ import ( "github.com/mysteriumnetwork/node/market" ) +// ServiceType indicates "wireguard" service type +const ServiceType = "wireguard" + // Bootstrap is called on program initialization time and registers various deserializers related to wireguard service func Bootstrap() { market.RegisterServiceDefinitionUnserializer(
1
/* * Copyright (C) 2018 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package wireguard import ( "encoding/json" "github.com/mysteriumnetwork/node/market" ) // Bootstrap is called on program initialization time and registers various deserializers related to wireguard service func Bootstrap() { market.RegisterServiceDefinitionUnserializer( ServiceType, func(rawDefinition *json.RawMessage) (market.ServiceDefinition, error) { var definition ServiceDefinition err := json.Unmarshal(*rawDefinition, &definition) return definition, err }, ) // TODO per time or per bytes payment methods should be defined here market.RegisterPaymentMethodUnserializer( PaymentMethod, func(rawDefinition *json.RawMessage) (market.PaymentMethod, error) { var method Payment err := json.Unmarshal(*rawDefinition, &method) return method, err }, ) }
1
13,835
Why should we move it into the `bootstrap.go` file?
mysteriumnetwork-node
go
@@ -24,8 +24,15 @@ const fontSizes = [ 72 ] +const measures = [ + 24, + 32, + 48 +] + module.exports = { breakpoints, space, fontSizes, + measures }
1
const breakpoints = [ 40, 52, 64 ] const space = [ 0, 8, 16, 32, 64 ] const fontSizes = [ 12, 14, 16, 20, 24, 32, 48, 64, 72 ] module.exports = { breakpoints, space, fontSizes, }
1
4,464
In the next major version, I plan on changing the em-unit breakpoints to pixel values to keep everything consistent. It would be great to handle this with pixels as well, while still allowing em units to be defined as strings (see the sketch after this entry).
styled-system-styled-system
js
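A minimal sketch of what the reviewer describes, assuming numeric entries are treated as pixel values while string entries (e.g. em units) pass through unchanged. The concrete measure values and the `toLength` helper are illustrative assumptions, not part of styled-system.

```js
// Illustrative only: numbers are px, strings such as '48em' are kept as-is.
const measures = [
  512,     // px
  672,     // px
  '48em'   // explicit em-unit string, left unchanged
]

// Hypothetical helper to resolve a measure to a CSS length value.
const toLength = n => (typeof n === 'number' ? n + 'px' : n)

module.exports = {
  measures,
  toLength
}
```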
@@ -151,7 +151,7 @@ class TabWidget(QTabWidget): fields = self.get_tab_fields(idx) fields['current_title'] = fields['current_title'].replace('&', '&&') - fields['index'] = idx + 1 + fields['index'] = str(idx + 1).rjust(2) title = '' if fmt is None else fmt.format(**fields) tabbar = self.tabBar()
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2020 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """The tab widget used for TabbedBrowser from browser.py.""" import typing import functools import contextlib import attr from PyQt5.QtCore import (pyqtSignal, pyqtSlot, Qt, QSize, QRect, QPoint, QTimer, QUrl) from PyQt5.QtWidgets import (QTabWidget, QTabBar, QSizePolicy, QCommonStyle, QStyle, QStylePainter, QStyleOptionTab, QStyleFactory, QWidget) from PyQt5.QtGui import QIcon, QPalette, QColor from qutebrowser.utils import qtutils, objreg, utils, usertypes, log from qutebrowser.config import config, stylesheet from qutebrowser.misc import objects, debugcachestats from qutebrowser.browser import browsertab class TabWidget(QTabWidget): """The tab widget used for TabbedBrowser. Signals: tab_index_changed: Emitted when the current tab was changed. arg 0: The index of the tab which is now focused. arg 1: The total count of tabs. new_tab_requested: Emitted when a new tab is requested. """ tab_index_changed = pyqtSignal(int, int) new_tab_requested = pyqtSignal('QUrl', bool, bool) # Strings for controlling the mute/audible text MUTE_STRING = '[M] ' AUDIBLE_STRING = '[A] ' def __init__(self, win_id, parent=None): super().__init__(parent) bar = TabBar(win_id, self) self.setStyle(TabBarStyle()) self.setTabBar(bar) bar.tabCloseRequested.connect( self.tabCloseRequested) # type: ignore[arg-type] bar.tabMoved.connect(functools.partial( QTimer.singleShot, 0, self.update_tab_titles)) bar.currentChanged.connect(self._on_current_changed) bar.new_tab_requested.connect(self._on_new_tab_requested) self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) self.setDocumentMode(True) self.setElideMode(Qt.ElideRight) self.setUsesScrollButtons(True) bar.setDrawBase(False) self._init_config() config.instance.changed.connect(self._init_config) @config.change_filter('tabs') def _init_config(self): """Initialize attributes based on the config.""" tabbar = self.tabBar() self.setMovable(True) self.setTabsClosable(False) position = config.val.tabs.position selection_behavior = config.val.tabs.select_on_remove self.setTabPosition(position) tabbar.vertical = position in [ # type: ignore[attr-defined] QTabWidget.West, QTabWidget.East] tabbar.setSelectionBehaviorOnRemove(selection_behavior) tabbar.refresh() def set_tab_indicator_color(self, idx, color): """Set the tab indicator color. Args: idx: The tab index. color: A QColor. """ bar = self.tabBar() bar.set_tab_data(idx, 'indicator-color', color) bar.update(bar.tabRect(idx)) def set_tab_pinned(self, tab: QWidget, pinned: bool) -> None: """Set the tab status as pinned. Args: tab: The tab to pin pinned: Pinned tab state to set. 
""" idx = self.indexOf(tab) tab.data.pinned = pinned self.update_tab_favicon(tab) self.update_tab_title(idx) def tab_indicator_color(self, idx): """Get the tab indicator color for the given index.""" return self.tabBar().tab_indicator_color(idx) def set_page_title(self, idx, title): """Set the tab title user data.""" tabbar = self.tabBar() if config.cache['tabs.tooltips']: # always show only plain title in tooltips tabbar.setTabToolTip(idx, title) tabbar.set_tab_data(idx, 'page-title', title) self.update_tab_title(idx) def page_title(self, idx): """Get the tab title user data.""" return self.tabBar().page_title(idx) def update_tab_title(self, idx, field=None): """Update the tab text for the given tab. Args: idx: The tab index to update. field: A field name which was updated. If given, the title is only set if the given field is in the template. """ tab = self.widget(idx) if tab.data.pinned: fmt = config.cache['tabs.title.format_pinned'] else: fmt = config.cache['tabs.title.format'] if (field is not None and (fmt is None or ('{' + field + '}') not in fmt)): return fields = self.get_tab_fields(idx) fields['current_title'] = fields['current_title'].replace('&', '&&') fields['index'] = idx + 1 title = '' if fmt is None else fmt.format(**fields) tabbar = self.tabBar() # Only change the tab title if it changes, setting the tab title causes # a size recalculation which is slow. if tabbar.tabText(idx) != title: tabbar.setTabText(idx, title) def get_tab_fields(self, idx): """Get the tab field data.""" tab = self.widget(idx) if tab is None: log.misc.debug( # type: ignore[unreachable] "Got None-tab in get_tab_fields!") page_title = self.page_title(idx) fields = {} fields['id'] = tab.tab_id fields['current_title'] = page_title fields['title_sep'] = ' - ' if page_title else '' fields['perc_raw'] = tab.progress() fields['backend'] = objects.backend.name fields['private'] = ' [Private Mode] ' if tab.is_private else '' try: if tab.audio.is_muted(): fields['audio'] = TabWidget.MUTE_STRING elif tab.audio.is_recently_audible(): fields['audio'] = TabWidget.AUDIBLE_STRING else: fields['audio'] = '' except browsertab.WebTabError: # Muting is only implemented with QtWebEngine fields['audio'] = '' if tab.load_status() == usertypes.LoadStatus.loading: fields['perc'] = '[{}%] '.format(tab.progress()) else: fields['perc'] = '' try: url = self.tab_url(idx) except qtutils.QtValueError: fields['host'] = '' fields['current_url'] = '' fields['protocol'] = '' else: fields['host'] = url.host() fields['current_url'] = url.toDisplayString() fields['protocol'] = url.scheme() y = tab.scroller.pos_perc()[1] if y is None: scroll_pos = '???' elif y <= 0: scroll_pos = 'top' elif y >= 100: scroll_pos = 'bot' else: scroll_pos = '{:2}%'.format(y) fields['scroll_pos'] = scroll_pos return fields @contextlib.contextmanager def _toggle_visibility(self): """Toggle visibility while running. Every single call to setTabText calls the size hinting functions for every single tab, which are slow. Since we know we are updating all the tab's titles, we can delay this processing by making the tab non-visible. To avoid flickering, disable repaint updates whlie we work. 
""" bar = self.tabBar() toggle = (self.count() > 10 and not bar.drag_in_progress and bar.isVisible()) if toggle: bar.setUpdatesEnabled(False) bar.setVisible(False) yield if toggle: bar.setVisible(True) bar.setUpdatesEnabled(True) def update_tab_titles(self): """Update all texts.""" with self._toggle_visibility(): for idx in range(self.count()): self.update_tab_title(idx) def tabInserted(self, idx): """Update titles when a tab was inserted.""" super().tabInserted(idx) self.update_tab_titles() def tabRemoved(self, idx): """Update titles when a tab was removed.""" super().tabRemoved(idx) self.update_tab_titles() def addTab(self, page, icon_or_text, text_or_empty=None): """Override addTab to use our own text setting logic. Unfortunately QTabWidget::addTab has these two overloads: - QWidget * page, const QIcon & icon, const QString & label - QWidget * page, const QString & label This means we'll get different arguments based on the chosen overload. Args: page: The QWidget to add. icon_or_text: Either the QIcon to add or the label. text_or_empty: Either the label or None. Return: The index of the newly added tab. """ if text_or_empty is None: text = icon_or_text new_idx = super().addTab(page, '') else: icon = icon_or_text text = text_or_empty new_idx = super().addTab(page, icon, '') self.set_page_title(new_idx, text) return new_idx def insertTab(self, idx, page, icon_or_text, text_or_empty=None): """Override insertTab to use our own text setting logic. Unfortunately QTabWidget::insertTab has these two overloads: - int index, QWidget * page, const QIcon & icon, const QString & label - int index, QWidget * page, const QString & label This means we'll get different arguments based on the chosen overload. Args: idx: Where to insert the widget. page: The QWidget to add. icon_or_text: Either the QIcon to add or the label. text_or_empty: Either the label or None. Return: The index of the newly added tab. """ if text_or_empty is None: text = icon_or_text new_idx = super().insertTab(idx, page, '') else: icon = icon_or_text text = text_or_empty new_idx = super().insertTab(idx, page, icon, '') self.set_page_title(new_idx, text) return new_idx @pyqtSlot(int) def _on_current_changed(self, index): """Emit the tab_index_changed signal if the current tab changed.""" self.tabBar().on_current_changed() self.tab_index_changed.emit(index, self.count()) @pyqtSlot() def _on_new_tab_requested(self): """Open a new tab.""" self.new_tab_requested.emit(config.val.url.default_page, False, False) def tab_url(self, idx): """Get the URL of the tab at the given index. Return: The tab URL as QUrl. """ tab = self.widget(idx) if tab is None: url = QUrl() # type: ignore[unreachable] else: url = tab.url() # It's possible for url to be invalid, but the caller will handle that. 
qtutils.ensure_valid(url) return url def update_tab_favicon(self, tab: QWidget) -> None: """Update favicon of the given tab.""" idx = self.indexOf(tab) if tab.data.should_show_icon(): self.setTabIcon(idx, tab.icon()) if config.val.tabs.tabs_are_windows: self.window().setWindowIcon(tab.icon()) else: self.setTabIcon(idx, QIcon()) if config.val.tabs.tabs_are_windows: self.window().setWindowIcon(self.window().windowIcon()) def setTabIcon(self, idx: int, icon: QIcon) -> None: """Always show tab icons for pinned tabs in some circumstances.""" tab = typing.cast(typing.Optional[browsertab.AbstractTab], self.widget(idx)) if (icon.isNull() and config.cache['tabs.favicons.show'] != 'never' and config.cache['tabs.pinned.shrink'] and not self.tabBar().vertical and tab is not None and tab.data.pinned): icon = self.style().standardIcon(QStyle.SP_FileIcon) super().setTabIcon(idx, icon) class TabBar(QTabBar): """Custom tab bar with our own style. FIXME: Dragging tabs doesn't look as nice as it does in QTabBar. However, fixing this would be a lot of effort, so we'll postpone it until we're reimplementing drag&drop for other reasons. https://github.com/qutebrowser/qutebrowser/issues/126 Attributes: vertical: When the tab bar is currently vertical. win_id: The window ID this TabBar belongs to. Signals: new_tab_requested: Emitted when a new tab is requested. """ STYLESHEET = """ TabBar { font: {{ conf.fonts.tabs.unselected }}; background-color: {{ conf.colors.tabs.bar.bg }}; } TabBar::tab:selected { font: {{ conf.fonts.tabs.selected }}; } """ new_tab_requested = pyqtSignal() def __init__(self, win_id, parent=None): super().__init__(parent) self._win_id = win_id self.setStyle(TabBarStyle()) self.vertical = False self._auto_hide_timer = QTimer() self._auto_hide_timer.setSingleShot(True) self._auto_hide_timer.timeout.connect(self.maybe_hide) self._on_show_switching_delay_changed() self.setAutoFillBackground(True) self.drag_in_progress = False stylesheet.set_register(self) self.ensurePolished() config.instance.changed.connect(self._on_config_changed) self._set_icon_size() QTimer.singleShot(0, self.maybe_hide) def __repr__(self): return utils.get_repr(self, count=self.count()) def _current_tab(self): """Get the current tab object.""" return self.parent().currentWidget() @pyqtSlot(str) def _on_config_changed(self, option: str) -> None: if option.startswith('fonts.tabs.'): self.ensurePolished() self._set_icon_size() elif option == 'tabs.favicons.scale': self._set_icon_size() elif option == 'tabs.show_switching_delay': self._on_show_switching_delay_changed() elif option == 'tabs.show': self.maybe_hide() if option.startswith('colors.tabs.'): self.update() # Clear tab size caches when appropriate if option in ["tabs.indicator.padding", "tabs.padding", "tabs.indicator.width", "tabs.min_width", "tabs.pinned.shrink", "fonts.tabs.selected", "fonts.tabs.unselected"]: self._minimum_tab_size_hint_helper.cache_clear() self._minimum_tab_height.cache_clear() def _on_show_switching_delay_changed(self): """Set timer interval when tabs.show_switching_delay got changed.""" self._auto_hide_timer.setInterval(config.val.tabs.show_switching_delay) def on_current_changed(self): """Show tab bar when current tab got changed.""" self.maybe_hide() # for fullscreen tabs if config.val.tabs.show == 'switching': self.show() self._auto_hide_timer.start() @pyqtSlot() def maybe_hide(self): """Hide the tab bar if needed.""" show = config.val.tabs.show tab = self._current_tab() if (show in ['never', 'switching'] or (show == 'multiple' and self.count() 
== 1) or (tab and tab.data.fullscreen)): self.hide() else: self.show() def set_tab_data(self, idx, key, value): """Set tab data as a dictionary.""" if not 0 <= idx < self.count(): raise IndexError("Tab index ({}) out of range ({})!".format( idx, self.count())) data = self.tabData(idx) if data is None: data = {} data[key] = value self.setTabData(idx, data) def tab_data(self, idx, key): """Get tab data for a given key.""" if not 0 <= idx < self.count(): raise IndexError("Tab index ({}) out of range ({})!".format( idx, self.count())) data = self.tabData(idx) if data is None: data = {} return data[key] def tab_indicator_color(self, idx): """Get the tab indicator color for the given index.""" try: return self.tab_data(idx, 'indicator-color') except KeyError: return QColor() def page_title(self, idx): """Get the tab title user data. Args: idx: The tab index to get the title for. handle_unset: Whether to return an empty string on KeyError. """ try: return self.tab_data(idx, 'page-title') except KeyError: return '' def refresh(self): """Properly repaint the tab bar and relayout tabs.""" # This is a horrible hack, but we need to do this so the underlying Qt # code sets layoutDirty so it actually relayouts the tabs. self.setIconSize(self.iconSize()) def _set_icon_size(self): """Set the tab bar favicon size.""" size = self.fontMetrics().height() - 2 size = int(size * config.val.tabs.favicons.scale) self.setIconSize(QSize(size, size)) def mouseReleaseEvent(self, e): """Override mouseReleaseEvent to know when drags stop.""" self.drag_in_progress = False super().mouseReleaseEvent(e) def mousePressEvent(self, e): """Override mousePressEvent to close tabs if configured. Also keep track of if we are currently in a drag.""" self.drag_in_progress = True button = config.val.tabs.close_mouse_button if (e.button() == Qt.RightButton and button == 'right' or e.button() == Qt.MiddleButton and button == 'middle'): e.accept() idx = self.tabAt(e.pos()) if idx == -1: action = config.val.tabs.close_mouse_button_on_bar if action == 'ignore': return elif action == 'new-tab': self.new_tab_requested.emit() return elif action == 'close-current': idx = self.currentIndex() elif action == 'close-last': idx = self.count() - 1 self.tabCloseRequested.emit(idx) return super().mousePressEvent(e) def minimumTabSizeHint(self, index: int, ellipsis: bool = True) -> QSize: """Set the minimum tab size to indicator/icon/... text. Args: index: The index of the tab to get a size hint for. ellipsis: Whether to use ellipsis to calculate width instead of the tab's text. Forced to False for pinned tabs. Return: A QSize of the smallest tab size we can make. """ icon = self.tabIcon(index) if icon.isNull(): icon_width = 0 else: icon_width = min( icon.actualSize(self.iconSize()).width(), self.iconSize().width()) + TabBarStyle.ICON_PADDING pinned = self._tab_pinned(index) if not self.vertical and pinned and config.val.tabs.pinned.shrink: # Never consider ellipsis an option for horizontal pinned tabs ellipsis = False return self._minimum_tab_size_hint_helper(self.tabText(index), icon_width, ellipsis, pinned) @debugcachestats.register(name='tab width cache') @functools.lru_cache(maxsize=2**9) def _minimum_tab_size_hint_helper(self, tab_text: str, icon_width: int, ellipsis: bool, pinned: bool) -> QSize: """Helper function to cache tab results. Config values accessed in here should be added to _on_config_changed to ensure cache is flushed when needed. 
""" text = '\u2026' if ellipsis else tab_text # Don't ever shorten if text is shorter than the ellipsis def _text_to_width(text): # Calculate text width taking into account qt mnemonics return self.fontMetrics().size(Qt.TextShowMnemonic, text).width() text_width = min(_text_to_width(text), _text_to_width(tab_text)) padding = config.cache['tabs.padding'] indicator_width = config.cache['tabs.indicator.width'] indicator_padding = config.cache['tabs.indicator.padding'] padding_h = padding.left + padding.right # Only add padding if indicator exists if indicator_width != 0: padding_h += indicator_padding.left + indicator_padding.right height = self._minimum_tab_height() width = (text_width + icon_width + padding_h + indicator_width) min_width = config.cache['tabs.min_width'] if (not self.vertical and min_width > 0 and not pinned or not config.cache['tabs.pinned.shrink']): width = max(min_width, width) return QSize(width, height) @functools.lru_cache(maxsize=1) def _minimum_tab_height(self): padding = config.cache['tabs.padding'] return self.fontMetrics().height() + padding.top + padding.bottom def _tab_pinned(self, index: int) -> bool: """Return True if tab is pinned.""" if not 0 <= index < self.count(): raise IndexError("Tab index ({}) out of range ({})!".format( index, self.count())) widget = self.parent().widget(index) if widget is None: # This could happen when Qt calls tabSizeHint while initializing # tabs. return False return widget.data.pinned def tabSizeHint(self, index: int) -> QSize: """Override tabSizeHint to customize qb's tab size. https://wiki.python.org/moin/PyQt/Customising%20tab%20bars Args: index: The index of the tab. Return: A QSize. """ if self.count() == 0: # This happens on startup on macOS. # We return it directly rather than setting `size' because we don't # want to ensure it's valid in this special case. return QSize() height = self._minimum_tab_height() if self.vertical: confwidth = str(config.cache['tabs.width']) if confwidth.endswith('%'): main_window = objreg.get('main-window', scope='window', window=self._win_id) perc = int(confwidth.rstrip('%')) width = main_window.width() * perc / 100 else: width = int(confwidth) size = QSize(width, height) else: if config.cache['tabs.pinned.shrink'] and self._tab_pinned(index): # Give pinned tabs the minimum size they need to display their # titles, let Qt handle scaling it down if we get too small. width = self.minimumTabSizeHint(index, ellipsis=False).width() else: # Request as much space as possible so we fill the tabbar, let # Qt shrink us down. 
If for some reason (tests, bugs) # self.width() gives 0, use a sane min of 10 px width = max(self.width(), 10) max_width = config.cache['tabs.max_width'] if max_width > 0: width = min(max_width, width) size = QSize(width, height) qtutils.ensure_valid(size) return size def paintEvent(self, event): """Override paintEvent to draw the tabs like we want to.""" p = QStylePainter(self) selected = self.currentIndex() for idx in range(self.count()): if not event.region().intersects(self.tabRect(idx)): # Don't repaint if we are outside the requested region continue tab = QStyleOptionTab() self.initStyleOption(tab, idx) setting = 'colors.tabs' if self._tab_pinned(idx): setting += '.pinned' if idx == selected: setting += '.selected' setting += '.odd' if (idx + 1) % 2 else '.even' tab.palette.setColor(QPalette.Window, config.cache[setting + '.bg']) tab.palette.setColor(QPalette.WindowText, config.cache[setting + '.fg']) indicator_color = self.tab_indicator_color(idx) tab.palette.setColor(QPalette.Base, indicator_color) p.drawControl(QStyle.CE_TabBarTab, tab) def tabInserted(self, idx): """Update visibility when a tab was inserted.""" super().tabInserted(idx) self.maybe_hide() def tabRemoved(self, idx): """Update visibility when a tab was removed.""" super().tabRemoved(idx) self.maybe_hide() def wheelEvent(self, e): """Override wheelEvent to make the action configurable. Args: e: The QWheelEvent """ if config.val.tabs.mousewheel_switching: super().wheelEvent(e) else: tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self._win_id) tabbed_browser.wheelEvent(e) @attr.s class Layouts: """Layout information for tab. Used by TabBarStyle._tab_layout(). """ text = attr.ib() icon = attr.ib() indicator = attr.ib() class TabBarStyle(QCommonStyle): """Qt style used by TabBar to fix some issues with the default one. This fixes the following things: - Remove the focus rectangle Ubuntu draws on tabs. - Force text to be left-aligned even though Qt has "centered" hardcoded. Unfortunately PyQt doesn't support QProxyStyle, so we need to do this the hard way... Based on: http://stackoverflow.com/a/17294081 https://code.google.com/p/makehuman/source/browse/trunk/makehuman/lib/qtgui.py """ ICON_PADDING = 4 def __init__(self): """Initialize all functions we're not overriding. This simply calls the corresponding function in self._style. """ self._style = QStyleFactory.create('Fusion') for method in ['drawComplexControl', 'drawItemPixmap', 'generatedIconPixmap', 'hitTestComplexControl', 'itemPixmapRect', 'itemTextRect', 'polish', 'styleHint', 'subControlRect', 'unpolish', 'drawItemText', 'sizeFromContents', 'drawPrimitive']: target = getattr(self._style, method) setattr(self, method, functools.partial(target)) super().__init__() def _draw_indicator(self, layouts, opt, p): """Draw the tab indicator. Args: layouts: The layouts from _tab_layout. opt: QStyleOption from drawControl. p: QPainter from drawControl. """ color = opt.palette.base().color() rect = layouts.indicator if color.isValid() and rect.isValid(): p.fillRect(rect, color) def _draw_icon(self, layouts, opt, p): """Draw the tab icon. Args: layouts: The layouts from _tab_layout. 
opt: QStyleOption p: QPainter """ qtutils.ensure_valid(layouts.icon) icon_mode = (QIcon.Normal if opt.state & QStyle.State_Enabled else QIcon.Disabled) icon_state = (QIcon.On if opt.state & QStyle.State_Selected else QIcon.Off) icon = opt.icon.pixmap(opt.iconSize, icon_mode, icon_state) self._style.drawItemPixmap(p, layouts.icon, Qt.AlignCenter, icon) def drawControl(self, element, opt, p, widget=None): """Override drawControl to draw odd tabs in a different color. Draws the given element with the provided painter with the style options specified by option. Args: element: ControlElement opt: QStyleOption p: QPainter widget: QWidget """ if element not in [QStyle.CE_TabBarTab, QStyle.CE_TabBarTabShape, QStyle.CE_TabBarTabLabel]: # Let the real style draw it. self._style.drawControl(element, opt, p, widget) return layouts = self._tab_layout(opt) if layouts is None: log.misc.warning("Could not get layouts for tab!") return if element == QStyle.CE_TabBarTab: # We override this so we can control TabBarTabShape/TabBarTabLabel. self.drawControl(QStyle.CE_TabBarTabShape, opt, p, widget) self.drawControl(QStyle.CE_TabBarTabLabel, opt, p, widget) elif element == QStyle.CE_TabBarTabShape: p.fillRect(opt.rect, opt.palette.window()) self._draw_indicator(layouts, opt, p) # We use super() rather than self._style here because we don't want # any sophisticated drawing. super().drawControl(QStyle.CE_TabBarTabShape, opt, p, widget) elif element == QStyle.CE_TabBarTabLabel: if not opt.icon.isNull() and layouts.icon.isValid(): self._draw_icon(layouts, opt, p) alignment = (config.cache['tabs.title.alignment'] | Qt.AlignVCenter | Qt.TextHideMnemonic) self._style.drawItemText(p, layouts.text, int(alignment), opt.palette, bool(opt.state & QStyle.State_Enabled), opt.text, QPalette.WindowText) else: raise ValueError("Invalid element {!r}".format(element)) def pixelMetric(self, metric, option=None, widget=None): """Override pixelMetric to not shift the selected tab. Args: metric: PixelMetric option: const QStyleOption * widget: const QWidget * Return: An int. """ if metric in [QStyle.PM_TabBarTabShiftHorizontal, QStyle.PM_TabBarTabShiftVertical, QStyle.PM_TabBarTabHSpace, QStyle.PM_TabBarTabVSpace, QStyle.PM_TabBarScrollButtonWidth]: return 0 else: return self._style.pixelMetric(metric, option, widget) def subElementRect(self, sr, opt, widget=None): """Override subElementRect to use our own _tab_layout implementation. Args: sr: SubElement opt: QStyleOption widget: QWidget Return: A QRect. """ if sr == QStyle.SE_TabBarTabText: layouts = self._tab_layout(opt) if layouts is None: log.misc.warning("Could not get layouts for tab!") return QRect() return layouts.text elif sr in [QStyle.SE_TabWidgetTabBar, QStyle.SE_TabBarScrollLeftButton]: # Handling SE_TabBarScrollLeftButton so the left scroll button is # aligned properly. Otherwise, empty space will be shown after the # last tab even though the button width is set to 0 # # Need to use super() because we also use super() to render # element in drawControl(); otherwise, we may get bit by # style differences... return super().subElementRect(sr, opt, widget) else: return self._style.subElementRect(sr, opt, widget) def _tab_layout(self, opt): """Compute the text/icon rect from the opt rect. This is based on Qt's QCommonStylePrivate::tabLayout (qtbase/src/widgets/styles/qcommonstyle.cpp) as we can't use the private implementation. Args: opt: QStyleOptionTab Return: A Layout object with two QRects. 
""" padding = config.cache['tabs.padding'] indicator_padding = config.cache['tabs.indicator.padding'] text_rect = QRect(opt.rect) if not text_rect.isValid(): # This happens sometimes according to crash reports, but no idea # why... return None text_rect.adjust(padding.left, padding.top, -padding.right, -padding.bottom) indicator_width = config.cache['tabs.indicator.width'] if indicator_width == 0: indicator_rect = QRect() else: indicator_rect = QRect(opt.rect) qtutils.ensure_valid(indicator_rect) indicator_rect.adjust(padding.left + indicator_padding.left, padding.top + indicator_padding.top, 0, -(padding.bottom + indicator_padding.bottom)) indicator_rect.setWidth(indicator_width) text_rect.adjust(indicator_width + indicator_padding.left + indicator_padding.right, 0, 0, 0) icon_rect = self._get_icon_rect(opt, text_rect) if icon_rect.isValid(): text_rect.adjust( icon_rect.width() + TabBarStyle.ICON_PADDING, 0, 0, 0) text_rect = self._style.visualRect(opt.direction, opt.rect, text_rect) return Layouts(text=text_rect, icon=icon_rect, indicator=indicator_rect) def _get_icon_rect(self, opt, text_rect): """Get a QRect for the icon to draw. Args: opt: QStyleOptionTab text_rect: The QRect for the text. Return: A QRect. """ icon_size = opt.iconSize if not icon_size.isValid(): icon_extent = self.pixelMetric(QStyle.PM_SmallIconSize) icon_size = QSize(icon_extent, icon_extent) icon_mode = (QIcon.Normal if opt.state & QStyle.State_Enabled else QIcon.Disabled) icon_state = (QIcon.On if opt.state & QStyle.State_Selected else QIcon.Off) # reserve space for favicon when tab bar is vertical (issue #1968) position = config.cache['tabs.position'] if (position in [QTabWidget.East, QTabWidget.West] and config.cache['tabs.favicons.show'] != 'never'): tab_icon_size = icon_size else: actual_size = opt.icon.actualSize(icon_size, icon_mode, icon_state) tab_icon_size = QSize( min(actual_size.width(), icon_size.width()), min(actual_size.height(), icon_size.height())) icon_top = text_rect.center().y() + 1 - tab_icon_size.height() // 2 icon_rect = QRect(QPoint(text_rect.left(), icon_top), tab_icon_size) icon_rect = self._style.visualRect(opt.direction, opt.rect, icon_rect) return icon_rect
1
25,033
I don't think this will actually align the indexes: if you have more than 100 tabs, the tabs over 100 will be misaligned. In addition, with fewer than 10 tabs there will be a pointless space.
qutebrowser-qutebrowser
py
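The review comment above argues that a fixed-width pad only lines up tab indexes inside the range it was sized for. A minimal Python sketch of the alternative the reviewer seems to be hinting at: derive the pad width from the current tab count instead of hard-coding it. The `format_index` helper and the demo values are hypothetical illustrations, not qutebrowser code.

```python
def format_index(idx, total):
    """Right-align a 1-based tab index to the width of the largest index.

    With 5 tabs the width is 1, so no space is wasted; with 120 tabs the
    width is 3, so indexes 1..120 all line up in the tab titles.
    """
    width = len(str(total))
    return str(idx).rjust(width)


if __name__ == '__main__':
    for total in (5, 120):
        print([format_index(i, total) for i in (1, 9, total)])
```

Because the width tracks the actual tab count, it neither breaks past 99 tabs nor pads single-digit counts.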
@@ -326,7 +326,7 @@ func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { headerValues[i] = "host:" + v4.Request.URL.Host } else { headerValues[i] = k + ":" + - strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") + strings.Join(v4.signedHeaderVals[k], ",") } }
1
// Package v4 implements signing for AWS V4 signer package v4 import ( "crypto/hmac" "crypto/sha256" "encoding/hex" "fmt" "io" "net/http" "net/url" "sort" "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol/rest" ) const ( authHeaderPrefix = "AWS4-HMAC-SHA256" timeFormat = "20060102T150405Z" shortTimeFormat = "20060102" ) var ignoredHeaders = rules{ blacklist{ mapRule{ "Content-Length": struct{}{}, "User-Agent": struct{}{}, }, }, } // requiredSignedHeaders is a whitelist for build canonical headers. var requiredSignedHeaders = rules{ whitelist{ mapRule{ "Cache-Control": struct{}{}, "Content-Disposition": struct{}{}, "Content-Encoding": struct{}{}, "Content-Language": struct{}{}, "Content-Md5": struct{}{}, "Content-Type": struct{}{}, "Expires": struct{}{}, "If-Match": struct{}{}, "If-Modified-Since": struct{}{}, "If-None-Match": struct{}{}, "If-Unmodified-Since": struct{}{}, "Range": struct{}{}, "X-Amz-Acl": struct{}{}, "X-Amz-Copy-Source": struct{}{}, "X-Amz-Copy-Source-If-Match": struct{}{}, "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, "X-Amz-Copy-Source-If-None-Match": struct{}{}, "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, "X-Amz-Copy-Source-Range": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, "X-Amz-Grant-Full-control": struct{}{}, "X-Amz-Grant-Read": struct{}{}, "X-Amz-Grant-Read-Acp": struct{}{}, "X-Amz-Grant-Write": struct{}{}, "X-Amz-Grant-Write-Acp": struct{}{}, "X-Amz-Metadata-Directive": struct{}{}, "X-Amz-Mfa": struct{}{}, "X-Amz-Request-Payer": struct{}{}, "X-Amz-Server-Side-Encryption": struct{}{}, "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, "X-Amz-Storage-Class": struct{}{}, "X-Amz-Website-Redirect-Location": struct{}{}, }, }, patterns{"X-Amz-Meta-"}, } // allowedHoisting is a whitelist for build query headers. The boolean value // represents whether or not it is a pattern. var allowedQueryHoisting = inclusiveRules{ blacklist{requiredSignedHeaders}, patterns{"X-Amz-"}, } type signer struct { Request *http.Request Time time.Time ExpireTime time.Duration ServiceName string Region string CredValues credentials.Value Credentials *credentials.Credentials Query url.Values Body io.ReadSeeker Debug aws.LogLevelType Logger aws.Logger isPresign bool formattedTime string formattedShortTime string signedHeaders string canonicalHeaders string canonicalString string credentialString string stringToSign string signature string authorization string notHoist bool signedHeaderVals http.Header } // Sign requests with signature version 4. // // Will sign the requests with the service config's Credentials object // Signing is skipped if the credentials is the credentials.AnonymousCredentials // object. func Sign(req *request.Request) { // If the request does not need to be signed ignore the signing of the // request if the AnonymousCredentials object is used. 
if req.Config.Credentials == credentials.AnonymousCredentials { return } region := req.ClientInfo.SigningRegion if region == "" { region = aws.StringValue(req.Config.Region) } name := req.ClientInfo.SigningName if name == "" { name = req.ClientInfo.ServiceName } s := signer{ Request: req.HTTPRequest, Time: req.Time, ExpireTime: req.ExpireTime, Query: req.HTTPRequest.URL.Query(), Body: req.Body, ServiceName: name, Region: region, Credentials: req.Config.Credentials, Debug: req.Config.LogLevel.Value(), Logger: req.Config.Logger, notHoist: req.NotHoist, } req.Error = s.sign() req.SignedHeaderVals = s.signedHeaderVals } func (v4 *signer) sign() error { if v4.ExpireTime != 0 { v4.isPresign = true } if v4.isRequestSigned() { if !v4.Credentials.IsExpired() { // If the request is already signed, and the credentials have not // expired yet ignore the signing request. return nil } // The credentials have expired for this request. The current signing // is invalid, and needs to be request because the request will fail. if v4.isPresign { v4.removePresign() // Update the request's query string to ensure the values stays in // sync in the case retrieving the new credentials fails. v4.Request.URL.RawQuery = v4.Query.Encode() } } var err error v4.CredValues, err = v4.Credentials.Get() if err != nil { return err } if v4.isPresign { v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) if v4.CredValues.SessionToken != "" { v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) } else { v4.Query.Del("X-Amz-Security-Token") } } else if v4.CredValues.SessionToken != "" { v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) } v4.build() if v4.Debug.Matches(aws.LogDebugWithSigning) { v4.logSigningInfo() } return nil } const logSignInfoMsg = `DEBUG: Request Signiture: ---[ CANONICAL STRING ]----------------------------- %s ---[ STRING TO SIGN ]-------------------------------- %s%s -----------------------------------------------------` const logSignedURLMsg = ` ---[ SIGNED URL ]------------------------------------ %s` func (v4 *signer) logSigningInfo() { signedURLMsg := "" if v4.isPresign { signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) } msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) v4.Logger.Log(msg) } func (v4 *signer) build() { v4.buildTime() // no depends v4.buildCredentialString() // no depends unsignedHeaders := v4.Request.Header if v4.isPresign { if !v4.notHoist { urlValues := url.Values{} urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends for k := range urlValues { v4.Query[k] = urlValues[k] } } } v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) v4.buildCanonicalString() // depends on canon headers / signed headers v4.buildStringToSign() // depends on canon string v4.buildSignature() // depends on string to sign if v4.isPresign { v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature } else { parts := []string{ authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, "SignedHeaders=" + v4.signedHeaders, "Signature=" + v4.signature, } v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) } } func (v4 *signer) buildTime() { v4.formattedTime = v4.Time.UTC().Format(timeFormat) v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) if v4.isPresign { duration := int64(v4.ExpireTime / time.Second) v4.Query.Set("X-Amz-Date", v4.formattedTime) v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) } else { 
v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) } } func (v4 *signer) buildCredentialString() { v4.credentialString = strings.Join([]string{ v4.formattedShortTime, v4.Region, v4.ServiceName, "aws4_request", }, "/") if v4.isPresign { v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) } } func buildQuery(r rule, header http.Header) (url.Values, http.Header) { query := url.Values{} unsignedHeaders := http.Header{} for k, h := range header { if r.IsValid(k) { query[k] = h } else { unsignedHeaders[k] = h } } return query, unsignedHeaders } func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { var headers []string headers = append(headers, "host") for k, v := range header { canonicalKey := http.CanonicalHeaderKey(k) if !r.IsValid(canonicalKey) { continue // ignored header } lowerCaseKey := strings.ToLower(k) headers = append(headers, lowerCaseKey) if v4.signedHeaderVals == nil { v4.signedHeaderVals = make(http.Header) } v4.signedHeaderVals[lowerCaseKey] = v } sort.Strings(headers) v4.signedHeaders = strings.Join(headers, ";") if v4.isPresign { v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) } headerValues := make([]string, len(headers)) for i, k := range headers { if k == "host" { headerValues[i] = "host:" + v4.Request.URL.Host } else { headerValues[i] = k + ":" + strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") } } v4.canonicalHeaders = strings.Join(headerValues, "\n") } func (v4 *signer) buildCanonicalString() { v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) uri := v4.Request.URL.Opaque if uri != "" { uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") } else { uri = v4.Request.URL.Path } if uri == "" { uri = "/" } if v4.ServiceName != "s3" { uri = rest.EscapePath(uri, false) } v4.canonicalString = strings.Join([]string{ v4.Request.Method, uri, v4.Request.URL.RawQuery, v4.canonicalHeaders + "\n", v4.signedHeaders, v4.bodyDigest(), }, "\n") } func (v4 *signer) buildStringToSign() { v4.stringToSign = strings.Join([]string{ authHeaderPrefix, v4.formattedTime, v4.credentialString, hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), }, "\n") } func (v4 *signer) buildSignature() { secret := v4.CredValues.SecretAccessKey date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) region := makeHmac(date, []byte(v4.Region)) service := makeHmac(region, []byte(v4.ServiceName)) credentials := makeHmac(service, []byte("aws4_request")) signature := makeHmac(credentials, []byte(v4.stringToSign)) v4.signature = hex.EncodeToString(signature) } func (v4 *signer) bodyDigest() string { hash := v4.Request.Header.Get("X-Amz-Content-Sha256") if hash == "" { if v4.isPresign && v4.ServiceName == "s3" { hash = "UNSIGNED-PAYLOAD" } else if v4.Body == nil { hash = hex.EncodeToString(makeSha256([]byte{})) } else { hash = hex.EncodeToString(makeSha256Reader(v4.Body)) } v4.Request.Header.Add("X-Amz-Content-Sha256", hash) } return hash } // isRequestSigned returns if the request is currently signed or presigned func (v4 *signer) isRequestSigned() bool { if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { return true } if v4.Request.Header.Get("Authorization") != "" { return true } return false } // unsign removes signing flags for both signed and presigned requests. 
func (v4 *signer) removePresign() { v4.Query.Del("X-Amz-Algorithm") v4.Query.Del("X-Amz-Signature") v4.Query.Del("X-Amz-Security-Token") v4.Query.Del("X-Amz-Date") v4.Query.Del("X-Amz-Expires") v4.Query.Del("X-Amz-Credential") v4.Query.Del("X-Amz-SignedHeaders") } func makeHmac(key []byte, data []byte) []byte { hash := hmac.New(sha256.New, key) hash.Write(data) return hash.Sum(nil) } func makeSha256(data []byte) []byte { hash := sha256.New() hash.Write(data) return hash.Sum(nil) } func makeSha256Reader(reader io.ReadSeeker) []byte { hash := sha256.New() start, _ := reader.Seek(0, 1) defer reader.Seek(start, 0) io.Copy(hash, reader) return hash.Sum(nil) }
1
7,778
hrm, unsigned headers now won't be included.
aws-aws-sdk-go
go
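One reading of the review comment above: after the change, header values come from the filtered `signedHeaderVals` map rather than the original request headers, so any value that was never copied into that map no longer reaches the canonical string. A minimal, self-contained Go sketch of that lookup difference; the header names and maps below are made up for illustration and are not the SDK's actual data.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Full set of headers on the request.
	requestHeader := http.Header{}
	requestHeader.Set("X-Amz-Meta-Foo", "bar")
	requestHeader.Set("X-Custom-Unsigned", "value") // never copied into the signed map

	// Only headers that passed the signing rules get copied here, keyed by
	// their lower-cased name (mirroring what buildCanonicalHeaders populates).
	signedHeaderVals := http.Header{
		"x-amz-meta-foo": requestHeader[http.CanonicalHeaderKey("X-Amz-Meta-Foo")],
	}

	for _, k := range []string{"x-amz-meta-foo", "x-custom-unsigned"} {
		before := strings.Join(requestHeader[http.CanonicalHeaderKey(k)], ",")
		after := strings.Join(signedHeaderVals[k], ",")
		fmt.Printf("%s: old lookup=%q, new lookup=%q\n", k, before, after)
	}
}
```

The second key prints a value for the old lookup and an empty string for the new one, which is the drop the reviewer is pointing at.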
@@ -53,6 +53,7 @@ class CategoryCode */ public static function isValid(string $value): bool { - return strlen($value) <= 255; + return '' !== $value + && strlen($value) < 256; } }
1
<?php /** * Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved. * See LICENSE.txt for license details. */ declare(strict_types = 1); namespace Ergonode\Category\Domain\ValueObject; /** */ class CategoryCode { /** * @var string */ private $value; /** * @param string $value */ public function __construct(string $value) { if (!self::isValid($value)) { throw new \InvalidArgumentException('Invalid category code value'); } $this->value = $value; } /** * @return string */ public function getValue(): string { return $this->value; } /** * @return string */ public function __toString(): string { return $this->value; } /** * @param string $value * * @return bool */ public static function isValid(string $value): bool { return strlen($value) <= 255; } }
1
8,399
what if `$value = ' ' `?
ergonode-backend
php
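The review comment above points at a gap in the proposed check: a whitespace-only string such as `' '` is non-empty and shorter than 256 bytes, so it passes. A minimal PHP sketch contrasting the proposed rule with a trim-based one; this is an illustration of the concern, not the project's final validation.

```php
<?php
declare(strict_types = 1);

function isValidProposed(string $value): bool
{
    // The rule from the diff: non-empty and under 256 bytes.
    return '' !== $value && strlen($value) < 256;
}

function isValidTrimmed(string $value): bool
{
    // Same length bound, but whitespace-only values are rejected.
    return '' !== trim($value) && strlen($value) < 256;
}

var_dump(isValidProposed(' ')); // bool(true)  - whitespace-only slips through
var_dump(isValidTrimmed(' '));  // bool(false) - rejected after trim
```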
@@ -262,10 +262,16 @@ class SparkWrite { private class DynamicOverwrite extends BaseBatchWrite { @Override public void commit(WriterCommitMessage[] messages) { + Iterable<DataFile> files = files(messages); + if (Iterables.size(files) == 0) { + LOG.info("Dynamic overwrite is empty, skipping commit"); + return; + } + ReplacePartitions dynamicOverwrite = table.newReplacePartitions(); int numFiles = 0; - for (DataFile file : files(messages)) { + for (DataFile file : files) { numFiles += 1; dynamicOverwrite.addFile(file); }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark.source; import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; import org.apache.iceberg.AppendFiles; import org.apache.iceberg.DataFile; import org.apache.iceberg.FileFormat; import org.apache.iceberg.FileScanTask; import org.apache.iceberg.IsolationLevel; import org.apache.iceberg.OverwriteFiles; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.ReplacePartitions; import org.apache.iceberg.Schema; import org.apache.iceberg.SerializableTable; import org.apache.iceberg.Snapshot; import org.apache.iceberg.SnapshotSummary; import org.apache.iceberg.SnapshotUpdate; import org.apache.iceberg.Table; import org.apache.iceberg.TableProperties; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.io.FileIO; import org.apache.iceberg.io.OutputFileFactory; import org.apache.iceberg.io.UnpartitionedWriter; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; import org.apache.iceberg.relocated.com.google.common.collect.Iterables; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.spark.FileRewriteCoordinator; import org.apache.iceberg.spark.SparkWriteOptions; import org.apache.iceberg.util.PropertyUtil; import org.apache.iceberg.util.Tasks; import org.apache.spark.api.java.JavaSparkContext; import org.apache.spark.broadcast.Broadcast; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.connector.write.BatchWrite; import org.apache.spark.sql.connector.write.DataWriter; import org.apache.spark.sql.connector.write.DataWriterFactory; import org.apache.spark.sql.connector.write.LogicalWriteInfo; import org.apache.spark.sql.connector.write.PhysicalWriteInfo; import org.apache.spark.sql.connector.write.WriterCommitMessage; import org.apache.spark.sql.connector.write.streaming.StreamingDataWriterFactory; import org.apache.spark.sql.connector.write.streaming.StreamingWrite; import org.apache.spark.sql.types.StructType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.iceberg.IsolationLevel.SERIALIZABLE; import static org.apache.iceberg.IsolationLevel.SNAPSHOT; import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS; import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT; import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS; import static 
org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT; import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES; import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT; import static org.apache.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS; import static org.apache.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT; import static org.apache.iceberg.TableProperties.DEFAULT_FILE_FORMAT; import static org.apache.iceberg.TableProperties.DEFAULT_FILE_FORMAT_DEFAULT; import static org.apache.iceberg.TableProperties.SPARK_WRITE_PARTITIONED_FANOUT_ENABLED; import static org.apache.iceberg.TableProperties.SPARK_WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT; import static org.apache.iceberg.TableProperties.WRITE_TARGET_FILE_SIZE_BYTES; import static org.apache.iceberg.TableProperties.WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT; class SparkWrite { private static final Logger LOG = LoggerFactory.getLogger(SparkWrite.class); private final JavaSparkContext sparkContext; private final Table table; private final String queryId; private final FileFormat format; private final String applicationId; private final String wapId; private final long targetFileSize; private final Schema writeSchema; private final StructType dsSchema; private final Map<String, String> extraSnapshotMetadata; private final boolean partitionedFanoutEnabled; SparkWrite(SparkSession spark, Table table, LogicalWriteInfo writeInfo, String applicationId, String wapId, Schema writeSchema, StructType dsSchema) { this.sparkContext = JavaSparkContext.fromSparkContext(spark.sparkContext()); this.table = table; this.queryId = writeInfo.queryId(); this.format = getFileFormat(table.properties(), writeInfo.options()); this.applicationId = applicationId; this.wapId = wapId; this.writeSchema = writeSchema; this.dsSchema = dsSchema; this.extraSnapshotMetadata = Maps.newHashMap(); writeInfo.options().forEach((key, value) -> { if (key.startsWith(SnapshotSummary.EXTRA_METADATA_PREFIX)) { extraSnapshotMetadata.put(key.substring(SnapshotSummary.EXTRA_METADATA_PREFIX.length()), value); } }); long tableTargetFileSize = PropertyUtil.propertyAsLong( table.properties(), WRITE_TARGET_FILE_SIZE_BYTES, WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT); this.targetFileSize = writeInfo.options().getLong(SparkWriteOptions.TARGET_FILE_SIZE_BYTES, tableTargetFileSize); boolean tablePartitionedFanoutEnabled = PropertyUtil.propertyAsBoolean( table.properties(), SPARK_WRITE_PARTITIONED_FANOUT_ENABLED, SPARK_WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT); this.partitionedFanoutEnabled = writeInfo.options() .getBoolean(SparkWriteOptions.FANOUT_ENABLED, tablePartitionedFanoutEnabled); } BatchWrite asBatchAppend() { return new BatchAppend(); } BatchWrite asDynamicOverwrite() { return new DynamicOverwrite(); } BatchWrite asOverwriteByFilter(Expression overwriteExpr) { return new OverwriteByFilter(overwriteExpr); } BatchWrite asCopyOnWriteMergeWrite(SparkMergeScan scan, IsolationLevel isolationLevel) { return new CopyOnWriteMergeWrite(scan, isolationLevel); } BatchWrite asRewrite(String fileSetID) { return new RewriteFiles(fileSetID); } StreamingWrite asStreamingAppend() { return new StreamingAppend(); } StreamingWrite asStreamingOverwrite() { return new StreamingOverwrite(); } private FileFormat getFileFormat(Map<String, String> tableProperties, Map<String, String> options) { Optional<String> formatOption = Optional.ofNullable(options.get(SparkWriteOptions.WRITE_FORMAT)); String formatString = formatOption .orElseGet(() -> 
tableProperties.getOrDefault(DEFAULT_FILE_FORMAT, DEFAULT_FILE_FORMAT_DEFAULT)); return FileFormat.valueOf(formatString.toUpperCase(Locale.ENGLISH)); } private boolean isWapTable() { return Boolean.parseBoolean(table.properties().getOrDefault( TableProperties.WRITE_AUDIT_PUBLISH_ENABLED, TableProperties.WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT)); } // the writer factory works for both batch and streaming private WriterFactory createWriterFactory() { // broadcast the table metadata as the writer factory will be sent to executors Broadcast<Table> tableBroadcast = sparkContext.broadcast(SerializableTable.copyOf(table)); return new WriterFactory(tableBroadcast, format, targetFileSize, writeSchema, dsSchema, partitionedFanoutEnabled); } private void commitOperation(SnapshotUpdate<?> operation, String description) { LOG.info("Committing {} to table {}", description, table); if (applicationId != null) { operation.set("spark.app.id", applicationId); } if (!extraSnapshotMetadata.isEmpty()) { extraSnapshotMetadata.forEach(operation::set); } if (isWapTable() && wapId != null) { // write-audit-publish is enabled for this table and job // stage the changes without changing the current snapshot operation.set(SnapshotSummary.STAGED_WAP_ID_PROP, wapId); operation.stageOnly(); } long start = System.currentTimeMillis(); operation.commit(); // abort is automatically called if this fails long duration = System.currentTimeMillis() - start; LOG.info("Committed in {} ms", duration); } private void abort(WriterCommitMessage[] messages) { Map<String, String> props = table.properties(); Tasks.foreach(files(messages)) .retry(PropertyUtil.propertyAsInt(props, COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT)) .exponentialBackoff( PropertyUtil.propertyAsInt(props, COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT), PropertyUtil.propertyAsInt(props, COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT), PropertyUtil.propertyAsInt(props, COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT), 2.0 /* exponential */) .throwFailureWhenFinished() .run(file -> { table.io().deleteFile(file.path().toString()); }); } private Iterable<DataFile> files(WriterCommitMessage[] messages) { if (messages.length > 0) { return Iterables.concat(Iterables.transform(Arrays.asList(messages), message -> message != null ? 
ImmutableList.copyOf(((TaskCommit) message).files()) : ImmutableList.of())); } return ImmutableList.of(); } private abstract class BaseBatchWrite implements BatchWrite { @Override public DataWriterFactory createBatchWriterFactory(PhysicalWriteInfo info) { return createWriterFactory(); } @Override public void abort(WriterCommitMessage[] messages) { SparkWrite.this.abort(messages); } @Override public String toString() { return String.format("IcebergBatchWrite(table=%s, format=%s)", table, format); } } private class BatchAppend extends BaseBatchWrite { @Override public void commit(WriterCommitMessage[] messages) { AppendFiles append = table.newAppend(); int numFiles = 0; for (DataFile file : files(messages)) { numFiles += 1; append.appendFile(file); } commitOperation(append, String.format("append with %d new data files", numFiles)); } } private class DynamicOverwrite extends BaseBatchWrite { @Override public void commit(WriterCommitMessage[] messages) { ReplacePartitions dynamicOverwrite = table.newReplacePartitions(); int numFiles = 0; for (DataFile file : files(messages)) { numFiles += 1; dynamicOverwrite.addFile(file); } commitOperation(dynamicOverwrite, String.format("dynamic partition overwrite with %d new data files", numFiles)); } } private class OverwriteByFilter extends BaseBatchWrite { private final Expression overwriteExpr; private OverwriteByFilter(Expression overwriteExpr) { this.overwriteExpr = overwriteExpr; } @Override public void commit(WriterCommitMessage[] messages) { OverwriteFiles overwriteFiles = table.newOverwrite(); overwriteFiles.overwriteByRowFilter(overwriteExpr); int numFiles = 0; for (DataFile file : files(messages)) { numFiles += 1; overwriteFiles.addFile(file); } String commitMsg = String.format("overwrite by filter %s with %d new data files", overwriteExpr, numFiles); commitOperation(overwriteFiles, commitMsg); } } private class CopyOnWriteMergeWrite extends BaseBatchWrite { private final SparkMergeScan scan; private final IsolationLevel isolationLevel; private CopyOnWriteMergeWrite(SparkMergeScan scan, IsolationLevel isolationLevel) { this.scan = scan; this.isolationLevel = isolationLevel; } private List<DataFile> overwrittenFiles() { return scan.files().stream().map(FileScanTask::file).collect(Collectors.toList()); } private Expression conflictDetectionFilter() { // the list of filter expressions may be empty but is never null List<Expression> scanFilterExpressions = scan.filterExpressions(); Expression filter = Expressions.alwaysTrue(); for (Expression expr : scanFilterExpressions) { filter = Expressions.and(filter, expr); } return filter; } @Override public void commit(WriterCommitMessage[] messages) { OverwriteFiles overwriteFiles = table.newOverwrite(); List<DataFile> overwrittenFiles = overwrittenFiles(); int numOverwrittenFiles = overwrittenFiles.size(); for (DataFile overwrittenFile : overwrittenFiles) { overwriteFiles.deleteFile(overwrittenFile); } int numAddedFiles = 0; for (DataFile file : files(messages)) { numAddedFiles += 1; overwriteFiles.addFile(file); } if (isolationLevel == SERIALIZABLE) { commitWithSerializableIsolation(overwriteFiles, numOverwrittenFiles, numAddedFiles); } else if (isolationLevel == SNAPSHOT) { commitWithSnapshotIsolation(overwriteFiles, numOverwrittenFiles, numAddedFiles); } else { throw new IllegalArgumentException("Unsupported isolation level: " + isolationLevel); } } private void commitWithSerializableIsolation(OverwriteFiles overwriteFiles, int numOverwrittenFiles, int numAddedFiles) { Long scanSnapshotId = 
scan.snapshotId(); if (scanSnapshotId != null) { overwriteFiles.validateFromSnapshot(scanSnapshotId); } Expression conflictDetectionFilter = conflictDetectionFilter(); overwriteFiles.validateNoConflictingAppends(conflictDetectionFilter); String commitMsg = String.format( "overwrite of %d data files with %d new data files, scanSnapshotId: %d, conflictDetectionFilter: %s", numOverwrittenFiles, numAddedFiles, scanSnapshotId, conflictDetectionFilter); commitOperation(overwriteFiles, commitMsg); } private void commitWithSnapshotIsolation(OverwriteFiles overwriteFiles, int numOverwrittenFiles, int numAddedFiles) { String commitMsg = String.format( "overwrite of %d data files with %d new data files", numOverwrittenFiles, numAddedFiles); commitOperation(overwriteFiles, commitMsg); } } private class RewriteFiles extends BaseBatchWrite { private final String fileSetID; private RewriteFiles(String fileSetID) { this.fileSetID = fileSetID; } @Override public void commit(WriterCommitMessage[] messages) { FileRewriteCoordinator coordinator = FileRewriteCoordinator.get(); Set<DataFile> newDataFiles = Sets.newHashSetWithExpectedSize(messages.length); for (DataFile file : files(messages)) { newDataFiles.add(file); } coordinator.stageRewrite(table, fileSetID, Collections.unmodifiableSet(newDataFiles)); } } private abstract class BaseStreamingWrite implements StreamingWrite { private static final String QUERY_ID_PROPERTY = "spark.sql.streaming.queryId"; private static final String EPOCH_ID_PROPERTY = "spark.sql.streaming.epochId"; protected abstract String mode(); @Override public StreamingDataWriterFactory createStreamingWriterFactory(PhysicalWriteInfo info) { return createWriterFactory(); } @Override public final void commit(long epochId, WriterCommitMessage[] messages) { LOG.info("Committing epoch {} for query {} in {} mode", epochId, queryId, mode()); table.refresh(); Long lastCommittedEpochId = findLastCommittedEpochId(); if (lastCommittedEpochId != null && epochId <= lastCommittedEpochId) { LOG.info("Skipping epoch {} for query {} as it was already committed", epochId, queryId); return; } doCommit(epochId, messages); } protected abstract void doCommit(long epochId, WriterCommitMessage[] messages); protected <T> void commit(SnapshotUpdate<T> snapshotUpdate, long epochId, String description) { snapshotUpdate.set(QUERY_ID_PROPERTY, queryId); snapshotUpdate.set(EPOCH_ID_PROPERTY, Long.toString(epochId)); commitOperation(snapshotUpdate, description); } private Long findLastCommittedEpochId() { Snapshot snapshot = table.currentSnapshot(); Long lastCommittedEpochId = null; while (snapshot != null) { Map<String, String> summary = snapshot.summary(); String snapshotQueryId = summary.get(QUERY_ID_PROPERTY); if (queryId.equals(snapshotQueryId)) { lastCommittedEpochId = Long.valueOf(summary.get(EPOCH_ID_PROPERTY)); break; } Long parentSnapshotId = snapshot.parentId(); snapshot = parentSnapshotId != null ? 
table.snapshot(parentSnapshotId) : null; } return lastCommittedEpochId; } @Override public void abort(long epochId, WriterCommitMessage[] messages) { SparkWrite.this.abort(messages); } @Override public String toString() { return String.format("IcebergStreamingWrite(table=%s, format=%s)", table, format); } } private class StreamingAppend extends BaseStreamingWrite { @Override protected String mode() { return "append"; } @Override protected void doCommit(long epochId, WriterCommitMessage[] messages) { AppendFiles append = table.newFastAppend(); int numFiles = 0; for (DataFile file : files(messages)) { append.appendFile(file); numFiles++; } commit(append, epochId, String.format("streaming append with %d new data files", numFiles)); } } private class StreamingOverwrite extends BaseStreamingWrite { @Override protected String mode() { return "complete"; } @Override public void doCommit(long epochId, WriterCommitMessage[] messages) { OverwriteFiles overwriteFiles = table.newOverwrite(); overwriteFiles.overwriteByRowFilter(Expressions.alwaysTrue()); int numFiles = 0; for (DataFile file : files(messages)) { overwriteFiles.addFile(file); numFiles++; } commit(overwriteFiles, epochId, String.format("streaming complete overwrite with %d new data files", numFiles)); } } public static class TaskCommit implements WriterCommitMessage { private final DataFile[] taskFiles; TaskCommit(DataFile[] taskFiles) { this.taskFiles = taskFiles; } DataFile[] files() { return taskFiles; } } private static class WriterFactory implements DataWriterFactory, StreamingDataWriterFactory { private final Broadcast<Table> tableBroadcast; private final FileFormat format; private final long targetFileSize; private final Schema writeSchema; private final StructType dsSchema; private final boolean partitionedFanoutEnabled; protected WriterFactory(Broadcast<Table> tableBroadcast, FileFormat format, long targetFileSize, Schema writeSchema, StructType dsSchema, boolean partitionedFanoutEnabled) { this.tableBroadcast = tableBroadcast; this.format = format; this.targetFileSize = targetFileSize; this.writeSchema = writeSchema; this.dsSchema = dsSchema; this.partitionedFanoutEnabled = partitionedFanoutEnabled; } @Override public DataWriter<InternalRow> createWriter(int partitionId, long taskId) { return createWriter(partitionId, taskId, 0); } @Override public DataWriter<InternalRow> createWriter(int partitionId, long taskId, long epochId) { Table table = tableBroadcast.value(); OutputFileFactory fileFactory = OutputFileFactory.builderFor(table, partitionId, taskId).format(format).build(); SparkAppenderFactory appenderFactory = SparkAppenderFactory.builderFor(table, writeSchema, dsSchema).build(); PartitionSpec spec = table.spec(); FileIO io = table.io(); if (spec.isUnpartitioned()) { return new Unpartitioned3Writer(spec, format, appenderFactory, fileFactory, io, targetFileSize); } else if (partitionedFanoutEnabled) { return new PartitionedFanout3Writer( spec, format, appenderFactory, fileFactory, io, targetFileSize, writeSchema, dsSchema); } else { return new Partitioned3Writer( spec, format, appenderFactory, fileFactory, io, targetFileSize, writeSchema, dsSchema); } } } private static class Unpartitioned3Writer extends UnpartitionedWriter<InternalRow> implements DataWriter<InternalRow> { Unpartitioned3Writer(PartitionSpec spec, FileFormat format, SparkAppenderFactory appenderFactory, OutputFileFactory fileFactory, FileIO io, long targetFileSize) { super(spec, format, appenderFactory, fileFactory, io, targetFileSize); } @Override public 
WriterCommitMessage commit() throws IOException { this.close(); return new TaskCommit(dataFiles()); } } private static class Partitioned3Writer extends SparkPartitionedWriter implements DataWriter<InternalRow> { Partitioned3Writer(PartitionSpec spec, FileFormat format, SparkAppenderFactory appenderFactory, OutputFileFactory fileFactory, FileIO io, long targetFileSize, Schema schema, StructType sparkSchema) { super(spec, format, appenderFactory, fileFactory, io, targetFileSize, schema, sparkSchema); } @Override public WriterCommitMessage commit() throws IOException { this.close(); return new TaskCommit(dataFiles()); } } private static class PartitionedFanout3Writer extends SparkPartitionedFanoutWriter implements DataWriter<InternalRow> { PartitionedFanout3Writer(PartitionSpec spec, FileFormat format, SparkAppenderFactory appenderFactory, OutputFileFactory fileFactory, FileIO io, long targetFileSize, Schema schema, StructType sparkSchema) { super(spec, format, appenderFactory, fileFactory, io, targetFileSize, schema, sparkSchema); } @Override public WriterCommitMessage commit() throws IOException { this.close(); return new TaskCommit(dataFiles()); } } }
1
40,697
What about using `!files.iterator().hasNext()` instead? I'm not sure we want to assume that the iterable can be consumed multiple times. Plus there's no need to consume the entire iterable just to check whether it is empty.
apache-iceberg
java
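The review comment above objects to sizing the whole iterable just to detect emptiness. A minimal Java sketch of the lazy check it suggests: ask an iterator whether a first element exists, which reads nothing else and does not assume the elements can be counted cheaply. The class and sample data are stand-ins for illustration, not Iceberg code.

```java
import java.util.Collections;
import java.util.List;

public class EmptyCheckSketch {

    /** True when the iterable yields no elements; only a single hasNext() is asked. */
    static <T> boolean isEmpty(Iterable<T> iterable) {
        return !iterable.iterator().hasNext();
    }

    public static void main(String[] args) {
        Iterable<String> none = Collections.emptyList();
        Iterable<String> some = List.of("data-file-1.parquet");

        System.out.println(isEmpty(none)); // true  -> skip the commit
        System.out.println(isEmpty(some)); // false -> proceed with the commit
    }
}
```

If the iterable really is single-pass, the check itself opens a second iterator, so the safest variant materializes the files into a List once and reuses it for both the emptiness test and the commit loop.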
@@ -85,6 +85,11 @@ Container* Container::getParentContainer() return thing->getContainer(); } +std::string Container::getName() const { + const ItemType& it = items[id]; + return getNameDescription(it, this, -1, false); +} + bool Container::hasParent() const { return getID() != ITEM_BROWSEFIELD && dynamic_cast<const Player*>(getParent()) == nullptr;
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2019 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include "container.h" #include "iomap.h" #include "game.h" extern Game g_game; Container::Container(uint16_t type) : Container(type, items[type].maxItems) {} Container::Container(uint16_t type, uint16_t size, bool unlocked /*= true*/, bool pagination /*= false*/) : Item(type), maxSize(size), unlocked(unlocked), pagination(pagination) {} Container::Container(Tile* tile) : Container(ITEM_BROWSEFIELD, 30, false, true) { TileItemVector* itemVector = tile->getItemList(); if (itemVector) { for (Item* item : *itemVector) { if ((item->getContainer() || item->hasProperty(CONST_PROP_MOVEABLE)) && !item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { itemlist.push_front(item); item->setParent(this); } } } setParent(tile); } Container::~Container() { if (getID() == ITEM_BROWSEFIELD) { g_game.browseFields.erase(getTile()); for (Item* item : itemlist) { item->setParent(parent); } } else { for (Item* item : itemlist) { item->setParent(nullptr); item->decrementReferenceCounter(); } } } Item* Container::clone() const { Container* clone = static_cast<Container*>(Item::clone()); for (Item* item : itemlist) { clone->addItem(item->clone()); } clone->totalWeight = totalWeight; return clone; } Container* Container::getParentContainer() { Thing* thing = getParent(); if (!thing) { return nullptr; } return thing->getContainer(); } bool Container::hasParent() const { return getID() != ITEM_BROWSEFIELD && dynamic_cast<const Player*>(getParent()) == nullptr; } void Container::addItem(Item* item) { itemlist.push_back(item); item->setParent(this); } Attr_ReadValue Container::readAttr(AttrTypes_t attr, PropStream& propStream) { if (attr == ATTR_CONTAINER_ITEMS) { if (!propStream.read<uint32_t>(serializationCount)) { return ATTR_READ_ERROR; } return ATTR_READ_END; } return Item::readAttr(attr, propStream); } bool Container::unserializeItemNode(OTB::Loader& loader, const OTB::Node& node, PropStream& propStream) { bool ret = Item::unserializeItemNode(loader, node, propStream); if (!ret) { return false; } for (auto& itemNode : node.children) { //load container items if (itemNode.type != OTBM_ITEM) { // unknown type return false; } PropStream itemPropStream; if (!loader.getProps(itemNode, itemPropStream)) { return false; } Item* item = Item::CreateItem(itemPropStream); if (!item) { return false; } if (!item->unserializeItemNode(loader, itemNode, itemPropStream)) { return false; } addItem(item); updateItemWeight(item->getWeight()); } return true; } void Container::updateItemWeight(int32_t diff) { totalWeight += diff; if (Container* parentContainer = getParentContainer()) { parentContainer->updateItemWeight(diff); } } uint32_t Container::getWeight() const { return Item::getWeight() + 
totalWeight; } std::string Container::getContentDescription() const { std::ostringstream os; return getContentDescription(os).str(); } std::ostringstream& Container::getContentDescription(std::ostringstream& os) const { bool firstitem = true; for (ContainerIterator it = iterator(); it.hasNext(); it.advance()) { Item* item = *it; Container* container = item->getContainer(); if (container && !container->empty()) { continue; } if (firstitem) { firstitem = false; } else { os << ", "; } os << item->getNameDescription(); } if (firstitem) { os << "nothing"; } return os; } Item* Container::getItemByIndex(size_t index) const { if (index >= size()) { return nullptr; } return itemlist[index]; } uint32_t Container::getItemHoldingCount() const { uint32_t counter = 0; for (ContainerIterator it = iterator(); it.hasNext(); it.advance()) { ++counter; } return counter; } bool Container::isHoldingItem(const Item* item) const { for (ContainerIterator it = iterator(); it.hasNext(); it.advance()) { if (*it == item) { return true; } } return false; } void Container::onAddContainerItem(Item* item) { SpectatorVec spectators; g_game.map.getSpectators(spectators, getPosition(), false, true, 2, 2, 2, 2); //send to client for (Creature* spectator : spectators) { spectator->getPlayer()->sendAddContainerItem(this, item); } //event methods for (Creature* spectator : spectators) { spectator->getPlayer()->onAddContainerItem(item); } } void Container::onUpdateContainerItem(uint32_t index, Item* oldItem, Item* newItem) { SpectatorVec spectators; g_game.map.getSpectators(spectators, getPosition(), false, true, 2, 2, 2, 2); //send to client for (Creature* spectator : spectators) { spectator->getPlayer()->sendUpdateContainerItem(this, index, newItem); } //event methods for (Creature* spectator : spectators) { spectator->getPlayer()->onUpdateContainerItem(this, oldItem, newItem); } } void Container::onRemoveContainerItem(uint32_t index, Item* item) { SpectatorVec spectators; g_game.map.getSpectators(spectators, getPosition(), false, true, 2, 2, 2, 2); //send change to client for (Creature* spectator : spectators) { spectator->getPlayer()->sendRemoveContainerItem(this, index); } //event methods for (Creature* spectator : spectators) { spectator->getPlayer()->onRemoveContainerItem(this, item); } } ReturnValue Container::queryAdd(int32_t index, const Thing& thing, uint32_t count, uint32_t flags, Creature* actor/* = nullptr*/) const { bool childIsOwner = hasBitSet(FLAG_CHILDISOWNER, flags); if (childIsOwner) { //a child container is querying, since we are the top container (not carried by a player) //just return with no error. 
return RETURNVALUE_NOERROR; } if (!unlocked) { return RETURNVALUE_NOTPOSSIBLE; } const Item* item = thing.getItem(); if (item == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } if (!item->isPickupable()) { return RETURNVALUE_CANNOTPICKUP; } if (item == this) { return RETURNVALUE_THISISIMPOSSIBLE; } const Cylinder* cylinder = getParent(); if (!hasBitSet(FLAG_NOLIMIT, flags)) { while (cylinder) { if (cylinder == &thing) { return RETURNVALUE_THISISIMPOSSIBLE; } if (dynamic_cast<const Inbox*>(cylinder)) { return RETURNVALUE_CONTAINERNOTENOUGHROOM; } cylinder = cylinder->getParent(); } if (index == INDEX_WHEREEVER && size() >= capacity()) { return RETURNVALUE_CONTAINERNOTENOUGHROOM; } } else { while (cylinder) { if (cylinder == &thing) { return RETURNVALUE_THISISIMPOSSIBLE; } cylinder = cylinder->getParent(); } } const Cylinder* topParent = getTopParent(); if (topParent != this) { return topParent->queryAdd(INDEX_WHEREEVER, *item, count, flags | FLAG_CHILDISOWNER, actor); } else { return RETURNVALUE_NOERROR; } } ReturnValue Container::queryMaxCount(int32_t index, const Thing& thing, uint32_t count, uint32_t& maxQueryCount, uint32_t flags) const { const Item* item = thing.getItem(); if (item == nullptr) { maxQueryCount = 0; return RETURNVALUE_NOTPOSSIBLE; } if (hasBitSet(FLAG_NOLIMIT, flags)) { maxQueryCount = std::max<uint32_t>(1, count); return RETURNVALUE_NOERROR; } int32_t freeSlots = std::max<int32_t>(capacity() - size(), 0); if (item->isStackable()) { uint32_t n = 0; if (index == INDEX_WHEREEVER) { //Iterate through every item and check how much free stackable slots there is. uint32_t slotIndex = 0; for (Item* containerItem : itemlist) { if (containerItem != item && containerItem->equals(item) && containerItem->getItemCount() < 100) { uint32_t remainder = (100 - containerItem->getItemCount()); if (queryAdd(slotIndex++, *item, remainder, flags) == RETURNVALUE_NOERROR) { n += remainder; } } } } else { const Item* destItem = getItemByIndex(index); if (item->equals(destItem) && destItem->getItemCount() < 100) { uint32_t remainder = 100 - destItem->getItemCount(); if (queryAdd(index, *item, remainder, flags) == RETURNVALUE_NOERROR) { n = remainder; } } } maxQueryCount = freeSlots * 100 + n; if (maxQueryCount < count) { return RETURNVALUE_CONTAINERNOTENOUGHROOM; } } else { maxQueryCount = freeSlots; if (maxQueryCount == 0) { return RETURNVALUE_CONTAINERNOTENOUGHROOM; } } return RETURNVALUE_NOERROR; } ReturnValue Container::queryRemove(const Thing& thing, uint32_t count, uint32_t flags) const { int32_t index = getThingIndex(&thing); if (index == -1) { return RETURNVALUE_NOTPOSSIBLE; } const Item* item = thing.getItem(); if (item == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } if (count == 0 || (item->isStackable() && count > item->getItemCount())) { return RETURNVALUE_NOTPOSSIBLE; } if (!item->isMoveable() && !hasBitSet(FLAG_IGNORENOTMOVEABLE, flags)) { return RETURNVALUE_NOTMOVEABLE; } return RETURNVALUE_NOERROR; } Cylinder* Container::queryDestination(int32_t& index, const Thing& thing, Item** destItem, uint32_t& flags) { if (!unlocked) { *destItem = nullptr; return this; } if (index == 254 /*move up*/) { index = INDEX_WHEREEVER; *destItem = nullptr; Container* parentContainer = dynamic_cast<Container*>(getParent()); if (parentContainer) { return parentContainer; } return this; } if (index == 255 /*add wherever*/) { index = INDEX_WHEREEVER; *destItem = nullptr; } else if (index >= static_cast<int32_t>(capacity())) { /* if you have a container, maximize it to show all 20 slots then you open a bag 
that is inside the container you will have a bag with 8 slots and a "grey" area where the other 12 slots where from the container if you drop the item on that grey area the client calculates the slot position as if the bag has 20 slots */ index = INDEX_WHEREEVER; *destItem = nullptr; } const Item* item = thing.getItem(); if (!item) { return this; } if (index != INDEX_WHEREEVER) { Item* itemFromIndex = getItemByIndex(index); if (itemFromIndex) { *destItem = itemFromIndex; } Cylinder* subCylinder = dynamic_cast<Cylinder*>(*destItem); if (subCylinder) { index = INDEX_WHEREEVER; *destItem = nullptr; return subCylinder; } } bool autoStack = !hasBitSet(FLAG_IGNOREAUTOSTACK, flags); if (autoStack && item->isStackable() && item->getParent() != this) { //try find a suitable item to stack with uint32_t n = 0; for (Item* listItem : itemlist) { if (listItem != item && listItem->equals(item) && listItem->getItemCount() < 100) { *destItem = listItem; index = n; return this; } ++n; } } return this; } void Container::addThing(Thing* thing) { return addThing(0, thing); } void Container::addThing(int32_t index, Thing* thing) { if (index >= static_cast<int32_t>(capacity())) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* item = thing->getItem(); if (item == nullptr) { return /*RETURNVALUE_NOTPOSSIBLE*/; } item->setParent(this); itemlist.push_front(item); updateItemWeight(item->getWeight()); //send change to client if (getParent() && (getParent() != VirtualCylinder::virtualCylinder)) { onAddContainerItem(item); } } void Container::addItemBack(Item* item) { addItem(item); updateItemWeight(item->getWeight()); //send change to client if (getParent() && (getParent() != VirtualCylinder::virtualCylinder)) { onAddContainerItem(item); } } void Container::updateThing(Thing* thing, uint16_t itemId, uint32_t count) { int32_t index = getThingIndex(thing); if (index == -1) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* item = thing->getItem(); if (item == nullptr) { return /*RETURNVALUE_NOTPOSSIBLE*/; } const int32_t oldWeight = item->getWeight(); item->setID(itemId); item->setSubType(count); updateItemWeight(-oldWeight + item->getWeight()); //send change to client if (getParent()) { onUpdateContainerItem(index, item, item); } } void Container::replaceThing(uint32_t index, Thing* thing) { Item* item = thing->getItem(); if (!item) { return /*RETURNVALUE_NOTPOSSIBLE*/; } Item* replacedItem = getItemByIndex(index); if (!replacedItem) { return /*RETURNVALUE_NOTPOSSIBLE*/; } itemlist[index] = item; item->setParent(this); updateItemWeight(-static_cast<int32_t>(replacedItem->getWeight()) + item->getWeight()); //send change to client if (getParent()) { onUpdateContainerItem(index, replacedItem, item); } replacedItem->setParent(nullptr); } void Container::removeThing(Thing* thing, uint32_t count) { Item* item = thing->getItem(); if (item == nullptr) { return /*RETURNVALUE_NOTPOSSIBLE*/; } int32_t index = getThingIndex(thing); if (index == -1) { return /*RETURNVALUE_NOTPOSSIBLE*/; } if (item->isStackable() && count != item->getItemCount()) { uint8_t newCount = static_cast<uint8_t>(std::max<int32_t>(0, item->getItemCount() - count)); const int32_t oldWeight = item->getWeight(); item->setItemCount(newCount); updateItemWeight(-oldWeight + item->getWeight()); //send change to client if (getParent()) { onUpdateContainerItem(index, item, item); } } else { updateItemWeight(-static_cast<int32_t>(item->getWeight())); //send change to client if (getParent()) { onRemoveContainerItem(index, item); } item->setParent(nullptr); 
itemlist.erase(itemlist.begin() + index); } } int32_t Container::getThingIndex(const Thing* thing) const { int32_t index = 0; for (Item* item : itemlist) { if (item == thing) { return index; } ++index; } return -1; } size_t Container::getFirstIndex() const { return 0; } size_t Container::getLastIndex() const { return size(); } uint32_t Container::getItemTypeCount(uint16_t itemId, int32_t subType/* = -1*/) const { uint32_t count = 0; for (Item* item : itemlist) { if (item->getID() == itemId) { count += countByType(item, subType); } } return count; } std::map<uint32_t, uint32_t>& Container::getAllItemTypeCount(std::map<uint32_t, uint32_t>& countMap) const { for (Item* item : itemlist) { countMap[item->getID()] += item->getItemCount(); } return countMap; } Thing* Container::getThing(size_t index) const { return getItemByIndex(index); } void Container::postAddNotification(Thing* thing, const Cylinder* oldParent, int32_t index, cylinderlink_t) { Cylinder* topParent = getTopParent(); if (topParent->getCreature()) { topParent->postAddNotification(thing, oldParent, index, LINK_TOPPARENT); } else if (topParent == this) { //let the tile class notify surrounding players if (topParent->getParent()) { topParent->getParent()->postAddNotification(thing, oldParent, index, LINK_NEAR); } } else { topParent->postAddNotification(thing, oldParent, index, LINK_PARENT); } } void Container::postRemoveNotification(Thing* thing, const Cylinder* newParent, int32_t index, cylinderlink_t) { Cylinder* topParent = getTopParent(); if (topParent->getCreature()) { topParent->postRemoveNotification(thing, newParent, index, LINK_TOPPARENT); } else if (topParent == this) { //let the tile class notify surrounding players if (topParent->getParent()) { topParent->getParent()->postRemoveNotification(thing, newParent, index, LINK_NEAR); } } else { topParent->postRemoveNotification(thing, newParent, index, LINK_PARENT); } } void Container::internalAddThing(Thing* thing) { internalAddThing(0, thing); } void Container::internalAddThing(uint32_t, Thing* thing) { Item* item = thing->getItem(); if (item == nullptr) { return; } item->setParent(this); itemlist.push_front(item); updateItemWeight(item->getWeight()); } void Container::startDecaying() { for (Item* item : itemlist) { item->startDecaying(); } } ContainerIterator Container::iterator() const { ContainerIterator cit; if (!itemlist.empty()) { cit.over.push_back(this); cit.cur = itemlist.begin(); } return cit; } Item* ContainerIterator::operator*() { return *cur; } void ContainerIterator::advance() { if (Item* i = *cur) { if (Container* c = i->getContainer()) { if (!c->empty()) { over.push_back(c); } } } ++cur; if (cur == over.front()->itemlist.end()) { over.pop_front(); if (!over.empty()) { cur = over.front()->itemlist.begin(); } } }
1
17,198
I think we could add a `bool addArticle` parameter here, defaulted to false like it's done in the Item class (so `std::string Container::getName(bool addArticle /* = false*/) const`), and pass that variable to the `getNameDescription` call.
otland-forgottenserver
cpp
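The review comment above suggests mirroring Item's defaulted addArticle flag. A minimal C++ sketch of that shape: a defaulted parameter on getName() forwarded to the description helper, so existing getName() call sites keep compiling. The types below are stand-ins for illustration; they are not the server's real ItemType or getNameDescription signatures.

```cpp
#include <iostream>
#include <string>

struct FakeItemType {
    std::string name = "backpack";
    std::string article = "a";
};

static std::string getNameDescription(const FakeItemType& it, bool addArticle)
{
    return addArticle ? it.article + " " + it.name : it.name;
}

struct FakeContainer {
    FakeItemType it;

    // Defaulted parameter keeps existing getName() call sites working while
    // letting new callers request the article explicitly.
    std::string getName(bool addArticle = false) const {
        return getNameDescription(it, addArticle);
    }
};

int main()
{
    FakeContainer c;
    std::cout << c.getName() << '\n';      // "backpack"
    std::cout << c.getName(true) << '\n';  // "a backpack"
}
```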
@@ -47,11 +47,11 @@ public class LockableBottomSheetBehavior<V extends View> extends ViewPagerBottom @Override public boolean onStartNestedScroll(CoordinatorLayout coordinatorLayout, V child, View directTargetChild, - View target, int nestedScrollAxes) { + View target, int axes, int type) { boolean handled = false; if (!isLocked) { - handled = super.onStartNestedScroll(coordinatorLayout, child, directTargetChild, target, nestedScrollAxes); + handled = super.onStartNestedScroll(coordinatorLayout, child, directTargetChild, target, axes, type); } return handled;
1
package de.danoeh.antennapod.view; import android.content.Context; import android.util.AttributeSet; import android.view.MotionEvent; import android.view.View; import androidx.coordinatorlayout.widget.CoordinatorLayout; import com.google.android.material.bottomsheet.ViewPagerBottomSheetBehavior; /** * Based on https://stackoverflow.com/a/40798214 */ public class LockableBottomSheetBehavior<V extends View> extends ViewPagerBottomSheetBehavior<V> { private boolean isLocked = false; public LockableBottomSheetBehavior() {} public LockableBottomSheetBehavior(Context context, AttributeSet attrs) { super(context, attrs); } public void setLocked(boolean locked) { isLocked = locked; } @Override public boolean onInterceptTouchEvent(CoordinatorLayout parent, V child, MotionEvent event) { boolean handled = false; if (!isLocked) { handled = super.onInterceptTouchEvent(parent, child, event); } return handled; } @Override public boolean onTouchEvent(CoordinatorLayout parent, V child, MotionEvent event) { boolean handled = false; if (!isLocked) { handled = super.onTouchEvent(parent, child, event); } return handled; } @Override public boolean onStartNestedScroll(CoordinatorLayout coordinatorLayout, V child, View directTargetChild, View target, int nestedScrollAxes) { boolean handled = false; if (!isLocked) { handled = super.onStartNestedScroll(coordinatorLayout, child, directTargetChild, target, nestedScrollAxes); } return handled; } @Override public void onNestedPreScroll(CoordinatorLayout coordinatorLayout, V child, View target, int dx, int dy, int[] consumed) { if (!isLocked) { super.onNestedPreScroll(coordinatorLayout, child, target, dx, dy, consumed); } } @Override public void onStopNestedScroll(CoordinatorLayout coordinatorLayout, V child, View target) { if (!isLocked) { super.onStopNestedScroll(coordinatorLayout, child, target); } } @Override public boolean onNestedPreFling(CoordinatorLayout coordinatorLayout, V child, View target, float velocityX, float velocityY) { boolean handled = false; if (!isLocked) { handled = super.onNestedPreFling(coordinatorLayout, child, target, velocityX, velocityY); } return handled; } }
1
16,887
What if a library function on the outside still calls the old method? It is then no longer blocked properly. Have you tested the change?
AntennaPod-AntennaPod
java
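The review comment above warns that the superclass still exposes the old five-argument onStartNestedScroll, so overriding only the new overload leaves that entry point unguarded. A minimal Java sketch of the safer shape, gating both overloads on the lock; the Behavior classes below are stand-ins, not the real CoordinatorLayout/ViewPagerBottomSheetBehavior API.

```java
public class LockedOverloadsSketch {

    static class BaseBehavior {
        // Legacy entry point that outside code may still call.
        boolean onStartNestedScroll(int axes) {
            return true;
        }

        // Newer entry point with the extra 'type' argument.
        boolean onStartNestedScroll(int axes, int type) {
            return true;
        }
    }

    static class LockableBehavior extends BaseBehavior {
        boolean isLocked = true;

        @Override
        boolean onStartNestedScroll(int axes) {
            return !isLocked && super.onStartNestedScroll(axes);
        }

        @Override
        boolean onStartNestedScroll(int axes, int type) {
            return !isLocked && super.onStartNestedScroll(axes, type);
        }
    }

    public static void main(String[] args) {
        LockableBehavior behavior = new LockableBehavior();
        // Both call paths stay blocked while locked.
        System.out.println(behavior.onStartNestedScroll(2));
        System.out.println(behavior.onStartNestedScroll(2, 0));
    }
}
```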
@@ -197,11 +197,15 @@ static mrb_value build_app_response(struct st_mruby_subreq_t *subreq) return resp; } -static void append_bufs(struct st_mruby_subreq_t *subreq, h2o_iovec_t *inbufs, size_t inbufcnt) +static void append_bufs(struct st_mruby_subreq_t *subreq, h2o_sendvec_t *inbufs, size_t inbufcnt) { - int i; + size_t i; for (i = 0; i != inbufcnt; ++i) { - h2o_buffer_append(&subreq->buf, inbufs[i].base, inbufs[i].len); + char *dst = h2o_buffer_reserve(&subreq->buf, inbufs[i].len).base; + assert(dst != NULL && "no memory or disk space; FIXME bail out gracefully"); + if (!(*inbufs[i].fill_cb)(&subreq->super, h2o_iovec_init(dst, inbufs[i].len), inbufs + i, 0)) + h2o_fatal("FIXME handle error from pull handler"); + subreq->buf->size += inbufs[i].len; } }
1
/* * Copyright (c) 2017 Ichito Nagata, Fastly, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <mruby.h> #include <mruby/array.h> #include <mruby/error.h> #include <mruby/hash.h> #include <mruby/string.h> #include <mruby/variable.h> #include "h2o/mruby_.h" #include "embedded.c.h" struct st_mruby_subreq_conn_t { h2o_conn_t super; struct { h2o_iovec_t host; h2o_iovec_t port; struct sockaddr_storage addr; socklen_t len; } server; struct { h2o_iovec_t host; h2o_iovec_t port; struct sockaddr_storage addr; socklen_t len; } remote; }; struct st_mruby_subreq_t { h2o_req_t super; struct st_mruby_subreq_conn_t conn; h2o_mruby_context_t *ctx; h2o_buffer_t *buf; mrb_value receiver; struct { mrb_value request; mrb_value input_stream; } refs; mrb_value error_stream; struct { h2o_mruby_generator_t *response; h2o_mruby_generator_t *body; } shortcut; enum { INITIAL, RECEIVED, FINAL_RECEIVED } state; unsigned char chain_proceed : 1; }; struct st_h2o_mruby_middleware_sender_t { h2o_mruby_sender_t super; h2o_doublebuffer_t sending; struct st_mruby_subreq_t *subreq; struct { h2o_iovec_t *bufs; size_t bufcnt; } blocking; }; static void dispose_subreq(struct st_mruby_subreq_t *subreq) { /* subreq must be alive until generator gets disposed if shortcut is used */ assert(subreq->shortcut.response == NULL); assert(subreq->shortcut.body == NULL); if (!mrb_nil_p(subreq->error_stream)) { mrb_gc_unregister(subreq->ctx->shared->mrb, subreq->error_stream); subreq->error_stream = mrb_nil_value(); } if (subreq->buf != NULL) h2o_buffer_dispose(&subreq->buf); if (!mrb_nil_p(subreq->refs.request)) DATA_PTR(subreq->refs.request) = NULL; if (!mrb_nil_p(subreq->refs.input_stream)) DATA_PTR(subreq->refs.input_stream) = NULL; h2o_dispose_request(&subreq->super); free(subreq); } static void on_gc_dispose_app_request(mrb_state *mrb, void *_subreq) { struct st_mruby_subreq_t *subreq = _subreq; if (subreq == NULL) return; subreq->refs.request = mrb_nil_value(); if (mrb_nil_p(subreq->refs.input_stream)) dispose_subreq(subreq); } static void on_gc_dispose_app_input_stream(mrb_state *mrb, void *_subreq) { struct st_mruby_subreq_t *subreq = _subreq; if (subreq == NULL) return; subreq->refs.input_stream = mrb_nil_value(); if (mrb_nil_p(subreq->refs.request)) dispose_subreq(subreq); } const static struct mrb_data_type app_request_type = {"app_request_type", on_gc_dispose_app_request}; const static struct mrb_data_type app_input_stream_type = {"app_input_stream", on_gc_dispose_app_input_stream}; static h2o_iovec_t 
convert_env_to_header_name(h2o_mem_pool_t *pool, const char *name, size_t len) { #define KEY_PREFIX "HTTP_" #define KEY_PREFIX_LEN (sizeof(KEY_PREFIX) - 1) if (len < KEY_PREFIX_LEN || !h2o_memis(name, KEY_PREFIX_LEN, KEY_PREFIX, KEY_PREFIX_LEN)) { return h2o_iovec_init(NULL, 0); } h2o_iovec_t ret; ret.len = len - KEY_PREFIX_LEN; ret.base = h2o_mem_alloc_pool(pool, char, ret.len); name += KEY_PREFIX_LEN; len -= KEY_PREFIX_LEN; char *d = ret.base; for (; len != 0; ++name, --len) *d++ = *name == '_' ? '-' : h2o_tolower(*name); return ret; #undef KEY_PREFIX #undef KEY_PREFIX_LEN } static int iterate_headers_callback(h2o_mruby_shared_context_t *shared_ctx, h2o_mem_pool_t *pool, h2o_header_t *header, void *cb_data) { mrb_value result_hash = mrb_obj_value(cb_data); mrb_value n; if (h2o_iovec_is_token(header->name)) { const h2o_token_t *token = H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, header->name); n = h2o_mruby_token_string(shared_ctx, token); } else { n = h2o_mruby_new_str(shared_ctx->mrb, header->name->base, header->name->len); } mrb_value v = h2o_mruby_new_str(shared_ctx->mrb, header->value.base, header->value.len); mrb_hash_set(shared_ctx->mrb, result_hash, n, v); return 0; } static mrb_value build_app_response(struct st_mruby_subreq_t *subreq) { h2o_req_t *req = &subreq->super; h2o_mruby_context_t *ctx = subreq->ctx; mrb_state *mrb = ctx->shared->mrb; /* build response array */ mrb_value resp = mrb_ary_new_capa(mrb, 3); /* status */ mrb_ary_set(mrb, resp, 0, mrb_fixnum_value(req->res.status)); /* headers */ { mrb_value headers_hash = mrb_hash_new_capa(mrb, (int)req->res.headers.size); h2o_mruby_iterate_native_headers(ctx->shared, &req->pool, &req->res.headers, iterate_headers_callback, mrb_obj_ptr(headers_hash)); if (req->res.content_length != SIZE_MAX) { h2o_token_t *token = H2O_TOKEN_CONTENT_LENGTH; mrb_value n = h2o_mruby_new_str(mrb, token->buf.base, token->buf.len); mrb_value v = h2o_mruby_to_str(mrb, mrb_fixnum_value(req->res.content_length)); mrb_hash_set(mrb, headers_hash, n, v); } mrb_ary_set(mrb, resp, 1, headers_hash); } /* body */ { mrb_value body = h2o_mruby_create_data_instance( mrb, mrb_ary_entry(ctx->shared->constants, H2O_MRUBY_APP_INPUT_STREAM_CLASS), subreq, &app_input_stream_type); mrb_funcall(mrb, body, "initialize", 0); mrb_ary_set(mrb, resp, 2, body); } return resp; } static void append_bufs(struct st_mruby_subreq_t *subreq, h2o_iovec_t *inbufs, size_t inbufcnt) { int i; for (i = 0; i != inbufcnt; ++i) { h2o_buffer_append(&subreq->buf, inbufs[i].base, inbufs[i].len); } } static mrb_value detach_receiver(struct st_mruby_subreq_t *subreq) { mrb_value receiver = subreq->receiver; assert(!mrb_nil_p(receiver)); subreq->receiver = mrb_nil_value(); mrb_gc_unregister(subreq->ctx->shared->mrb, receiver); mrb_gc_protect(subreq->ctx->shared->mrb, receiver); return receiver; } static void send_response_shortcutted(struct st_mruby_subreq_t *subreq); static void subreq_ostream_send(h2o_ostream_t *_self, h2o_req_t *_subreq, h2o_iovec_t *inbufs, size_t inbufcnt, h2o_send_state_t state) { struct st_mruby_subreq_t *subreq = (void *)_subreq; mrb_state *mrb = subreq->ctx->shared->mrb; /* body shortcut */ if (subreq->shortcut.body != NULL) { if (subreq->shortcut.body->sender->final_sent) return; /* TODO: close subreq ASAP */ subreq->chain_proceed = 1; if (subreq->buf == NULL) { /* flushing chunks has been finished, so send directly */ h2o_mruby_sender_do_send(subreq->shortcut.body, inbufs, inbufcnt, state); } else { /* flushing, buffer chunks again */ append_bufs(subreq, inbufs, 
inbufcnt); } return; } int is_first = subreq->state == INITIAL; if (h2o_send_state_is_in_progress(state)) { h2o_proceed_response_deferred(&subreq->super); subreq->state = RECEIVED; } else { subreq->state = FINAL_RECEIVED; } append_bufs(subreq, inbufs, inbufcnt); /* response shortcut */ if (subreq->shortcut.response != NULL) { send_response_shortcutted(subreq); return; } if (mrb_nil_p(subreq->receiver)) return; int gc_arena = mrb_gc_arena_save(mrb); if (is_first) { /* the fiber is waiting due to calling req.join */ h2o_mruby_run_fiber(subreq->ctx, detach_receiver(subreq), mrb_nil_value(), NULL); } else if (subreq->buf->size != 0) { /* resume callback sender fiber */ mrb_value chunk = h2o_mruby_new_str(mrb, subreq->buf->bytes, subreq->buf->size); h2o_buffer_consume(&subreq->buf, subreq->buf->size); h2o_mruby_run_fiber(subreq->ctx, detach_receiver(subreq), chunk, NULL); } else if (subreq->state == FINAL_RECEIVED) { h2o_mruby_run_fiber(subreq->ctx, detach_receiver(subreq), mrb_nil_value(), NULL); } mrb_gc_arena_restore(mrb, gc_arena); } static void prepare_subreq_entity(h2o_req_t *subreq, h2o_mruby_context_t *ctx, mrb_value rack_input) { mrb_state *mrb = ctx->shared->mrb; if (mrb_nil_p(rack_input)) { subreq->entity = h2o_iovec_init(NULL, 0); subreq->content_length = 0; return; } // TODO: fastpath? if (!mrb_respond_to(mrb, rack_input, mrb_intern_lit(mrb, "read"))) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "'rack.input' must respond to 'read'")); return; } mrb_value body = mrb_funcall(mrb, rack_input, "read", 0); if (mrb->exc != NULL) return; if (!mrb_string_p(body)) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "return value of `read` must be a string")); return; } subreq->entity = h2o_strdup(&subreq->pool, RSTRING_PTR(body), RSTRING_LEN(body)); if (subreq->content_length == SIZE_MAX) { subreq->content_length = subreq->entity.len; } else { if (subreq->content_length > subreq->entity.len) subreq->content_length = subreq->entity.len; else if (subreq->content_length < subreq->entity.len) subreq->entity.len = subreq->content_length; } } static socklen_t parse_hostport(h2o_mem_pool_t *pool, h2o_iovec_t host, h2o_iovec_t port, struct sockaddr_storage *ss) { /* fast path for IPv4 addresses */ { unsigned int d1, d2, d3, d4, _port; int parsed_len; if (sscanf(host.base, "%" SCNd32 "%*[.]%" SCNd32 "%*[.]%" SCNd32 "%*[.]%" SCNd32 "%n", &d1, &d2, &d3, &d4, &parsed_len) == 4 && parsed_len == host.len && d1 <= UCHAR_MAX && d2 <= UCHAR_MAX && d3 <= UCHAR_MAX && d4 <= UCHAR_MAX) { if (sscanf(port.base, "%" SCNd32 "%n", &_port, &parsed_len) == 1 && parsed_len == port.len && _port <= USHRT_MAX) { struct sockaddr_in sin; memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; sin.sin_port = htons(_port); sin.sin_addr.s_addr = ntohl((d1 << 24) + (d2 << 16) + (d3 << 8) + d4); *ss = *((struct sockaddr_storage *)&sin); return sizeof(sin); } } } /* call getaddrinfo */ struct addrinfo hints, *res = NULL; char *hostname = h2o_mem_alloc_pool(pool, char, host.len + 1); memcpy(hostname, host.base, host.len); hostname[host.len] = '\0'; char *servname = h2o_mem_alloc_pool(pool, char, port.len + 1); memcpy(servname, port.base, port.len); hostname[port.len] = '\0'; memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; hints.ai_protocol = IPPROTO_TCP; hints.ai_flags = AI_ADDRCONFIG | AI_NUMERICSERV; if (getaddrinfo(hostname, servname, &hints, &res) != 0) { goto Error; } switch (res->ai_family) { case AF_INET: case AF_INET6: memcpy(ss, 
res->ai_addr, res->ai_addrlen); break; default: goto Error; } socklen_t len = res->ai_addrlen; freeaddrinfo(res); return len; Error: if (res != NULL) freeaddrinfo(res); return 0; } static socklen_t get_sockname(h2o_conn_t *_conn, struct sockaddr *sa) { struct st_mruby_subreq_conn_t *conn = (void *)_conn; if (conn->server.host.base != NULL) { struct st_mruby_subreq_t *subreq = H2O_STRUCT_FROM_MEMBER(struct st_mruby_subreq_t, conn, conn); conn->server.len = parse_hostport(&subreq->super.pool, conn->server.host, conn->server.port, &conn->server.addr); conn->server.host.base = NULL; } memcpy(sa, &conn->server.addr, conn->server.len); return conn->server.len; } static socklen_t get_peername(h2o_conn_t *_conn, struct sockaddr *sa) { struct st_mruby_subreq_conn_t *conn = (void *)_conn; if (conn->remote.host.base != NULL) { struct st_mruby_subreq_t *subreq = H2O_STRUCT_FROM_MEMBER(struct st_mruby_subreq_t, conn, conn); conn->remote.len = parse_hostport(&subreq->super.pool, conn->remote.host, conn->remote.port, &conn->remote.addr); conn->remote.host.base = NULL; } memcpy(sa, &conn->remote.addr, conn->remote.len); return conn->remote.len; } static h2o_socket_t *get_socket(h2o_conn_t *conn) { return NULL; } static int handle_header_env_key(h2o_mruby_shared_context_t *shared_ctx, h2o_iovec_t *env_key, h2o_iovec_t value, void *_req) { h2o_req_t *req = _req; const h2o_token_t *token; /* convert env key to header name (lower case) */ h2o_iovec_t name = convert_env_to_header_name(&req->pool, env_key->base, env_key->len); if (name.base == NULL) return 0; if ((token = h2o_lookup_token(name.base, name.len)) != NULL) { if (token == H2O_TOKEN_CONTENT_LENGTH) { /* skip. use CONTENT_LENGTH instead of HTTP_CONTENT_LENGTH */ } else { value = h2o_strdup(&req->pool, value.base, value.len); h2o_add_header(&req->pool, &req->headers, token, NULL, value.base, value.len); } } else { value = h2o_strdup(&req->pool, value.base, value.len); h2o_add_header_by_str(&req->pool, &req->headers, name.base, name.len, 0, NULL, value.base, value.len); } return 0; } static void on_subreq_error_callback(void *data, h2o_iovec_t prefix, h2o_iovec_t msg) { struct st_mruby_subreq_t *subreq = (void *)data; mrb_state *mrb = subreq->ctx->shared->mrb; assert(!mrb_nil_p(subreq->error_stream)); h2o_iovec_t concat = h2o_concat(&subreq->super.pool, prefix, msg); mrb_value msgstr = h2o_mruby_new_str(mrb, concat.base, concat.len); mrb_funcall(mrb, subreq->error_stream, "write", 1, msgstr); if (mrb->exc != NULL) { fprintf(stderr, "%s\n", RSTRING_PTR(mrb_inspect(mrb, mrb_obj_value(mrb->exc)))); mrb->exc = NULL; } } /** * relaxed parsing of HTTP version that defaults to 1.1 */ static int parse_protocol_version(const char *s, size_t len) { int ver; if (len < 6) goto Default; if (memcmp(s, "HTTP/", 5) != 0) goto Default; if (!('0' <= s[5] && s[5] <= '9')) goto Default; ver = (s[5] - '0') * 0x100; if (len >= 8 && s[6] == '.' 
&& ('0' <= s[7] && s[7] <= '9')) ver += s[7] - '0'; return ver; Default: return 0x101; } struct st_mruby_env_foreach_data_t { h2o_mruby_context_t *ctx; struct st_mruby_subreq_t *subreq; struct { mrb_value scheme; mrb_value method; mrb_value script_name; mrb_value path_info; mrb_value query_string; mrb_value rack_input; mrb_value http_host; mrb_value server_name; mrb_value server_port; mrb_value server_addr; mrb_value remote_addr; mrb_value remote_port; mrb_value server_protocol; mrb_value remaining_delegations; mrb_value remaining_reprocesses; mrb_value rack_errors; } env; }; static int retrieve_env(mrb_state *mrb, mrb_value key, mrb_value value, void *_data) { struct st_mruby_env_foreach_data_t *data = _data; key = h2o_mruby_to_str(mrb, key); if (mrb->exc != NULL) return -1; #define RETRIEVE_ENV(val, stringify, numify) \ do { \ val = value; \ if (!mrb_nil_p(val)) { \ if (stringify) \ val = h2o_mruby_to_str(mrb, val); \ if (numify) \ val = h2o_mruby_to_int(mrb, val); \ if (mrb->exc != NULL) \ return -1; \ } \ } while (0) #define RETRIEVE_ENV_OBJ(val) RETRIEVE_ENV(val, 0, 0); #define RETRIEVE_ENV_STR(val) RETRIEVE_ENV(val, 1, 0); #define RETRIEVE_ENV_NUM(val) RETRIEVE_ENV(val, 0, 1); #define COND0(str, lit, pos) (sizeof(lit) - 1 <= (pos) || (str)[pos] == (lit)[pos]) #define COND1(str, lit, pos) (COND0(str, lit, pos) && COND0(str, lit, pos + 1) && COND0(str, lit, pos + 2)) #define COND2(str, lit, pos) (COND1(str, lit, pos) && COND1(str, lit, pos + 3) && COND1(str, lit, pos + 6)) #define COND(str, lit) (COND2(str, lit, 0) && COND2(str, lit, 9) && COND2(str, lit, 18)) #define CHECK_KEY(lit) ((sizeof(lit) - 1) == keystr_len && COND(keystr, lit)) const char *keystr = RSTRING_PTR(key); const mrb_int keystr_len = RSTRING_LEN(key); if (CHECK_KEY("CONTENT_LENGTH")) { mrb_value content_length = mrb_nil_value(); RETRIEVE_ENV_NUM(content_length); if (!mrb_nil_p(content_length)) data->subreq->super.content_length = mrb_fixnum(content_length); } else if (CHECK_KEY("HTTP_HOST")) { RETRIEVE_ENV_STR(data->env.http_host); } else if (CHECK_KEY("PATH_INFO")) { RETRIEVE_ENV_STR(data->env.path_info); } else if (CHECK_KEY("QUERY_STRING")) { RETRIEVE_ENV_STR(data->env.query_string); } else if (CHECK_KEY("REMOTE_ADDR")) { RETRIEVE_ENV_STR(data->env.remote_addr); } else if (CHECK_KEY("REMOTE_PORT")) { RETRIEVE_ENV_STR(data->env.remote_port); } else if (CHECK_KEY("REQUEST_METHOD")) { RETRIEVE_ENV_STR(data->env.method); } else if (CHECK_KEY("SCRIPT_NAME")) { RETRIEVE_ENV_STR(data->env.script_name); } else if (CHECK_KEY("SERVER_ADDR")) { RETRIEVE_ENV_STR(data->env.server_addr); } else if (CHECK_KEY("SERVER_NAME")) { RETRIEVE_ENV_STR(data->env.server_name); } else if (CHECK_KEY("SERVER_PORT")) { RETRIEVE_ENV_STR(data->env.server_port); } else if (CHECK_KEY("SERVER_PROTOCOL")) { RETRIEVE_ENV_STR(data->env.server_protocol); } else if (CHECK_KEY("SERVER_SOFTWARE")) { } else if (CHECK_KEY("h2o.remaining_delegations")) { RETRIEVE_ENV_NUM(data->env.remaining_delegations); } else if (CHECK_KEY("h2o.remaining_reprocesses")) { RETRIEVE_ENV_NUM(data->env.remaining_reprocesses); } else if (CHECK_KEY("rack.errors")) { RETRIEVE_ENV_OBJ(data->env.rack_errors); } else if (CHECK_KEY("rack.hijack?")) { } else if (CHECK_KEY("rack.input")) { RETRIEVE_ENV_OBJ(data->env.rack_input); } else if (CHECK_KEY("rack.multiprocess")) { } else if (CHECK_KEY("rack.multithread")) { } else if (CHECK_KEY("rack.run_once")) { } else if (CHECK_KEY("rack.url_scheme")) { RETRIEVE_ENV_STR(data->env.scheme); } else if (keystr_len >= 5 && memcmp(keystr, "HTTP_", 
5) == 0) { mrb_value http_header = mrb_nil_value(); RETRIEVE_ENV_STR(http_header); if (!mrb_nil_p(http_header)) h2o_mruby_iterate_header_values(data->ctx->shared, key, http_header, handle_header_env_key, &data->subreq->super); } else if (keystr_len != 0) { /* set to req->env */ mrb_value reqenv = mrb_nil_value(); RETRIEVE_ENV_STR(reqenv); if (!mrb_nil_p(reqenv)) { h2o_vector_reserve(&data->subreq->super.pool, &data->subreq->super.env, data->subreq->super.env.size + 2); data->subreq->super.env.entries[data->subreq->super.env.size] = h2o_strdup(&data->subreq->super.pool, keystr, keystr_len); data->subreq->super.env.entries[data->subreq->super.env.size + 1] = h2o_strdup(&data->subreq->super.pool, RSTRING_PTR(reqenv), RSTRING_LEN(reqenv)); data->subreq->super.env.size += 2; } } #undef RETRIEVE_ENV #undef RETRIEVE_ENV_OBJ #undef RETRIEVE_ENV_STR #undef RETRIEVE_ENV_NUM #undef COND0 #undef COND1 #undef COND2 #undef COND #undef CHECK_KEY return 0; } static struct st_mruby_subreq_t *create_subreq(h2o_mruby_context_t *ctx, mrb_value env, int is_reprocess) { static const h2o_conn_callbacks_t callbacks = {get_sockname, /* stringify address */ get_peername, /* ditto */ NULL, /* push (no push in subrequest) */ get_socket, /* get underlying socket */ NULL, /* get debug state */ {{{NULL}}}}; mrb_state *mrb = ctx->shared->mrb; int gc_arena = mrb_gc_arena_save(mrb); mrb_gc_protect(mrb, env); /* create subreq */ struct st_mruby_subreq_t *subreq = h2o_mem_alloc(sizeof(*subreq)); memset(&subreq->conn, 0, sizeof(subreq->conn)); subreq->ctx = ctx; subreq->receiver = mrb_nil_value(); subreq->refs.request = mrb_nil_value(); subreq->refs.input_stream = mrb_nil_value(); h2o_buffer_init(&subreq->buf, &h2o_socket_buffer_prototype); subreq->shortcut.response = NULL; subreq->shortcut.body = NULL; subreq->state = INITIAL; subreq->chain_proceed = 0; /* initialize super and conn */ subreq->conn.super.ctx = ctx->shared->ctx; h2o_init_request(&subreq->super, &subreq->conn.super, NULL); h2o_ostream_t *ostream = h2o_add_ostream(&subreq->super, H2O_ALIGNOF(*ostream), sizeof(*ostream), &subreq->super._ostr_top); ostream->do_send = subreq_ostream_send; subreq->conn.super.hosts = ctx->handler->pathconf->global->hosts; subreq->conn.super.connected_at = (struct timeval){0}; /* no need because subreq won't logged */ subreq->conn.super.id = 0; /* currently conn->id is used only for logging, so set zero as a meaningless value */ subreq->conn.super.callbacks = &callbacks; struct st_mruby_env_foreach_data_t data = {ctx, subreq, { mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), mrb_nil_value(), }}; /* retrieve env variables */ mrb_hash_foreach(mrb, mrb_hash_ptr(env), retrieve_env, &data); if (mrb->exc != NULL) goto Failed; /* do validations */ #define CHECK_REQUIRED(k, v, non_empty) \ do { \ if (mrb_nil_p(v)) { \ mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "missing required environment key: " k)); \ goto Failed; \ } else if (non_empty && RSTRING_LEN(v) == 0) { \ mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, k " must be not empty")); \ goto Failed; \ } \ } while (0) CHECK_REQUIRED("REQUEST_METHOD", data.env.method, 1); CHECK_REQUIRED("rack.url_scheme", data.env.scheme, 1); CHECK_REQUIRED("SCRIPT_NAME", data.env.script_name, 0); CHECK_REQUIRED("PATH_INFO", data.env.path_info, 0); 
CHECK_REQUIRED("QUERY_STRING", data.env.query_string, 0); #undef CHECK_REQUIRED if (RSTRING_LEN(data.env.script_name) != 0 && RSTRING_PTR(data.env.script_name)[0] != '/') { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "SCRIPT_NAME must start with `/`")); goto Failed; } if (RSTRING_LEN(data.env.path_info) != 0 && RSTRING_PTR(data.env.path_info)[0] != '/') { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "PATH_INFO must start with `/`")); goto Failed; } if (mrb_nil_p(data.env.http_host) && (mrb_nil_p(data.env.server_name) || mrb_nil_p(data.env.server_port))) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "HTTP_HOST or (SERVER_NAME and SERVER_PORT) is required")); goto Failed; } if (!is_reprocess) { /* ensure that SCRIPT_NAME is not modified */ h2o_iovec_t confpath = ctx->handler->pathconf->path; size_t confpath_len_wo_slash = confpath.base[confpath.len - 1] == '/' ? confpath.len - 1 : confpath.len; if (!(RSTRING_LEN(data.env.script_name) == confpath_len_wo_slash && memcmp(RSTRING_PTR(data.env.script_name), confpath.base, confpath_len_wo_slash) == 0)) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit( mrb, E_RUNTIME_ERROR, "can't modify `SCRIPT_NAME` with `H2O.next`. Is `H2O.reprocess` what you want?")); goto Failed; } } #define STR_TO_IOVEC(val) h2o_iovec_init(RSTRING_PTR(val), RSTRING_LEN(val)) /* construct url and parse */ h2o_iovec_t url_comps[9]; int num_comps = 0; url_comps[num_comps++] = STR_TO_IOVEC(data.env.scheme); url_comps[num_comps++] = h2o_iovec_init(H2O_STRLIT("://")); if (!mrb_nil_p(data.env.http_host)) { url_comps[num_comps++] = STR_TO_IOVEC(data.env.http_host); } else { url_comps[num_comps++] = STR_TO_IOVEC(data.env.server_name); url_comps[num_comps++] = h2o_iovec_init(H2O_STRLIT(":")); url_comps[num_comps++] = STR_TO_IOVEC(data.env.server_port); } url_comps[num_comps++] = STR_TO_IOVEC(data.env.script_name); url_comps[num_comps++] = STR_TO_IOVEC(data.env.path_info); if (RSTRING_LEN(data.env.query_string) != 0) { url_comps[num_comps++] = h2o_iovec_init(H2O_STRLIT("?")); url_comps[num_comps++] = STR_TO_IOVEC(data.env.query_string); } h2o_iovec_t url_str = h2o_concat_list(&subreq->super.pool, url_comps, num_comps); h2o_url_t url_parsed; if (h2o_url_parse(url_str.base, url_str.len, &url_parsed) != 0) { /* TODO is there any other way to show better error message? 
*/ mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "env variable contains invalid values")); goto Failed; } /* setup req and conn using retrieved values */ subreq->super.input.scheme = url_parsed.scheme; subreq->super.input.method = h2o_strdup(&subreq->super.pool, RSTRING_PTR(data.env.method), RSTRING_LEN(data.env.method)); subreq->super.input.authority = h2o_strdup(&subreq->super.pool, url_parsed.authority.base, url_parsed.authority.len); subreq->super.input.path = h2o_strdup(&subreq->super.pool, url_parsed.path.base, url_parsed.path.len); h2o_hostconf_t *hostconf = h2o_req_setup(&subreq->super); subreq->super.hostconf = hostconf; subreq->super.pathconf = ctx->handler->pathconf; subreq->super.handler = &ctx->handler->super; subreq->super.version = parse_protocol_version(RSTRING_PTR(data.env.server_protocol), RSTRING_LEN(data.env.server_protocol)); if (!mrb_nil_p(data.env.server_addr) && !mrb_nil_p(data.env.server_port)) { subreq->conn.server.host = h2o_strdup(&subreq->super.pool, RSTRING_PTR(data.env.server_addr), RSTRING_LEN(data.env.server_addr)); subreq->conn.server.port = h2o_strdup(&subreq->super.pool, RSTRING_PTR(data.env.server_port), RSTRING_LEN(data.env.server_port)); } if (!mrb_nil_p(data.env.remote_addr) && !mrb_nil_p(data.env.remote_port)) { subreq->conn.remote.host = h2o_strdup(&subreq->super.pool, RSTRING_PTR(data.env.remote_addr), RSTRING_LEN(data.env.remote_addr)); subreq->conn.remote.port = h2o_strdup(&subreq->super.pool, RSTRING_PTR(data.env.remote_port), RSTRING_LEN(data.env.remote_port)); } if (!mrb_nil_p(data.env.remaining_delegations)) { mrb_int v = mrb_fixnum(data.env.remaining_delegations); subreq->super.remaining_delegations = (unsigned)(v < 0 ? 0 : v); } if (!mrb_nil_p(data.env.remaining_reprocesses)) { mrb_int v = mrb_fixnum(data.env.remaining_reprocesses); subreq->super.remaining_reprocesses = (unsigned)(v < 0 ? 0 : v); } if (!mrb_nil_p(data.env.rack_errors)) { subreq->error_stream = data.env.rack_errors; mrb_gc_register(mrb, data.env.rack_errors); subreq->super.error_log_delegate.cb = on_subreq_error_callback; subreq->super.error_log_delegate.data = subreq; } prepare_subreq_entity(&subreq->super, ctx, data.env.rack_input); if (mrb->exc != NULL) goto Failed; return subreq; Failed: assert(mrb->exc != NULL); dispose_subreq(subreq); mrb_gc_arena_restore(mrb, gc_arena); return NULL; #undef STR_TO_IOVEC } static mrb_value middleware_wait_response_callback(h2o_mruby_context_t *mctx, mrb_value input, mrb_value *receiver, mrb_value args, int *run_again) { mrb_state *mrb = mctx->shared->mrb; struct st_mruby_subreq_t *subreq; if ((subreq = mrb_data_check_get_ptr(mrb, mrb_ary_entry(args, 0), &app_request_type)) == NULL) { *run_again = 1; return mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "AppRequest#join wrong self"); } subreq->receiver = *receiver; mrb_gc_register(mrb, *receiver); return mrb_nil_value(); } static mrb_value can_build_response_method(mrb_state *mrb, mrb_value self) { struct st_mruby_subreq_t *subreq = mrb_data_check_get_ptr(mrb, self, &app_request_type); if (subreq == NULL) mrb_raise(mrb, E_ARGUMENT_ERROR, "AppRequest#_can_build_response? 
wrong self"); return mrb_bool_value(subreq->state != INITIAL); } static mrb_value build_response_method(mrb_state *mrb, mrb_value self) { struct st_mruby_subreq_t *subreq = mrb_data_check_get_ptr(mrb, self, &app_request_type); if (subreq == NULL) mrb_raise(mrb, E_ARGUMENT_ERROR, "AppRequest#build_response wrong self"); mrb_value resp = build_app_response(subreq); subreq->refs.input_stream = mrb_ary_entry(resp, 2); return resp; } static mrb_value middleware_request_method(mrb_state *mrb, mrb_value self) { h2o_mruby_shared_context_t *shared_ctx = mrb->ud; h2o_mruby_context_t *ctx = shared_ctx->current_context; assert(ctx != NULL); mrb_value env; mrb_value reprocess; mrb_get_args(mrb, "H", &env); reprocess = mrb_iv_get(mrb, self, mrb_intern_lit(mrb, "@reprocess")); /* create subreq */ struct st_mruby_subreq_t *subreq = create_subreq(shared_ctx->current_context, env, mrb_bool(reprocess)); if (mrb->exc != NULL) { mrb_value exc = mrb_obj_value(mrb->exc); mrb->exc = NULL; mrb_exc_raise(mrb, exc); } subreq->refs.request = h2o_mruby_create_data_instance(mrb, mrb_ary_entry(ctx->shared->constants, H2O_MRUBY_APP_REQUEST_CLASS), subreq, &app_request_type); h2o_req_t *super = &subreq->super; if (mrb_bool(reprocess)) { h2o_reprocess_request_deferred(super, super->method, super->scheme, super->authority, super->path, super->overrides, 1); } else { h2o_delegate_request_deferred(super); } return subreq->refs.request; } static mrb_value middleware_wait_chunk_callback(h2o_mruby_context_t *mctx, mrb_value input, mrb_value *receiver, mrb_value args, int *run_again) { mrb_state *mrb = mctx->shared->mrb; struct st_mruby_subreq_t *subreq; mrb_value obj = mrb_ary_entry(args, 0); if (DATA_PTR(obj) == NULL) { return mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "downstream HTTP closed"); } else if ((subreq = mrb_data_check_get_ptr(mrb, obj, &app_input_stream_type)) == NULL) { return mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "AppInputStream#each wrong self"); } if (subreq->buf->size != 0) { *run_again = 1; mrb_value chunk = h2o_mruby_new_str(mrb, subreq->buf->bytes, subreq->buf->size); h2o_buffer_consume(&subreq->buf, subreq->buf->size); return chunk; } else if (subreq->state == FINAL_RECEIVED) { *run_again = 1; return mrb_nil_value(); } else { assert(mrb_nil_p(subreq->receiver)); subreq->receiver = *receiver; mrb_gc_register(mrb, *receiver); return mrb_nil_value(); } } void h2o_mruby_middleware_init_context(h2o_mruby_shared_context_t *shared_ctx) { mrb_state *mrb = shared_ctx->mrb; h2o_mruby_eval_expr(mrb, H2O_MRUBY_CODE_MIDDLEWARE); h2o_mruby_assert(mrb); struct RClass *module = mrb_define_module(mrb, "H2O"); struct RClass *app_klass = mrb_class_get_under(shared_ctx->mrb, module, "App"); mrb_define_method(mrb, app_klass, "request", middleware_request_method, MRB_ARGS_ARG(1, 0)); struct RClass *app_request_klass = mrb_class_get_under(shared_ctx->mrb, module, "AppRequest"); mrb_ary_set(shared_ctx->mrb, shared_ctx->constants, H2O_MRUBY_APP_REQUEST_CLASS, mrb_obj_value(app_request_klass)); h2o_mruby_define_callback(mrb, "_h2o_middleware_wait_response", middleware_wait_response_callback); mrb_define_method(mrb, app_request_klass, "_can_build_response?", can_build_response_method, MRB_ARGS_NONE()); mrb_define_method(mrb, app_request_klass, "_build_response", build_response_method, MRB_ARGS_NONE()); struct RClass *app_input_stream_klass = mrb_class_get_under(shared_ctx->mrb, module, "AppInputStream"); mrb_ary_set(shared_ctx->mrb, shared_ctx->constants, H2O_MRUBY_APP_INPUT_STREAM_CLASS, mrb_obj_value(app_input_stream_klass)); 
h2o_mruby_define_callback(mrb, "_h2o_middleware_wait_chunk", middleware_wait_chunk_callback); h2o_mruby_assert(mrb); } void do_sender_start(h2o_mruby_generator_t *generator) { struct st_h2o_mruby_middleware_sender_t *sender = (void *)generator->sender; struct st_mruby_subreq_t *subreq = sender->subreq; if (subreq->buf->size == 0 && subreq->state != FINAL_RECEIVED) { h2o_doublebuffer_prepare_empty(&sender->sending); h2o_send(generator->req, NULL, 0, H2O_SEND_STATE_IN_PROGRESS); } else { h2o_mruby_sender_do_send_buffer(generator, &sender->sending, &subreq->buf, subreq->state == FINAL_RECEIVED ? H2O_SEND_STATE_FINAL : H2O_SEND_STATE_IN_PROGRESS); } } void do_sender_proceed(h2o_generator_t *_generator, h2o_req_t *req) { h2o_mruby_generator_t *generator = (void *)_generator; struct st_h2o_mruby_middleware_sender_t *sender = (void *)generator->sender; struct st_mruby_subreq_t *subreq = sender->subreq; if (generator->sender->final_sent) return; /* TODO: close subreq ASAP */ if (subreq->buf != NULL) { h2o_doublebuffer_consume(&sender->sending); if (subreq->buf->size != 0) { h2o_mruby_sender_do_send_buffer(generator, &sender->sending, &subreq->buf, subreq->state == FINAL_RECEIVED ? H2O_SEND_STATE_FINAL : H2O_SEND_STATE_IN_PROGRESS); return; /* don't proceed because it's already requested in subreq_ostream_send*/ } else { /* start direct shortcut */ h2o_buffer_dispose(&subreq->buf); subreq->buf = NULL; } } if (sender->subreq->chain_proceed) h2o_proceed_response(&sender->subreq->super); } void do_sender_dispose(h2o_mruby_generator_t *generator) { struct st_h2o_mruby_middleware_sender_t *sender = (void *)generator->sender; h2o_doublebuffer_dispose(&sender->sending); if (sender->subreq->shortcut.response != NULL) { assert(!mrb_nil_p(sender->subreq->refs.request)); mrb_gc_unregister(generator->ctx->shared->mrb, sender->subreq->refs.request); sender->subreq->shortcut.response = NULL; } assert(sender->subreq->shortcut.body == generator); sender->subreq->shortcut.body = NULL; dispose_subreq(sender->subreq); sender->subreq = NULL; h2o_mruby_sender_close_body(generator); } static h2o_mruby_sender_t *create_sender(h2o_mruby_generator_t *generator, struct st_mruby_subreq_t *subreq, mrb_value body) { struct st_h2o_mruby_middleware_sender_t *sender = (void *)h2o_mruby_sender_create(generator, body, H2O_ALIGNOF(*sender), sizeof(*sender)); sender->subreq = subreq; h2o_doublebuffer_init(&sender->sending, &h2o_socket_buffer_prototype); sender->super.start = do_sender_start; sender->super.proceed = do_sender_proceed; sender->super.dispose = do_sender_dispose; subreq->shortcut.body = generator; return &sender->super; } h2o_mruby_sender_t *h2o_mruby_middleware_sender_create(h2o_mruby_generator_t *generator, mrb_value body) { mrb_state *mrb = generator->ctx->shared->mrb; struct st_mruby_subreq_t *subreq; assert(mrb->exc == NULL); if ((subreq = mrb_data_check_get_ptr(mrb, body, &app_input_stream_type)) == NULL) return NULL; return create_sender(generator, subreq, body); } static void send_response_shortcutted(struct st_mruby_subreq_t *subreq) { h2o_mruby_generator_t *generator = subreq->shortcut.response; assert(generator != NULL); /* copy response except for headers and original */ generator->req->res.status = subreq->super.res.status; generator->req->res.reason = subreq->super.res.reason; generator->req->res.content_length = subreq->super.res.content_length; generator->req->res.mime_attr = subreq->super.res.mime_attr; /* handle response headers */ int i; for (i = 0; i != subreq->super.res.headers.size; ++i) { 
h2o_header_t *header = subreq->super.res.headers.entries + i; h2o_mruby_set_response_header(generator->ctx->shared, header->name, header->value, generator->req); } /* add date: if it's missing from the response */ if (h2o_find_header(&generator->req->res.headers, H2O_TOKEN_DATE, SIZE_MAX) == -1) h2o_resp_add_date_header(generator->req); /* setup body sender */ h2o_mruby_sender_t *sender = create_sender(generator, subreq, mrb_nil_value()); generator->sender = sender; generator->super.proceed = sender->proceed; /* start sending response */ h2o_start_response(generator->req, &generator->super); generator->sender->start(generator); } static int send_response_callback(h2o_mruby_generator_t *generator, mrb_int status, mrb_value resp, int *is_delegate) { struct st_mruby_subreq_t *subreq = mrb_data_check_get_ptr(generator->ctx->shared->mrb, resp, &app_request_type); assert(subreq != NULL); assert(mrb_obj_ptr(subreq->refs.request) == mrb_obj_ptr(resp)); subreq->shortcut.response = generator; mrb_gc_register(generator->ctx->shared->mrb, resp); /* prevent request and subreq from being disposed */ if (subreq->state != INITIAL) { /* immediately start sending response, otherwise defer it until once receive data from upstream (subreq_ostream_send) */ send_response_shortcutted(subreq); } return 0; } h2o_mruby_send_response_callback_t h2o_mruby_middleware_get_send_response_callback(h2o_mruby_context_t *ctx, mrb_value resp) { mrb_state *mrb = ctx->shared->mrb; struct st_mruby_subreq_t *subreq; if ((subreq = mrb_data_check_get_ptr(mrb, resp, &app_request_type)) == NULL) return NULL; return send_response_callback; }
1
13,454
@i110 Do you have an idea on how we should propagate errors that occur in this block? An error can occur when either i) `h2o_buffer_reserve` fails to allocate space (which happens on master as well), or ii) `fill_cb` returns an error (which is unique to this PR).
h2o-h2o
c
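The review above asks how errors in `append_bufs` should be propagated instead of hitting the `assert`/`h2o_fatal` placeholders. As a language-agnostic illustration only, here is a minimal Python sketch of that pattern (the names `SendVec`, `AppendError`, and `fill_cb` loosely mirror the C code and are not the real h2o API): the append loop reports each failure to its caller rather than aborting, so the caller can close the subrequest gracefully. Only the fill-callback failure is shown; an allocation failure would be reported the same way.

```python
from dataclasses import dataclass
from typing import Callable

class AppendError(Exception):
    """Illustrative; the real C code would more likely return an int status to its caller."""

@dataclass
class SendVec:
    length: int
    fill_cb: Callable[[bytearray], bool]  # copies `length` bytes into dst, returns True on success

def append_bufs(buf: bytearray, inbufs: list[SendVec]) -> None:
    for vec in inbufs:
        dst = bytearray(vec.length)        # stand-in for h2o_buffer_reserve()
        if not vec.fill_cb(dst):           # stand-in for (*fill_cb)(...)
            raise AppendError("fill callback failed")  # propagate instead of h2o_fatal()
        buf.extend(dst)                    # stand-in for subreq->buf->size += len

# usage: the caller decides how to dispose of the subrequest on failure
buf = bytearray()
good = SendVec(5, lambda dst: dst.__setitem__(slice(0, 5), b"hello") or True)
bad = SendVec(3, lambda dst: False)
append_bufs(buf, [good])                   # buf == b"hello"
try:
    append_bufs(buf, [bad])
except AppendError as exc:
    print("caller can close the subreq gracefully:", exc)
```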
@@ -3,7 +3,7 @@ class ReportMailer < ApplicationMailer def budget_status to_email = ENV.fetch('BUDGET_REPORT_RECIPIENT') - date = Time.now.in_time_zone('Eastern Time (US & Canada)').strftime("%a %m/%d/%y") + date = Time.now.utc.strftime("%a %m/%d/%y (%Z)") mail( to: to_email,
1
class ReportMailer < ApplicationMailer add_template_helper ReportHelper def budget_status to_email = ENV.fetch('BUDGET_REPORT_RECIPIENT') date = Time.now.in_time_zone('Eastern Time (US & Canada)').strftime("%a %m/%d/%y") mail( to: to_email, subject: "C2: Daily Budget report for #{date}", from: self.sender_email ) end end
1
13,855
Does this mean the times will show up in emails as UTC?
18F-C2
rb
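To make the reviewer's question concrete: with the patched format the string is rendered from a UTC clock and `(%Z)` appends the zone name, which can also shift the calendar date for evening US times. A small sketch using Python's standard library rather than Ruby/ActiveSupport (the instant is illustrative, and the IANA name `America/New_York` is assumed to correspond to 'Eastern Time (US & Canada)'):

```python
from datetime import datetime, timezone
from zoneinfo import ZoneInfo  # Python 3.9+

now = datetime(2021, 3, 15, 1, 30, tzinfo=timezone.utc)   # illustrative instant

# Before the patch: Eastern Time, no zone marker in the output
eastern = now.astimezone(ZoneInfo("America/New_York"))
print(eastern.strftime("%a %m/%d/%y"))        # Sun 03/14/21

# After the patch: UTC, with the zone name appended (and a different date here)
print(now.strftime("%a %m/%d/%y (%Z)"))       # Mon 03/15/21 (UTC)
```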
@@ -191,11 +191,10 @@ options.Bounds = Options('style', color='black') options.Ellipse = Options('style', color='black') options.Polygons = Options('style', color=Cycle(), line_color='black', cmap=dflt_cmap) -options.Rectangles = Options('style', cmap=dflt_cmap) -options.Segments = Options('style', cmap=dflt_cmap) # Geometries -options.Rectangles = Options('style', line_color='black') +options.Rectangles = Options('style', color=Cycle(), cmap=dflt_cmap, line_color='black') +options.Segments = Options('style', color='black', cmap=dflt_cmap) # Rasters options.Image = Options('style', cmap=config.default_gridded_cmap)
1
import sys import numpy as np from bokeh.palettes import all_palettes from param import concrete_descendents from ...core import (Store, Overlay, NdOverlay, Layout, AdjointLayout, GridSpace, GridMatrix, NdLayout, config) from ...element import (Curve, Points, Scatter, Image, Raster, Path, RGB, Histogram, Spread, HeatMap, Contours, Bars, Box, Bounds, Ellipse, Polygons, BoxWhisker, Arrow, ErrorBars, Text, HLine, VLine, HSpan, VSpan, Spline, Spikes, Table, ItemTable, Area, HSV, QuadMesh, VectorField, Graph, Nodes, EdgePaths, Distribution, Bivariate, TriMesh, Violin, Chord, Div, HexTiles, Labels, Sankey, Tiles, Segments, Slope, Rectangles) from ...core.options import Options, Cycle, Palette try: from ...interface import DFrame except: DFrame = None from .annotation import ( TextPlot, LineAnnotationPlot, BoxAnnotationPlot, SplinePlot, ArrowPlot, DivPlot, LabelsPlot, SlopePlot ) from ..plot import PlotSelector from ..util import fire from .callbacks import Callback # noqa (API import) from .element import OverlayPlot, ElementPlot from .chart import (PointPlot, CurvePlot, SpreadPlot, ErrorPlot, HistogramPlot, SideHistogramPlot, BarPlot, SpikesPlot, SideSpikesPlot, AreaPlot, VectorFieldPlot) from .geometry import SegmentPlot, RectanglesPlot from .graphs import GraphPlot, NodePlot, TriMeshPlot, ChordPlot from .heatmap import HeatMapPlot, RadialHeatMapPlot from .hex_tiles import HexTilesPlot from .links import LinkCallback # noqa (API import) from .path import PathPlot, PolygonPlot, ContourPlot from .plot import GridPlot, LayoutPlot, AdjointLayoutPlot from .raster import RasterPlot, RGBPlot, HSVPlot, QuadMeshPlot from .renderer import BokehRenderer from .sankey import SankeyPlot from .stats import DistributionPlot, BivariatePlot, BoxWhiskerPlot, ViolinPlot from .tabular import TablePlot from .tiles import TilePlot from .util import bokeh_version # noqa (API import) Store.renderers['bokeh'] = BokehRenderer.instance() if len(Store.renderers) == 1: Store.set_current_backend('bokeh') associations = {Overlay: OverlayPlot, NdOverlay: OverlayPlot, GridSpace: GridPlot, GridMatrix: GridPlot, AdjointLayout: AdjointLayoutPlot, Layout: LayoutPlot, NdLayout: LayoutPlot, # Charts Curve: CurvePlot, Bars: BarPlot, Points: PointPlot, Scatter: PointPlot, ErrorBars: ErrorPlot, Spread: SpreadPlot, Spikes: SpikesPlot, Area: AreaPlot, VectorField: VectorFieldPlot, Histogram: HistogramPlot, # Rasters Image: RasterPlot, RGB: RGBPlot, HSV: HSVPlot, Raster: RasterPlot, HeatMap: PlotSelector(HeatMapPlot.is_radial, {True: RadialHeatMapPlot, False: HeatMapPlot}, True), QuadMesh: QuadMeshPlot, # Paths Path: PathPlot, Contours: ContourPlot, Path: PathPlot, Box: PathPlot, Bounds: PathPlot, Ellipse: PathPlot, Polygons: PolygonPlot, # Geometry Rectangles: RectanglesPlot, Segments: SegmentPlot, # Annotations HLine: LineAnnotationPlot, VLine: LineAnnotationPlot, HSpan: BoxAnnotationPlot, VSpan: BoxAnnotationPlot, Slope: SlopePlot, Text: TextPlot, Labels: LabelsPlot, Spline: SplinePlot, Arrow: ArrowPlot, Div: DivPlot, Tiles: TilePlot, # Graph Elements Graph: GraphPlot, Chord: ChordPlot, Nodes: NodePlot, EdgePaths: PathPlot, TriMesh: TriMeshPlot, Sankey: SankeyPlot, # Tabular Table: TablePlot, ItemTable: TablePlot, # Statistics Distribution: DistributionPlot, Bivariate: BivariatePlot, BoxWhisker: BoxWhiskerPlot, Violin: ViolinPlot, HexTiles: HexTilesPlot} if DFrame is not None: associations[DFrame] = TablePlot Store.register(associations, 'bokeh') if config.no_padding: for plot in concrete_descendents(ElementPlot).values(): plot.padding 
= 0 # Raster types, Path types and VectorField should have frames for framedcls in [VectorFieldPlot, ContourPlot, PathPlot, PolygonPlot, RasterPlot, RGBPlot, HSVPlot, QuadMeshPlot, HeatMapPlot]: framedcls.show_frame = True AdjointLayoutPlot.registry[Histogram] = SideHistogramPlot AdjointLayoutPlot.registry[Spikes] = SideSpikesPlot point_size = np.sqrt(6) # Matches matplotlib default # Register bokeh.palettes with Palette and Cycle def colormap_generator(palette): # Epsilon ensures float precision doesn't cause issues (#4911) epsilon = sys.float_info.epsilon*10 return lambda value: palette[int(value*(len(palette)-1)+epsilon)] Palette.colormaps.update({name: colormap_generator(p[max(p.keys())]) for name, p in all_palettes.items()}) Cycle.default_cycles.update({name: p[max(p.keys())] for name, p in all_palettes.items() if max(p.keys()) < 256}) dflt_cmap = config.default_cmap all_palettes['fire'] = {len(fire): fire} options = Store.options(backend='bokeh') # Charts options.Curve = Options('style', color=Cycle(), line_width=2) options.BoxWhisker = Options('style', box_fill_color=Cycle(), whisker_color='black', box_line_color='black', outlier_color='black') options.Scatter = Options('style', color=Cycle(), size=point_size, cmap=dflt_cmap) options.Points = Options('style', color=Cycle(), size=point_size, cmap=dflt_cmap) options.Points = Options('plot', show_frame=True) options.Histogram = Options('style', line_color='black', color=Cycle(), muted_alpha=0.2) options.ErrorBars = Options('style', color='black') options.Spread = Options('style', color=Cycle(), alpha=0.6, line_color='black', muted_alpha=0.2) options.Bars = Options('style', color=Cycle(), line_color='black', bar_width=0.8, muted_alpha=0.2) options.Spikes = Options('style', color='black', cmap=dflt_cmap, muted_alpha=0.2) options.Area = Options('style', color=Cycle(), alpha=1, line_color='black', muted_alpha=0.2) options.VectorField = Options('style', color='black', muted_alpha=0.2) # Paths options.Contours = Options('plot', show_legend=True) options.Contours = Options('style', color=Cycle(), cmap=dflt_cmap) options.Path = Options('style', color=Cycle(), cmap=dflt_cmap) options.Box = Options('style', color='black') options.Bounds = Options('style', color='black') options.Ellipse = Options('style', color='black') options.Polygons = Options('style', color=Cycle(), line_color='black', cmap=dflt_cmap) options.Rectangles = Options('style', cmap=dflt_cmap) options.Segments = Options('style', cmap=dflt_cmap) # Geometries options.Rectangles = Options('style', line_color='black') # Rasters options.Image = Options('style', cmap=config.default_gridded_cmap) options.Raster = Options('style', cmap=config.default_gridded_cmap) options.QuadMesh = Options('style', cmap=config.default_gridded_cmap, line_alpha=0) options.HeatMap = Options('style', cmap=config.default_heatmap_cmap, annular_line_alpha=0, xmarks_line_color="#FFFFFF", xmarks_line_width=3, ymarks_line_color="#FFFFFF", ymarks_line_width=3) # Annotations options.HLine = Options('style', color=Cycle(), line_width=3, alpha=1) options.VLine = Options('style', color=Cycle(), line_width=3, alpha=1) options.Slope = Options('style', color=Cycle(), line_width=3, alpha=1) options.VSpan = Options('style', color=Cycle(), alpha=0.5) options.HSpan = Options('style', color=Cycle(), alpha=0.5) options.Arrow = Options('style', arrow_size=10) options.Labels = Options('style', text_align='center', text_baseline='middle') # Graphs options.Graph = Options( 'style', node_size=15, node_color=Cycle(), 
node_line_color='black', node_nonselection_fill_color=Cycle(), node_hover_line_color='black', node_hover_fill_color='limegreen', node_nonselection_alpha=0.2, edge_nonselection_alpha=0.2, node_nonselection_line_color='black', edge_color='black', edge_line_width=2, edge_nonselection_line_color='black', edge_hover_line_color='limegreen' ) options.TriMesh = Options( 'style', node_size=5, node_line_color='black', node_color='white', edge_line_color='black', node_hover_fill_color='limegreen', edge_line_width=1, edge_hover_line_color='limegreen', edge_nonselection_alpha=0.2, edge_nonselection_line_color='black', node_nonselection_alpha=0.2, cmap=dflt_cmap ) options.TriMesh = Options('plot', tools=[]) options.Chord = Options('style', node_size=15, node_color=Cycle(), node_line_color='black', node_selection_fill_color='limegreen', node_nonselection_fill_color=Cycle(), node_hover_line_color='black', node_nonselection_line_color='black', node_selection_line_color='black', node_hover_fill_color='limegreen', node_nonselection_alpha=0.2, edge_nonselection_alpha=0.1, edge_line_color='black', edge_line_width=1, edge_nonselection_line_color='black', edge_hover_line_color='limegreen', edge_selection_line_color='limegreen', label_text_font_size='8pt') options.Chord = Options('plot', xaxis=None, yaxis=None) options.Nodes = Options('style', line_color='black', color=Cycle(), size=20, nonselection_fill_color=Cycle(), selection_fill_color='limegreen', hover_fill_color='indianred') options.Nodes = Options('plot', tools=['hover', 'tap']) options.EdgePaths = Options('style', color='black', nonselection_alpha=0.2, line_width=2, selection_color='limegreen', hover_line_color='indianred') options.EdgePaths = Options('plot', tools=['hover', 'tap']) options.Sankey = Options( 'plot', xaxis=None, yaxis=None, inspection_policy='edges', selection_policy='nodes', show_frame=False, width=1000, height=600 ) options.Sankey = Options( 'style', node_nonselection_alpha=0.2, node_size=10, edge_nonselection_alpha=0.2, edge_fill_alpha=0.6, label_text_font_size='8pt', cmap='Category20', node_line_color='black', node_selection_line_color='black', node_hover_alpha=1, edge_hover_alpha=0.9 ) # Define composite defaults options.GridMatrix = Options('plot', shared_xaxis=True, shared_yaxis=True, xaxis=None, yaxis=None) options.Overlay = Options('style', click_policy='mute') options.NdOverlay = Options('style', click_policy='mute') options.Curve = Options('style', muted_alpha=0.2) options.Path = Options('style', muted_alpha=0.2) options.Scatter = Options('style', muted_alpha=0.2) options.Points = Options('style', muted_alpha=0.2) options.Polygons = Options('style', muted_alpha=0.2) # Statistics options.Distribution = Options( 'style', color=Cycle(), line_color='black', fill_alpha=0.5, muted_alpha=0.2 ) options.Violin = Options( 'style', violin_fill_color=Cycle(), violin_line_color='black', violin_fill_alpha=0.5, stats_color='black', box_color='black', median_color='white', cmap='Category10' ) options.HexTiles = Options('style', muted_alpha=0.2)
1
24,548
What's the motivation for having one of these be a cycle and the other be a fixed color?
holoviz-holoviews
py
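For background on the reviewer's question, a minimal HoloViews sketch of what the two kinds of defaults do: `Cycle()` assigns successive palette colors to the elements in an overlay, while a fixed color such as `'black'` paints them all the same. This assumes the public `hv.opts`/`hv.Cycle` API with the bokeh backend; the element data is made up for illustration.

```python
import holoviews as hv
from holoviews import opts, Cycle
hv.extension('bokeh')

# Two overlaid Rectangles and two overlaid Segments with toy coordinates.
rects = hv.Rectangles([(0, 0, 1, 1)]) * hv.Rectangles([(2, 0, 3, 1)])
segs = hv.Segments([(0, 2, 1, 3)]) * hv.Segments([(2, 2, 3, 3)])

# color=Cycle(): each Rectangles element gets the next palette color.
# color='black': every Segments element is drawn in the same fixed color.
layout = (rects + segs).opts(
    opts.Rectangles(color=Cycle(), line_color='black'),
    opts.Segments(color='black'),
)
layout  # rendering this (e.g. in a notebook) shows cycled vs. fixed colors
```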
@@ -136,7 +136,7 @@ namespace ResultsComparer var table = data.ToMarkdownTable().WithHeaders(conclusion.ToString(), conclusion == EquivalenceTestConclusion.Faster ? "base/diff" : "diff/base", "Base Median (ns)", "Diff Median (ns)", "Modality"); - foreach (var line in table.ToMarkdown().Split(Environment.NewLine, StringSplitOptions.RemoveEmptyEntries)) + foreach (var line in table.ToMarkdown().Split(Environment.NewLine)) Console.WriteLine($"| {line.TrimStart()}|"); // the table starts with \t and does not end with '|' and it looks bad so we fix it Console.WriteLine();
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. using System; using System.Collections.Generic; using System.Globalization; using System.IO; using System.Linq; using System.Text.RegularExpressions; using System.Threading; using System.Xml; using BenchmarkDotNet.Mathematics; using BenchmarkDotNet.Mathematics.StatisticalTesting; using CommandLine; using DataTransferContracts; using MarkdownLog; using Newtonsoft.Json; namespace ResultsComparer { public class Program { private const string FullBdnJsonFileExtension = "full.json"; public static void Main(string[] args) { // we print a lot of numbers here and we want to make it always in invariant way Thread.CurrentThread.CurrentCulture = CultureInfo.InvariantCulture; Parser.Default.ParseArguments<CommandLineOptions>(args).WithParsed(Compare); } private static void Compare(CommandLineOptions args) { if (!Threshold.TryParse(args.StatisticalTestThreshold, out var testThreshold)) { Console.WriteLine($"Invalid Threshold {args.StatisticalTestThreshold}. Examples: 5%, 10ms, 100ns, 1s."); return; } if (!Threshold.TryParse(args.NoiseThreshold, out var noiseThreshold)) { Console.WriteLine($"Invalid Noise Threshold {args.NoiseThreshold}. Examples: 0.3ns 1ns."); return; } var notSame = GetNotSameResults(args, testThreshold, noiseThreshold).ToArray(); if (!notSame.Any()) { Console.WriteLine($"No differences found between the benchmark results with threshold {testThreshold}."); return; } PrintSummary(notSame); PrintTable(notSame, EquivalenceTestConclusion.Slower, args); PrintTable(notSame, EquivalenceTestConclusion.Faster, args); ExportToCsv(notSame, args.CsvPath); ExportToXml(notSame, args.XmlPath); } private static IEnumerable<(string id, Benchmark baseResult, Benchmark diffResult, EquivalenceTestConclusion conclusion)> GetNotSameResults(CommandLineOptions args, Threshold testThreshold, Threshold noiseThreshold) { foreach (var pair in ReadResults(args) .Where(result => result.baseResult.Statistics != null && result.diffResult.Statistics != null)) // failures { var baseValues = pair.baseResult.GetOriginalValues(); var diffValues = pair.diffResult.GetOriginalValues(); var userTresholdResult = StatisticalTestHelper.CalculateTost(MannWhitneyTest.Instance, baseValues, diffValues, testThreshold); if (userTresholdResult.Conclusion == EquivalenceTestConclusion.Same) continue; var noiseResult = StatisticalTestHelper.CalculateTost(MannWhitneyTest.Instance, baseValues, diffValues, noiseThreshold); if (noiseResult.Conclusion == EquivalenceTestConclusion.Same) continue; yield return (pair.id, pair.baseResult, pair.diffResult, userTresholdResult.Conclusion); } } private static void PrintSummary((string id, Benchmark baseResult, Benchmark diffResult, EquivalenceTestConclusion conclusion)[] notSame) { var better = notSame.Where(result => result.conclusion == EquivalenceTestConclusion.Faster); var worse = notSame.Where(result => result.conclusion == EquivalenceTestConclusion.Slower); var betterCount = better.Count(); var worseCount = worse.Count(); // If the baseline doesn't have the same set of tests, you wind up with Infinity in the list of diffs. // Exclude them for purposes of geomean. 
worse = worse.Where(x => GetRatio(x) != double.PositiveInfinity); better = better.Where(x => GetRatio(x) != double.PositiveInfinity); Console.WriteLine("summary:"); if (betterCount > 0) { var betterGeoMean = Math.Pow(10, better.Skip(1).Aggregate(Math.Log10(GetRatio(better.First())), (x, y) => x + Math.Log10(GetRatio(y))) / better.Count()); Console.WriteLine($"better: {betterCount}, geomean: {betterGeoMean:F3}"); } if (worseCount > 0) { var worseGeoMean = Math.Pow(10, worse.Skip(1).Aggregate(Math.Log10(GetRatio(worse.First())), (x, y) => x + Math.Log10(GetRatio(y))) / worse.Count()); Console.WriteLine($"worse: {worseCount}, geomean: {worseGeoMean:F3}"); } Console.WriteLine($"total diff: {notSame.Count()}"); Console.WriteLine(); } private static void PrintTable((string id, Benchmark baseResult, Benchmark diffResult, EquivalenceTestConclusion conclusion)[] notSame, EquivalenceTestConclusion conclusion, CommandLineOptions args) { var data = notSame .Where(result => result.conclusion == conclusion) .OrderByDescending(result => GetRatio(conclusion, result.baseResult, result.diffResult)) .Take(args.TopCount ?? int.MaxValue) .Select(result => new { Id = result.id.Length > 80 ? result.id.Substring(0, 80) : result.id, DisplayValue = GetRatio(conclusion, result.baseResult, result.diffResult), BaseMedian = result.baseResult.Statistics.Median, DiffMedian = result.diffResult.Statistics.Median, Modality = GetModalInfo(result.baseResult) ?? GetModalInfo(result.diffResult) }) .ToArray(); if (!data.Any()) { Console.WriteLine($"No {conclusion} results for the provided threshold = {args.StatisticalTestThreshold} and noise filter = {args.NoiseThreshold}."); Console.WriteLine(); return; } var table = data.ToMarkdownTable().WithHeaders(conclusion.ToString(), conclusion == EquivalenceTestConclusion.Faster ? 
"base/diff" : "diff/base", "Base Median (ns)", "Diff Median (ns)", "Modality"); foreach (var line in table.ToMarkdown().Split(Environment.NewLine, StringSplitOptions.RemoveEmptyEntries)) Console.WriteLine($"| {line.TrimStart()}|"); // the table starts with \t and does not end with '|' and it looks bad so we fix it Console.WriteLine(); } private static IEnumerable<(string id, Benchmark baseResult, Benchmark diffResult)> ReadResults(CommandLineOptions args) { var baseFiles = GetFilesToParse(args.BasePath); var diffFiles = GetFilesToParse(args.DiffPath); if (!baseFiles.Any() || !diffFiles.Any()) throw new ArgumentException($"Provided paths contained no {FullBdnJsonFileExtension} files."); var baseResults = baseFiles.Select(ReadFromFile); var diffResults = diffFiles.Select(ReadFromFile); var filters = args.Filters.Select(pattern => new Regex(WildcardToRegex(pattern), RegexOptions.IgnoreCase | RegexOptions.CultureInvariant)).ToArray(); var benchmarkIdToDiffResults = diffResults .SelectMany(result => result.Benchmarks) .Where(benchmarkResult => !filters.Any() || filters.Any(filter => filter.IsMatch(benchmarkResult.FullName))) .ToDictionary(benchmarkResult => benchmarkResult.FullName, benchmarkResult => benchmarkResult); return baseResults .SelectMany(result => result.Benchmarks) .ToDictionary(benchmarkResult => benchmarkResult.FullName, benchmarkResult => benchmarkResult) // we use ToDictionary to make sure the results have unique IDs .Where(baseResult => benchmarkIdToDiffResults.ContainsKey(baseResult.Key)) .Select(baseResult => (baseResult.Key, baseResult.Value, benchmarkIdToDiffResults[baseResult.Key])); } private static void ExportToCsv((string id, Benchmark baseResult, Benchmark diffResult, EquivalenceTestConclusion conclusion)[] notSame, FileInfo csvPath) { if (csvPath == null) return; if (csvPath.Exists) csvPath.Delete(); using (var textWriter = csvPath.CreateText()) { foreach (var result in notSame) { textWriter.WriteLine($"\"{result.id.Replace("\"", "\"\"")}\";base;{result.conclusion};{string.Join(';', result.baseResult.GetOriginalValues())}"); textWriter.WriteLine($"\"{result.id.Replace("\"", "\"\"")}\";diff;{result.conclusion};{string.Join(';', result.diffResult.GetOriginalValues())}"); } } Console.WriteLine($"CSV results exported to {csvPath.FullName}"); } private static void ExportToXml((string id, Benchmark baseResult, Benchmark diffResult, EquivalenceTestConclusion conclusion)[] notSame, FileInfo xmlPath) { if (xmlPath == null) { Console.WriteLine("No file given"); return; } if (xmlPath.Exists) xmlPath.Delete(); using (XmlWriter writer = XmlWriter.Create(xmlPath.Open(FileMode.OpenOrCreate, FileAccess.Write, FileShare.Write))) { writer.WriteStartElement("performance-tests"); foreach (var slower in notSame.Where(x => x.conclusion == EquivalenceTestConclusion.Slower)) { writer.WriteStartElement("test"); writer.WriteAttributeString("name", slower.id); writer.WriteAttributeString("type", slower.baseResult.Type); writer.WriteAttributeString("method", slower.baseResult.Method); writer.WriteAttributeString("time", "0"); writer.WriteAttributeString("result", "Fail"); writer.WriteStartElement("failure"); writer.WriteAttributeString("exception-type", "Regression"); writer.WriteElementString("message", $"{slower.id} has regressed, was {slower.baseResult.Statistics.Median} is {slower.diffResult.Statistics.Median}."); writer.WriteEndElement(); } foreach (var faster in notSame.Where(x => x.conclusion == EquivalenceTestConclusion.Faster)) { writer.WriteStartElement("test"); 
writer.WriteAttributeString("name", faster.id); writer.WriteAttributeString("type", faster.baseResult.Type); writer.WriteAttributeString("method", faster.baseResult.Method); writer.WriteAttributeString("time", "0"); writer.WriteAttributeString("result", "Skip"); writer.WriteElementString("reason", $"{faster.id} has improved, was {faster.baseResult.Statistics.Median} is {faster.diffResult.Statistics.Median}."); writer.WriteEndElement(); } writer.WriteEndElement(); writer.Flush(); } Console.WriteLine($"XML results exported to {xmlPath.FullName}"); } private static string[] GetFilesToParse(string path) { if (Directory.Exists(path)) return Directory.GetFiles(path, $"*{FullBdnJsonFileExtension}", SearchOption.AllDirectories); else if (File.Exists(path) || !path.EndsWith(FullBdnJsonFileExtension)) return new[] { path }; else throw new FileNotFoundException($"Provided path does NOT exist or is not a {path} file", path); } // code and magic values taken from BenchmarkDotNet.Analysers.MultimodalDistributionAnalyzer // See http://www.brendangregg.com/FrequencyTrails/modes.html private static string GetModalInfo(Benchmark benchmark) { if (benchmark.Statistics.N < 12) // not enough data to tell return null; double mValue = MathHelper.CalculateMValue(new BenchmarkDotNet.Mathematics.Statistics(benchmark.GetOriginalValues())); if (mValue > 4.2) return "multimodal"; else if (mValue > 3.2) return "bimodal"; else if (mValue > 2.8) return "several?"; return null; } private static double GetRatio((string id, Benchmark baseResult, Benchmark diffResult, EquivalenceTestConclusion conclusion) item) => GetRatio(item.conclusion, item.baseResult, item.diffResult); private static double GetRatio(EquivalenceTestConclusion conclusion, Benchmark baseResult, Benchmark diffResult) => conclusion == EquivalenceTestConclusion.Faster ? baseResult.Statistics.Median / diffResult.Statistics.Median : diffResult.Statistics.Median / baseResult.Statistics.Median; private static BdnResult ReadFromFile(string resultFilePath) { try { return JsonConvert.DeserializeObject<BdnResult>(File.ReadAllText(resultFilePath)); } catch (JsonSerializationException) { Console.WriteLine($"Exception while reading the {resultFilePath} file."); throw; } } // https://stackoverflow.com/a/6907849/5852046 not perfect but should work for all we need private static string WildcardToRegex(string pattern) => $"^{Regex.Escape(pattern).Replace(@"\*", ".*").Replace(@"\?", ".")}$"; } }
1
9,922
What will be an empty entry now?
dotnet-performance
.cs
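To illustrate what the reviewer is asking: if `table.ToMarkdown()` ends with a newline, splitting on `Environment.NewLine` without `RemoveEmptyEntries` yields a trailing empty string, and the print loop would emit a bare `| |` row for it. A small Python sketch of the splitting behavior (not the actual MarkdownLog output, which is only assumed here to end with a newline):

```python
markdown = "\tName | Ratio\n\tFoo  | 1.23\n"     # assumed: the table text ends with a newline

with_empty = markdown.split("\n")                # like Split(Environment.NewLine)
without_empty = [l for l in with_empty if l]     # like adding RemoveEmptyEntries

print(with_empty)     # ['\tName | Ratio', '\tFoo  | 1.23', '']  <- trailing empty entry
print(without_empty)  # ['\tName | Ratio', '\tFoo  | 1.23']

for line in with_empty:
    print(f"| {line.lstrip()}|")                 # the empty entry becomes a bare '| |' row
```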
@@ -71,6 +71,10 @@ export class ManualColumnResize extends BasePlugin { addClass(this.guide, 'manualColumnResizerGuide'); } + get inlineDir() { + return this.hot.isRtl() ? 'right' : 'left'; + } + /** * Checks if the plugin is enabled in the handsontable settings. This method is executed in {@link Hooks#beforeInit} * hook and if it returns `true` than the {@link ManualColumnResize#enablePlugin} method is called.
1
import { BasePlugin } from '../base'; import { addClass, closest, hasClass, removeClass, outerHeight, isDetached } from '../../helpers/dom/element'; import EventManager from '../../eventManager'; import { arrayEach } from '../../helpers/array'; import { rangeEach } from '../../helpers/number'; import { PhysicalIndexToValueMap as IndexToValueMap } from '../../translations'; // Developer note! Whenever you make a change in this file, make an analogous change in manualRowResize.js export const PLUGIN_KEY = 'manualColumnResize'; export const PLUGIN_PRIORITY = 130; const PERSISTENT_STATE_KEY = 'manualColumnWidths'; const privatePool = new WeakMap(); /** * @plugin ManualColumnResize * @class ManualColumnResize * * @description * This plugin allows to change columns width. To make columns width persistent the {@link Options#persistentState} * plugin should be enabled. * * The plugin creates additional components to make resizing possibly using user interface: * - handle - the draggable element that sets the desired width of the column. * - guide - the helper guide that shows the desired width as a vertical guide. */ export class ManualColumnResize extends BasePlugin { static get PLUGIN_KEY() { return PLUGIN_KEY; } static get PLUGIN_PRIORITY() { return PLUGIN_PRIORITY; } constructor(hotInstance) { super(hotInstance); const { rootDocument } = this.hot; this.currentTH = null; this.currentCol = null; this.selectedCols = []; this.currentWidth = null; this.newSize = null; this.startY = null; this.startWidth = null; this.startOffset = null; this.handle = rootDocument.createElement('DIV'); this.guide = rootDocument.createElement('DIV'); this.eventManager = new EventManager(this); this.pressed = null; this.dblclick = 0; this.autoresizeTimeout = null; /** * PhysicalIndexToValueMap to keep and track widths for physical column indexes. * * @private * @type {PhysicalIndexToValueMap} */ this.columnWidthsMap = void 0; /** * Private pool to save configuration from updateSettings. */ privatePool.set(this, { config: void 0, }); addClass(this.handle, 'manualColumnResizer'); addClass(this.guide, 'manualColumnResizerGuide'); } /** * Checks if the plugin is enabled in the handsontable settings. This method is executed in {@link Hooks#beforeInit} * hook and if it returns `true` than the {@link ManualColumnResize#enablePlugin} method is called. * * @returns {boolean} */ isEnabled() { return this.hot.getSettings()[PLUGIN_KEY]; } /** * Enables the plugin functionality for this Handsontable instance. */ enablePlugin() { if (this.enabled) { return; } this.columnWidthsMap = new IndexToValueMap(); this.columnWidthsMap.addLocalHook('init', () => this.onMapInit()); this.hot.columnIndexMapper.registerMap(this.pluginName, this.columnWidthsMap); this.addHook('modifyColWidth', (width, col) => this.onModifyColWidth(width, col)); this.addHook('beforeStretchingColumnWidth', (stretchedWidth, column) => this.onBeforeStretchingColumnWidth(stretchedWidth, column)); this.addHook('beforeColumnResize', (newSize, column, isDoubleClick) => this.onBeforeColumnResize(newSize, column, isDoubleClick)); this.bindEvents(); super.enablePlugin(); } /** * Updates the plugin state. This method is executed when {@link Core#updateSettings} is invoked. */ updatePlugin() { this.disablePlugin(); this.enablePlugin(); super.updatePlugin(); } /** * Disables the plugin functionality for this Handsontable instance. 
*/ disablePlugin() { const priv = privatePool.get(this); priv.config = this.columnWidthsMap.getValues(); this.hot.columnIndexMapper.unregisterMap(this.pluginName); super.disablePlugin(); } /** * Saves the current sizes using the persistentState plugin (the {@link Options#persistentState} option has to be enabled). * * @fires Hooks#persistentStateSave */ saveManualColumnWidths() { this.hot.runHooks('persistentStateSave', PERSISTENT_STATE_KEY, this.columnWidthsMap.getValues()); } /** * Loads the previously saved sizes using the persistentState plugin (the {@link Options#persistentState} option has to be enabled). * * @returns {Array} * @fires Hooks#persistentStateLoad */ loadManualColumnWidths() { const storedState = {}; this.hot.runHooks('persistentStateLoad', PERSISTENT_STATE_KEY, storedState); return storedState.value; } /** * Sets the new width for specified column index. * * @param {number} column Visual column index. * @param {number} width Column width (no less than 20px). * @returns {number} Returns new width. */ setManualSize(column, width) { const newWidth = Math.max(width, 20); const physicalColumn = this.hot.toPhysicalColumn(column); this.columnWidthsMap.setValueAtIndex(physicalColumn, newWidth); return newWidth; } /** * Clears the cache for the specified column index. * * @param {number} column Visual column index. */ clearManualSize(column) { const physicalColumn = this.hot.toPhysicalColumn(column); this.columnWidthsMap.setValueAtIndex(physicalColumn, null); } /** * Callback to call on map's `init` local hook. * * @private */ onMapInit() { const priv = privatePool.get(this); const initialSetting = this.hot.getSettings()[PLUGIN_KEY]; const loadedManualColumnWidths = this.loadManualColumnWidths(); if (typeof loadedManualColumnWidths !== 'undefined') { this.hot.batchExecution(() => { loadedManualColumnWidths.forEach((width, physicalIndex) => { this.columnWidthsMap.setValueAtIndex(physicalIndex, width); }); }, true); } else if (Array.isArray(initialSetting)) { this.hot.batchExecution(() => { initialSetting.forEach((width, physicalIndex) => { this.columnWidthsMap.setValueAtIndex(physicalIndex, width); }); }, true); priv.config = initialSetting; } else if (initialSetting === true && Array.isArray(priv.config)) { this.hot.batchExecution(() => { priv.config.forEach((width, physicalIndex) => { this.columnWidthsMap.setValueAtIndex(physicalIndex, width); }); }, true); } } /** * Set the resize handle position. * * @private * @param {HTMLCellElement} TH TH HTML element. */ setupHandlePosition(TH) { if (!TH.parentNode) { return; } this.currentTH = TH; const { view: { wt } } = this.hot; const cellCoords = wt.wtTable.getCoords(this.currentTH); const col = cellCoords.col; // Ignore column headers. if (col < 0) { return; } const headerHeight = outerHeight(this.currentTH); const box = this.currentTH.getBoundingClientRect(); // Read "fixedColumnsStart" through the Walkontable as in that context, the fixed columns // are modified (reduced by the number of hidden columns) by TableView module. const fixedColumn = col < wt.getSetting('fixedColumnsStart'); let relativeHeaderPosition; if (fixedColumn) { relativeHeaderPosition = wt .wtOverlays .topInlineStartCornerOverlay .getRelativeCellPosition(this.currentTH, cellCoords.row, cellCoords.col); } // If the TH is not a child of the top-left overlay, recalculate using // the top overlay - as this overlay contains the rest of the headers. 
if (!relativeHeaderPosition) { relativeHeaderPosition = wt .wtOverlays .topOverlay .getRelativeCellPosition(this.currentTH, cellCoords.row, cellCoords.col); } this.currentCol = this.hot.columnIndexMapper.getVisualFromRenderableIndex(col); this.selectedCols = []; const isFullColumnSelected = this.hot.selection.isSelectedByCorner() || this.hot.selection.isSelectedByColumnHeader(); if (this.hot.selection.isSelected() && isFullColumnSelected) { const selectionRanges = this.hot.getSelectedRange(); arrayEach(selectionRanges, (selectionRange) => { const fromColumn = selectionRange.getTopLeftCorner().col; const toColumn = selectionRange.getBottomRightCorner().col; // Add every selected column for resize action. rangeEach(fromColumn, toColumn, (columnIndex) => { if (!this.selectedCols.includes(columnIndex)) { this.selectedCols.push(columnIndex); } }); }); } // Resizing element beyond the current selection (also when there is no selection). if (!this.selectedCols.includes(this.currentCol)) { this.selectedCols = [this.currentCol]; } this.startOffset = relativeHeaderPosition.left - 6; this.startWidth = parseInt(box.width, 10); this.handle.style.top = `${relativeHeaderPosition.top}px`; this.handle.style.left = `${this.startOffset + this.startWidth}px`; this.handle.style.height = `${headerHeight}px`; this.hot.rootElement.appendChild(this.handle); } /** * Refresh the resize handle position. * * @private */ refreshHandlePosition() { this.handle.style.left = `${this.startOffset + this.currentWidth}px`; } /** * Sets the resize guide position. * * @private */ setupGuidePosition() { const handleHeight = parseInt(outerHeight(this.handle), 10); const handleBottomPosition = parseInt(this.handle.style.top, 10) + handleHeight; const maximumVisibleElementHeight = parseInt(this.hot.view.maximumVisibleElementHeight(0), 10); addClass(this.handle, 'active'); addClass(this.guide, 'active'); this.guide.style.top = `${handleBottomPosition}px`; this.guide.style.left = this.handle.style.left; this.guide.style.height = `${maximumVisibleElementHeight - handleHeight}px`; this.hot.rootElement.appendChild(this.guide); } /** * Refresh the resize guide position. * * @private */ refreshGuidePosition() { this.guide.style.left = this.handle.style.left; } /** * Hides both the resize handle and resize guide. * * @private */ hideHandleAndGuide() { removeClass(this.handle, 'active'); removeClass(this.guide, 'active'); } /** * Checks if provided element is considered a column header. * * @private * @param {HTMLElement} element HTML element. * @returns {boolean} */ checkIfColumnHeader(element) { return !!closest(element, ['THEAD'], this.hot.rootElement); } /** * Gets the TH element from the provided element. * * @private * @param {HTMLElement} element HTML element. * @returns {HTMLElement} */ getClosestTHParent(element) { if (element.tagName !== 'TABLE') { if (element.tagName === 'TH') { return element; } return this.getClosestTHParent(element.parentNode); } return null; } /** * 'mouseover' event callback - set the handle position. * * @private * @param {MouseEvent} event The mouse event. */ onMouseOver(event) { // Workaround for #6926 - if the `event.target` is temporarily detached, we can skip this callback and wait for // the next `onmouseover`. 
if (isDetached(event.target)) { return; } if (this.checkIfColumnHeader(event.target)) { const th = this.getClosestTHParent(event.target); if (!th) { return; } const colspan = th.getAttribute('colspan'); if (th && (colspan === null || colspan === '1')) { if (!this.pressed) { this.setupHandlePosition(th); } } } } /** * Auto-size row after doubleclick - callback. * * @private * @fires Hooks#beforeColumnResize * @fires Hooks#afterColumnResize */ afterMouseDownTimeout() { const render = () => { this.hot.forceFullRender = true; this.hot.view.render(); // updates all this.hot.view.adjustElementsSize(true); }; const resize = (column, forceRender) => { const hookNewSize = this.hot.runHooks('beforeColumnResize', this.newSize, column, true); if (hookNewSize !== void 0) { this.newSize = hookNewSize; } if (this.hot.getSettings().stretchH === 'all') { this.clearManualSize(column); } else { this.setManualSize(column, this.newSize); // double click sets by auto row size plugin } this.saveManualColumnWidths(); this.hot.runHooks('afterColumnResize', this.newSize, column, true); if (forceRender) { render(); } }; if (this.dblclick >= 2) { const selectedColsLength = this.selectedCols.length; if (selectedColsLength > 1) { arrayEach(this.selectedCols, (selectedCol) => { resize(selectedCol); }); render(); } else { arrayEach(this.selectedCols, (selectedCol) => { resize(selectedCol, true); }); } } this.dblclick = 0; this.autoresizeTimeout = null; } /** * 'mousedown' event callback. * * @private * @param {MouseEvent} event The mouse event. */ onMouseDown(event) { if (hasClass(event.target, 'manualColumnResizer')) { this.setupHandlePosition(this.currentTH); this.setupGuidePosition(); this.pressed = true; if (this.autoresizeTimeout === null) { this.autoresizeTimeout = setTimeout(() => this.afterMouseDownTimeout(), 500); this.hot._registerTimeout(this.autoresizeTimeout); } this.dblclick += 1; this.startX = event.pageX; this.newSize = this.startWidth; } } /** * 'mousemove' event callback - refresh the handle and guide positions, cache the new column width. * * @private * @param {MouseEvent} event The mouse event. */ onMouseMove(event) { if (this.pressed) { this.currentWidth = this.startWidth + (event.pageX - this.startX); arrayEach(this.selectedCols, (selectedCol) => { this.newSize = this.setManualSize(selectedCol, this.currentWidth); }); this.refreshHandlePosition(); this.refreshGuidePosition(); } } /** * 'mouseup' event callback - apply the column resizing. * * @private * * @fires Hooks#beforeColumnResize * @fires Hooks#afterColumnResize */ onMouseUp() { const render = () => { this.hot.forceFullRender = true; this.hot.view.render(); // updates all this.hot.view.adjustElementsSize(true); }; const resize = (column, forceRender) => { this.hot.runHooks('beforeColumnResize', this.newSize, column, false); if (forceRender) { render(); } this.saveManualColumnWidths(); this.hot.runHooks('afterColumnResize', this.newSize, column, false); }; if (this.pressed) { this.hideHandleAndGuide(); this.pressed = false; if (this.newSize !== this.startWidth) { const selectedColsLength = this.selectedCols.length; if (selectedColsLength > 1) { arrayEach(this.selectedCols, (selectedCol) => { resize(selectedCol); }); render(); } else { arrayEach(this.selectedCols, (selectedCol) => { resize(selectedCol, true); }); } } this.setupHandlePosition(this.currentTH); } } /** * Binds the mouse events. 
* * @private */ bindEvents() { const { rootWindow, rootElement } = this.hot; this.eventManager.addEventListener(rootElement, 'mouseover', e => this.onMouseOver(e)); this.eventManager.addEventListener(rootElement, 'mousedown', e => this.onMouseDown(e)); this.eventManager.addEventListener(rootWindow, 'mousemove', e => this.onMouseMove(e)); this.eventManager.addEventListener(rootWindow, 'mouseup', () => this.onMouseUp()); } /** * Modifies the provided column width, based on the plugin settings. * * @private * @param {number} width Column width. * @param {number} column Visual column index. * @returns {number} */ onModifyColWidth(width, column) { let newWidth = width; if (this.enabled) { const physicalColumn = this.hot.toPhysicalColumn(column); const columnWidth = this.columnWidthsMap.getValueAtIndex(physicalColumn); if (this.hot.getSettings()[PLUGIN_KEY] && columnWidth) { newWidth = columnWidth; } } return newWidth; } /** * Modifies the provided column stretched width. This hook decides if specified column should be stretched or not. * * @private * @param {number} stretchedWidth Stretched width. * @param {number} column Visual column index. * @returns {number} */ onBeforeStretchingColumnWidth(stretchedWidth, column) { let width = this.columnWidthsMap.getValueAtIndex(column); if (width === null) { width = stretchedWidth; } return width; } /** * `beforeColumnResize` hook callback. * * @private */ onBeforeColumnResize() { // clear the header height cache information this.hot.view.wt.wtViewport.resetHasOversizedColumnHeadersMarked(); } /** * Destroys the plugin instance. */ destroy() { super.destroy(); } }
1
20,953
Can I ask you to add a JSDoc tag that would mark the prop as `@private`?
handsontable-handsontable
js
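A minimal sketch (not part of the dataset row above) of what the review comment is asking for: the `inlineDir` getter added by the patch, documented with a `@private` JSDoc tag. Only the getter body comes from the patch; the description and `@returns` wording are assumptions, and the surrounding class is abbreviated.

```js
export class ManualColumnResize extends BasePlugin {
  // ...constructor and other members as in the original file...

  /**
   * The CSS positioning property that matches the table's text direction.
   *
   * @private
   * @returns {string} `'right'` in RTL mode, `'left'` otherwise.
   */
  get inlineDir() {
    return this.hot.isRtl() ? 'right' : 'left';
  }
}
```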
@@ -113,3 +113,13 @@ func WriteFile(fromFile io.Reader, to string, mode os.FileMode) error { // And move it to its final destination. return os.Rename(tempFile.Name(), to) } + +// IsDirectory checks if a given path is a directory +func IsDirectory(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + + return info.IsDir() +}
1
// Package fs provides various filesystem helpers. package fs import ( "fmt" "io" "io/ioutil" "os" "path" "syscall" "gopkg.in/op/go-logging.v1" ) var log = logging.MustGetLogger("fs") // DirPermissions are the default permission bits we apply to directories. const DirPermissions = os.ModeDir | 0775 // EnsureDir ensures that the directory of the given file has been created. func EnsureDir(filename string) error { dir := path.Dir(filename) err := os.MkdirAll(dir, DirPermissions) if err != nil && FileExists(dir) { // It looks like this is a file and not a directory. Attempt to remove it; this can // happen in some cases if you change a rule from outputting a file to a directory. log.Warning("Attempting to remove file %s; a subdirectory is required", dir) if err2 := os.Remove(dir); err2 == nil { err = os.MkdirAll(dir, DirPermissions) } else { log.Error("%s", err2) } } return err } // PathExists returns true if the given path exists, as a file or a directory. func PathExists(filename string) bool { _, err := os.Lstat(filename) return err == nil } // FileExists returns true if the given path exists and is a file. func FileExists(filename string) bool { info, err := os.Lstat(filename) return err == nil && !info.IsDir() } // IsSymlink returns true if the given path exists and is a symlink. func IsSymlink(filename string) bool { info, err := os.Lstat(filename) return err == nil && (info.Mode()&os.ModeSymlink) != 0 } // IsSameFile returns true if two filenames describe the same underlying file (i.e. inode) func IsSameFile(a, b string) bool { i1, err1 := getInode(a) i2, err2 := getInode(b) return err1 == nil && err2 == nil && i1 == i2 } // getInode returns the inode of a file. func getInode(filename string) (uint64, error) { fi, err := os.Stat(filename) if err != nil { return 0, err } s, ok := fi.Sys().(*syscall.Stat_t) if !ok { return 0, fmt.Errorf("Not a syscall.Stat_t") } return uint64(s.Ino), nil } // CopyFile copies a file from 'from' to 'to', with an attempt to perform a copy & rename // to avoid chaos if anything goes wrong partway. func CopyFile(from string, to string, mode os.FileMode) error { fromFile, err := os.Open(from) if err != nil { return err } defer fromFile.Close() return WriteFile(fromFile, to, mode) } // WriteFile writes data from a reader to the file named 'to', with an attempt to perform // a copy & rename to avoid chaos if anything goes wrong partway. func WriteFile(fromFile io.Reader, to string, mode os.FileMode) error { if err := os.RemoveAll(to); err != nil { return err } dir, file := path.Split(to) if err := os.MkdirAll(dir, DirPermissions); err != nil { return err } tempFile, err := ioutil.TempFile(dir, file) if err != nil { return err } if _, err := io.Copy(tempFile, fromFile); err != nil { return err } if err := tempFile.Close(); err != nil { return err } // OK, now file is written; adjust permissions appropriately. if mode == 0 { mode = 0664 } if err := os.Chmod(tempFile.Name(), mode); err != nil { return err } // And move it to its final destination. return os.Rename(tempFile.Name(), to) }
1
8,599
Do you need this? I don't think you use it.
thought-machine-please
go
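For context on the comment above, a hedged sketch (not taken from the repository): if a call site ever does need the check, the standard library covers it inline, which is why an unused `IsDirectory` helper adds little. The `main` wrapper and the `os.TempDir()` path are illustrative assumptions.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	path := os.TempDir() // placeholder path for the example

	// Inline directory check with os.Stat; this is all a caller needs,
	// so a dedicated helper is only worth keeping if something calls it.
	if info, err := os.Stat(path); err == nil && info.IsDir() {
		fmt.Printf("%s is a directory\n", path)
	}
}
```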
@@ -0,0 +1,13 @@ +package com.fsck.k9.widget.list; + + +import android.content.Intent; +import android.widget.RemoteViewsService; + + +public class MessageListWidgetService extends RemoteViewsService { + @Override + public RemoteViewsFactory onGetViewFactory(Intent intent) { + return new MessageListRemoteViewFactory(this.getApplicationContext()); + } +}
1
1
14,885
`this.` seems unnecessary
k9mail-k-9
java
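A sketch of the same file with the reviewer's suggestion applied, i.e. calling the inherited method without the redundant `this.` qualifier; everything else is unchanged from the row above.

```java
package com.fsck.k9.widget.list;

import android.content.Intent;
import android.widget.RemoteViewsService;

public class MessageListWidgetService extends RemoteViewsService {
    @Override
    public RemoteViewsFactory onGetViewFactory(Intent intent) {
        // An unqualified call resolves to the same inherited Service method,
        // so the explicit `this.` adds nothing.
        return new MessageListRemoteViewFactory(getApplicationContext());
    }
}
```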
@@ -244,8 +244,8 @@ def connect_to_service(service_name, client=True, env=None, region_name=None, en endpoint_url = backend_url config = config or botocore.client.Config() # configure S3 path style addressing - if service_name == 's3': - config.s3 = {'addressing_style': 'path'} + # if service_name == 's3': + # config.s3 = {'addressing_style': 'path'} # To, prevent error "Connection pool is full, discarding connection ...", # set the environment variable MAX_POOL_CONNECTIONS. Default is 150. config.max_pool_connections = MAX_POOL_CONNECTIONS
1
import os import re import json import time import boto3 import logging import six import botocore from localstack import config from localstack.constants import ( INTERNAL_AWS_ACCESS_KEY_ID, REGION_LOCAL, LOCALHOST, MOTO_ACCOUNT_ID, ENV_DEV, APPLICATION_AMZ_JSON_1_1, APPLICATION_AMZ_JSON_1_0, APPLICATION_X_WWW_FORM_URLENCODED, TEST_AWS_ACCOUNT_ID, MAX_POOL_CONNECTIONS, TEST_AWS_ACCESS_KEY_ID, TEST_AWS_SECRET_ACCESS_KEY) from localstack.utils.aws import templating from localstack.utils.common import ( run_safe, to_str, is_string, is_string_or_bytes, make_http_request, is_port_open, get_service_protocol, retry, to_bytes) from localstack.utils.aws.aws_models import KinesisStream # AWS environment variable names ENV_ACCESS_KEY = 'AWS_ACCESS_KEY_ID' ENV_SECRET_KEY = 'AWS_SECRET_ACCESS_KEY' ENV_SESSION_TOKEN = 'AWS_SESSION_TOKEN' # set up logger LOG = logging.getLogger(__name__) # cache local region LOCAL_REGION = None # Use this field if you want to provide a custom boto3 session. # This field takes priority over CREATE_NEW_SESSION_PER_BOTO3_CONNECTION CUSTOM_BOTO3_SESSION = None # Use this flag to enable creation of a new session for each boto3 connection. # This flag will be ignored if CUSTOM_BOTO3_SESSION is specified CREATE_NEW_SESSION_PER_BOTO3_CONNECTION = False # Used in AWS assume role function INITIAL_BOTO3_SESSION = None # Boto clients cache BOTO_CLIENTS_CACHE = {} # Assume role loop seconds DEFAULT_TIMER_LOOP_SECONDS = 60 * 50 # maps SQS queue ARNs to queue URLs SQS_ARN_TO_URL_CACHE = {} class Environment(object): def __init__(self, region=None, prefix=None): # target is the runtime environment to use, e.g., # 'local' for local mode self.region = region or get_local_region() # prefix can be 'prod', 'stg', 'uat-1', etc. self.prefix = prefix def apply_json(self, j): if isinstance(j, str): j = json.loads(j) self.__dict__.update(j) @staticmethod def from_string(s): parts = s.split(':') if len(parts) == 1: if s in PREDEFINED_ENVIRONMENTS: return PREDEFINED_ENVIRONMENTS[s] parts = [get_local_region(), s] if len(parts) > 2: raise Exception('Invalid environment string "%s"' % s) region = parts[0] prefix = parts[1] return Environment(region=region, prefix=prefix) @staticmethod def from_json(j): if not isinstance(j, dict): j = j.to_dict() result = Environment() result.apply_json(j) return result def __str__(self): return '%s:%s' % (self.region, self.prefix) PREDEFINED_ENVIRONMENTS = { ENV_DEV: Environment(region=REGION_LOCAL, prefix=ENV_DEV) } def get_environment(env=None, region_name=None): """ Return an Environment object based on the input arguments. Parameter `env` can be either of: * None (or empty), in which case the rules below are applied to (env = os.environ['ENV'] or ENV_DEV) * an Environment object (then this object is returned) * a string '<region>:<name>', which corresponds to Environment(region='<region>', prefix='<prefix>') * the predefined string 'dev' (ENV_DEV), which implies Environment(region='local', prefix='dev') * a string '<name>', which implies Environment(region=DEFAULT_REGION, prefix='<name>') Additionally, parameter `region_name` can be used to override DEFAULT_REGION. 
""" if not env: if 'ENV' in os.environ: env = os.environ['ENV'] else: env = ENV_DEV elif not is_string(env) and not isinstance(env, Environment): raise Exception('Invalid environment: %s' % env) if is_string(env): env = Environment.from_string(env) if region_name: env.region = region_name if not env.region: raise Exception('Invalid region in environment: "%s"' % env) return env def is_local_env(env): return not env or env.region == REGION_LOCAL or env.prefix == ENV_DEV class Boto3Session(boto3.session.Session): """ Custom boto3 session that points to local endpoint URLs. """ def resource(self, service, *args, **kwargs): self._fix_endpoint(kwargs) return connect_to_resource(service, *args, **kwargs) def client(self, service, *args, **kwargs): self._fix_endpoint(kwargs) return connect_to_service(service, *args, **kwargs) def _fix_endpoint(self, kwargs): if 'amazonaws.com' in kwargs.get('endpoint_url', ''): kwargs.pop('endpoint_url') def get_boto3_credentials(): global INITIAL_BOTO3_SESSION if CUSTOM_BOTO3_SESSION: return CUSTOM_BOTO3_SESSION.get_credentials() if not INITIAL_BOTO3_SESSION: INITIAL_BOTO3_SESSION = boto3.session.Session() try: return INITIAL_BOTO3_SESSION.get_credentials() except Exception: return boto3.session.Session().get_credentials() def get_boto3_session(cache=True): if cache and CUSTOM_BOTO3_SESSION: return CUSTOM_BOTO3_SESSION if not cache or CREATE_NEW_SESSION_PER_BOTO3_CONNECTION: return boto3.session.Session() # return default session return boto3 def get_region(): # TODO look up region from context return get_local_region() def get_local_region(): global LOCAL_REGION if LOCAL_REGION is None: session = boto3.session.Session() LOCAL_REGION = session.region_name or '' return config.DEFAULT_REGION or LOCAL_REGION def is_internal_call_context(headers): """ Return whether we are executing in the context of an internal API call, i.e., the case where one API uses a boto3 client to call another API internally. """ auth_header = headers.get('Authorization') or '' header_value = 'Credential=%s/' % INTERNAL_AWS_ACCESS_KEY_ID return header_value in auth_header def set_internal_auth(headers): authorization = headers.get('Authorization') or '' authorization = re.sub(r'Credential=[^/]+/', 'Credential=%s/' % INTERNAL_AWS_ACCESS_KEY_ID, authorization) headers['Authorization'] = authorization return headers def get_local_service_url(service_name_or_port): """ Return the local service URL for the given service name or port. """ if isinstance(service_name_or_port, int): return '%s://%s:%s' % (get_service_protocol(), LOCALHOST, service_name_or_port) service_name = service_name_or_port if service_name == 's3api': service_name = 's3' elif service_name == 'runtime.sagemaker': service_name = 'sagemaker-runtime' service_name_upper = service_name.upper().replace('-', '_').replace('.', '_') return os.environ['TEST_%s_URL' % service_name_upper] def is_service_enabled(service_name): """ Return whether the service with the given name (e.g., "lambda") is available. """ try: url = get_local_service_url(service_name) assert url return is_port_open(url, http_path='/', expect_success=False) except Exception: return False def connect_to_resource(service_name, env=None, region_name=None, endpoint_url=None, *args, **kwargs): """ Generic method to obtain an AWS service resource using boto3, based on environment, region, or custom endpoint_url. 
""" return connect_to_service(service_name, client=False, env=env, region_name=region_name, endpoint_url=endpoint_url) def connect_to_service(service_name, client=True, env=None, region_name=None, endpoint_url=None, config=None, verify=False, cache=True, *args, **kwargs): """ Generic method to obtain an AWS service client using boto3, based on environment, region, or custom endpoint_url. """ region_name = region_name or get_region() env = get_environment(env, region_name=region_name) region = env.region if env.region != REGION_LOCAL else region_name key_elements = [service_name, client, env, region, endpoint_url, config] cache_key = '/'.join([str(k) for k in key_elements]) if not cache or cache_key not in BOTO_CLIENTS_CACHE: # Cache clients, as this is a relatively expensive operation my_session = get_boto3_session(cache=cache) method = my_session.client if client else my_session.resource if not endpoint_url: if is_local_env(env): endpoint_url = get_local_service_url(service_name) verify = False backend_env_name = '%s_BACKEND' % service_name.upper() backend_url = os.environ.get(backend_env_name, '').strip() if backend_url: endpoint_url = backend_url config = config or botocore.client.Config() # configure S3 path style addressing if service_name == 's3': config.s3 = {'addressing_style': 'path'} # To, prevent error "Connection pool is full, discarding connection ...", # set the environment variable MAX_POOL_CONNECTIONS. Default is 150. config.max_pool_connections = MAX_POOL_CONNECTIONS result = method(service_name, region_name=region, endpoint_url=endpoint_url, verify=verify, config=config) if not cache: return result BOTO_CLIENTS_CACHE[cache_key] = result return BOTO_CLIENTS_CACHE[cache_key] # TODO remove from here in the future def render_velocity_template(*args, **kwargs): return templating.render_velocity_template(*args, **kwargs) def generate_presigned_url(*args, **kwargs): id_before = os.environ.get(ENV_ACCESS_KEY) key_before = os.environ.get(ENV_SECRET_KEY) try: # Note: presigned URL needs to be created with test credentials os.environ[ENV_ACCESS_KEY] = TEST_AWS_ACCESS_KEY_ID os.environ[ENV_SECRET_KEY] = TEST_AWS_SECRET_ACCESS_KEY s3_client = connect_to_service('s3', cache=False) return s3_client.generate_presigned_url(*args, **kwargs) finally: if id_before: os.environ[ENV_ACCESS_KEY] = id_before if key_before: os.environ[ENV_SECRET_KEY] = key_before def check_valid_region(headers): """ Check whether a valid region is provided, and if not then raise an Exception. 
""" auth_header = headers.get('Authorization') if not auth_header: raise Exception('Unable to find "Authorization" header in request') replaced = re.sub(r'.*Credential=([^,]+),.*', r'\1', auth_header) if auth_header == replaced: raise Exception('Unable to find "Credential" section in "Authorization" header') # Format is: <your-access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request # See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html parts = replaced.split('/') region = parts[2] if region not in config.VALID_REGIONS: raise Exception('Invalid region specified in "Authorization" header: "%s"' % region) def set_default_region_in_headers(headers, service=None, region=None): auth_header = headers.get('Authorization') region = region or get_region() if not auth_header: if service: headers['Authorization'] = mock_aws_request_headers(service, region_name=region)['Authorization'] return replaced = re.sub(r'(.*Credential=[^/]+/[^/]+/)([^/])+/', r'\1%s/' % region, auth_header) headers['Authorization'] = replaced def fix_account_id_in_arns(response, colon_delimiter=':', existing=None, replace=None): """ Fix the account ID in the ARNs returned in the given Flask response or string """ existing = existing or ['123456789', '1234567890', '123456789012', MOTO_ACCOUNT_ID] existing = existing if isinstance(existing, list) else [existing] replace = replace or TEST_AWS_ACCOUNT_ID is_str_obj = is_string_or_bytes(response) content = to_str(response if is_str_obj else response._content) replace = r'arn{col}aws{col}\1{col}\2{col}{acc}{col}'.format(col=colon_delimiter, acc=replace) for acc_id in existing: regex = r'arn{col}aws{col}([^:%]+){col}([^:%]*){col}{acc}{col}'.format(col=colon_delimiter, acc=acc_id) content = re.sub(regex, replace, content) if not is_str_obj: response._content = content response.headers['Content-Length'] = len(response._content) return response return content def inject_test_credentials_into_env(env): env = env or {} if ENV_ACCESS_KEY not in env and ENV_SECRET_KEY not in env: env[ENV_ACCESS_KEY] = 'test' env[ENV_SECRET_KEY] = 'test' def inject_region_into_env(env, region): env['AWS_REGION'] = region def sqs_queue_url_for_arn(queue_arn): if '://' in queue_arn: return queue_arn if queue_arn in SQS_ARN_TO_URL_CACHE: return SQS_ARN_TO_URL_CACHE[queue_arn] sqs_client = connect_to_service('sqs') parts = queue_arn.split(':') result = sqs_client.get_queue_url(QueueName=parts[5], QueueOwnerAWSAccountId=parts[4])['QueueUrl'] SQS_ARN_TO_URL_CACHE[queue_arn] = result return result def extract_region_from_auth_header(headers): auth = headers.get('Authorization') or '' region = re.sub(r'.*Credential=[^/]+/[^/]+/([^/]+)/.*', r'\1', auth) region = region or get_region() return region def extract_region_from_arn(arn): parts = arn.split(':') return parts[3] if len(parts) > 1 else None def extract_service_from_arn(arn): parts = arn.split(':') return parts[2] if len(parts) > 1 else None def get_account_id(account_id=None, env=None): if account_id: return account_id env = get_environment(env) if is_local_env(env): return os.environ['TEST_AWS_ACCOUNT_ID'] raise Exception('Unable to determine AWS account ID (%s, %s)' % (account_id, env)) def role_arn(role_name, account_id=None, env=None): if not role_name: return role_name if role_name.startswith('arn:aws:iam::'): return role_name env = get_environment(env) account_id = get_account_id(account_id, env=env) return 'arn:aws:iam::%s:role/%s' % (account_id, role_name) def policy_arn(policy_name, account_id=None): if 
':policy/' in policy_name: return policy_name account_id = account_id or TEST_AWS_ACCOUNT_ID return 'arn:aws:iam::{}:policy/{}'.format(account_id, policy_name) def iam_resource_arn(resource, role=None, env=None): env = get_environment(env) if not role: role = get_iam_role(resource, env=env) return role_arn(role_name=role, account_id=get_account_id()) def get_iam_role(resource, env=None): env = get_environment(env) return 'role-%s' % resource def secretsmanager_secret_arn(secret_name, account_id=None, region_name=None): pattern = 'arn:aws:secretsmanager:%s:%s:secret:%s' return _resource_arn(secret_name, pattern, account_id=account_id, region_name=region_name) def cloudformation_stack_arn(stack_name, stack_id=None, account_id=None, region_name=None): stack_id = stack_id or 'id-123' pattern = 'arn:aws:cloudformation:%s:%s:stack/%s/{stack_id}'.format(stack_id=stack_id) return _resource_arn(stack_name, pattern, account_id=account_id, region_name=region_name) def cf_change_set_arn(change_set_name, change_set_id=None, account_id=None, region_name=None): change_set_id = change_set_id or 'id-456' pattern = 'arn:aws:cloudformation:%s:%s:changeSet/%s/{cs_id}'.format(cs_id=change_set_id) return _resource_arn(change_set_name, pattern, account_id=account_id, region_name=region_name) def dynamodb_table_arn(table_name, account_id=None, region_name=None): table_name = table_name.split(':table/')[-1] pattern = 'arn:aws:dynamodb:%s:%s:table/%s' return _resource_arn(table_name, pattern, account_id=account_id, region_name=region_name) def dynamodb_stream_arn(table_name, latest_stream_label, account_id=None): account_id = get_account_id(account_id) return ('arn:aws:dynamodb:%s:%s:table/%s/stream/%s' % (get_region(), account_id, table_name, latest_stream_label)) def cloudwatch_alarm_arn(alarm_name, account_id=None, region_name=None): pattern = 'arn:aws:cloudwatch:%s:%s:alarm:%s' return _resource_arn(alarm_name, pattern, account_id=account_id, region_name=region_name) def log_group_arn(group_name, account_id=None, region_name=None): pattern = 'arn:aws:logs:%s:%s:log-group:%s' return _resource_arn(group_name, pattern, account_id=account_id, region_name=region_name) def events_rule_arn(rule_name, account_id=None, region_name=None): pattern = 'arn:aws:events:%s:%s:rule/%s' return _resource_arn(rule_name, pattern, account_id=account_id, region_name=region_name) def lambda_function_arn(function_name, account_id=None, region_name=None): return lambda_function_or_layer_arn('function', function_name, account_id=account_id, region_name=region_name) def lambda_layer_arn(layer_name, version=None, account_id=None): return lambda_function_or_layer_arn('layer', layer_name, version=None, account_id=account_id) def lambda_function_or_layer_arn(type, entity_name, version=None, account_id=None, region_name=None): pattern = 'arn:aws:lambda:.*:.*:(function|layer):.*' if re.match(pattern, entity_name): return entity_name if ':' in entity_name: raise Exception('Lambda %s name should not contain a colon ":": %s' % (type, entity_name)) account_id = get_account_id(account_id) region_name = region_name or get_region() pattern = re.sub(r'\([^\|]+\|.+\)', type, pattern) result = pattern.replace('.*', '%s') % (region_name, account_id, entity_name) if version: result = '%s:%s' % (result, version) return result def lambda_function_name(name_or_arn): if ':' not in name_or_arn: return name_or_arn parts = name_or_arn.split(':') # name is index #6 in pattern: arn:aws:lambda:.*:.*:function:.* return parts[6] def state_machine_arn(name, 
account_id=None, region_name=None): pattern = 'arn:aws:states:%s:%s:stateMachine:%s' return _resource_arn(name, pattern, account_id=account_id, region_name=region_name) def stepfunctions_activity_arn(name, account_id=None, region_name=None): pattern = 'arn:aws:states:%s:%s:activity:%s' return _resource_arn(name, pattern, account_id=account_id, region_name=region_name) def fix_arn(arn): """ Function that attempts to "canonicalize" the given ARN. This includes converting resource names to ARNs, replacing incorrect regions, account IDs, etc. """ if arn.startswith('arn:aws:lambda'): parts = arn.split(':') region = parts[3] if parts[3] in config.VALID_REGIONS else get_region() return lambda_function_arn(lambda_function_name(arn), region_name=region) LOG.warning('Unable to fix/canonicalize ARN: %s' % arn) return arn def cognito_user_pool_arn(user_pool_id, account_id=None, region_name=None): pattern = 'arn:aws:cognito-idp:%s:%s:userpool/%s' return _resource_arn(user_pool_id, pattern, account_id=account_id, region_name=region_name) def kinesis_stream_arn(stream_name, account_id=None, region_name=None): pattern = 'arn:aws:kinesis:%s:%s:stream/%s' return _resource_arn(stream_name, pattern, account_id=account_id, region_name=region_name) def elasticsearch_domain_arn(domain_name, account_id=None, region_name=None): pattern = 'arn:aws:es:%s:%s:domain/%s' return _resource_arn(domain_name, pattern, account_id=account_id, region_name=region_name) def firehose_stream_arn(stream_name, account_id=None, region_name=None): pattern = 'arn:aws:firehose:%s:%s:deliverystream/%s' return _resource_arn(stream_name, pattern, account_id=account_id, region_name=region_name) def es_domain_arn(domain_name, account_id=None, region_name=None): pattern = 'arn:aws:es:%s:%s:domain/%s' return _resource_arn(domain_name, pattern, account_id=account_id, region_name=region_name) def code_signing_arn(code_signing_id, account_id=None, region_name=None): pattern = 'arn:aws:lambda:%s:%s:code-signing-config:%s' return _resource_arn(code_signing_id, pattern, account_id=account_id, region_name=region_name) def s3_bucket_arn(bucket_name, account_id=None): return 'arn:aws:s3:::%s' % (bucket_name) def _resource_arn(name, pattern, account_id=None, region_name=None): if ':' in name: return name account_id = get_account_id(account_id) region_name = region_name or get_region() return pattern % (region_name, account_id, name) def send_event_to_target(arn, event, target_attributes=None, asynchronous=True): region = arn.split(':')[3] if ':lambda:' in arn: from localstack.services.awslambda import lambda_api lambda_api.run_lambda(event=event, context={}, func_arn=arn, asynchronous=asynchronous) elif ':sns:' in arn: sns_client = connect_to_service('sns', region_name=region) sns_client.publish(TopicArn=arn, Message=json.dumps(event)) elif ':sqs:' in arn: sqs_client = connect_to_service('sqs', region_name=region) queue_url = get_sqs_queue_url(arn) msg_group_id = (target_attributes or {}).get('MessageGroupId') kwargs = {'MessageGroupId': msg_group_id} if msg_group_id else {} sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(event), **kwargs) elif ':states:' in arn: stepfunctions_client = connect_to_service('stepfunctions', region_name=region) stepfunctions_client.start_execution(stateMachineArn=arn, input=json.dumps(event)) elif ':firehose:' in arn: delivery_stream_name = firehose_name(arn) firehose_client = connect_to_service('firehose', region_name=region) firehose_client.put_record( DeliveryStreamName=delivery_stream_name, 
Record={'Data': to_bytes(json.dumps(event))}) elif ':events:' in arn: bus_name = arn.split(':')[-1].split('/')[-1] events_client = connect_to_service('events', region_name=region) events_client.put_events( Entries=[{ 'EventBusName': bus_name, 'Source': event.get('source'), 'DetailType': event.get('detail-type'), 'Detail': event.get('detail') }] ) else: LOG.warning('Unsupported Events rule target ARN: "%s"' % arn) def get_events_target_attributes(target): # TODO: add support for other target types return target.get('SqsParameters') def create_sqs_queue(queue_name, env=None): env = get_environment(env) # queue conn = connect_to_service('sqs', env=env) return conn.create_queue(QueueName=queue_name) def sqs_queue_arn(queue_name, account_id=None, region_name=None): account_id = get_account_id(account_id) region_name = region_name or get_region() queue_name = queue_name.split('/')[-1] return ('arn:aws:sqs:%s:%s:%s' % (region_name, account_id, queue_name)) def apigateway_restapi_arn(api_id, account_id=None, region_name=None): account_id = get_account_id(account_id) region_name = region_name or get_region() return ('arn:aws:apigateway:%s:%s:/restapis/%s' % (region_name, account_id, api_id)) def sqs_queue_name(queue_arn): parts = queue_arn.split(':') return queue_arn if len(parts) == 1 else parts[5] def sns_topic_arn(topic_name, account_id=None): account_id = get_account_id(account_id) return ('arn:aws:sns:%s:%s:%s' % (get_region(), account_id, topic_name)) def get_sqs_queue_url(queue_arn): region_name = extract_region_from_arn(queue_arn) queue_name = sqs_queue_name(queue_arn) client = connect_to_service('sqs', region_name=region_name) response = client.get_queue_url(QueueName=queue_name) return response['QueueUrl'] def sqs_receive_message(queue_arn): region_name = extract_region_from_arn(queue_arn) client = connect_to_service('sqs', region_name=region_name) queue_url = get_sqs_queue_url(queue_arn) response = client.receive_message(QueueUrl=queue_url) return response def firehose_name(firehose_arn): return firehose_arn.split('/')[-1] def kinesis_stream_name(kinesis_arn): return kinesis_arn.split(':stream/')[-1] def mock_aws_request_headers(service='dynamodb', region_name=None): ctype = APPLICATION_AMZ_JSON_1_0 if service == 'kinesis': ctype = APPLICATION_AMZ_JSON_1_1 elif service in ['sns', 'sqs']: ctype = APPLICATION_X_WWW_FORM_URLENCODED access_key = get_boto3_credentials().access_key region_name = region_name or get_region() headers = { 'Content-Type': ctype, 'Accept-Encoding': 'identity', 'X-Amz-Date': '20160623T103251Z', 'Authorization': ('AWS4-HMAC-SHA256 ' + 'Credential=%s/20160623/%s/%s/aws4_request, ' + 'SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=1234') % ( access_key, region_name, service) } return headers def dynamodb_get_item_raw(request): headers = mock_aws_request_headers() headers['X-Amz-Target'] = 'DynamoDB_20120810.GetItem' new_item = make_http_request(url=config.TEST_DYNAMODB_URL, method='POST', data=json.dumps(request), headers=headers) new_item = new_item.text new_item = new_item and json.loads(new_item) return new_item def create_dynamodb_table(table_name, partition_key, env=None, stream_view_type=None, region_name=None, client=None): """ Utility method to create a DynamoDB table """ dynamodb = client or connect_to_service('dynamodb', env=env, client=True, region_name=region_name) stream_spec = {'StreamEnabled': False} key_schema = [{ 'AttributeName': partition_key, 'KeyType': 'HASH' }] attr_defs = [{ 'AttributeName': partition_key, 'AttributeType': 'S' 
}] if stream_view_type is not None: stream_spec = { 'StreamEnabled': True, 'StreamViewType': stream_view_type } table = None try: table = dynamodb.create_table(TableName=table_name, KeySchema=key_schema, AttributeDefinitions=attr_defs, ProvisionedThroughput={ 'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10 }, StreamSpecification=stream_spec ) except Exception as e: if 'ResourceInUseException' in str(e): # Table already exists -> return table reference return connect_to_resource('dynamodb', env=env, region_name=region_name).Table(table_name) if 'AccessDeniedException' in str(e): raise time.sleep(2) return table def get_apigateway_integration(api_id, method, path, env=None): apigateway = connect_to_service(service_name='apigateway', client=True, env=env) resources = apigateway.get_resources(restApiId=api_id, limit=100) resource_id = None for r in resources['items']: if r['path'] == path: resource_id = r['id'] if not resource_id: raise Exception('Unable to find apigateway integration for path "%s"' % path) integration = apigateway.get_integration( restApiId=api_id, resourceId=resource_id, httpMethod=method ) return integration def get_apigateway_resource_for_path(api_id, path, parent=None, resources=None): if resources is None: apigateway = connect_to_service(service_name='apigateway') resources = apigateway.get_resources(restApiId=api_id, limit=100) if not isinstance(path, list): path = path.split('/') if not path: return parent for resource in resources: if resource['pathPart'] == path[0] and (not parent or parent['id'] == resource['parentId']): return get_apigateway_resource_for_path(api_id, path[1:], parent=resource, resources=resources) return None def get_apigateway_path_for_resource(api_id, resource_id, path_suffix='', resources=None, region_name=None): if resources is None: apigateway = connect_to_service(service_name='apigateway', region_name=region_name) resources = apigateway.get_resources(restApiId=api_id, limit=100)['items'] target_resource = list(filter(lambda res: res['id'] == resource_id, resources))[0] path_part = target_resource.get('pathPart', '') if path_suffix: if path_part: path_suffix = '%s/%s' % (path_part, path_suffix) else: path_suffix = path_part parent_id = target_resource.get('parentId') if not parent_id: return '/%s' % path_suffix return get_apigateway_path_for_resource(api_id, parent_id, path_suffix=path_suffix, resources=resources, region_name=region_name) def create_api_gateway(name, description=None, resources=None, stage_name=None, enabled_api_keys=[], env=None, usage_plan_name=None, region_name=None): client = connect_to_service('apigateway', env=env, region_name=region_name) if not resources: resources = [] if not stage_name: stage_name = 'testing' if not usage_plan_name: usage_plan_name = 'Basic Usage' if not description: description = 'Test description for API "%s"' % name LOG.info('Creating API resources under API Gateway "%s".' 
% name) api = client.create_rest_api(name=name, description=description) # list resources api_id = api['id'] resources_list = client.get_resources(restApiId=api_id) root_res_id = resources_list['items'][0]['id'] # add API resources and methods for path, methods in six.iteritems(resources): # create resources recursively parent_id = root_res_id for path_part in path.split('/'): api_resource = client.create_resource(restApiId=api_id, parentId=parent_id, pathPart=path_part) parent_id = api_resource['id'] # add methods to the API resource for method in methods: client.put_method( restApiId=api_id, resourceId=api_resource['id'], httpMethod=method['httpMethod'], authorizationType=method.get('authorizationType') or 'NONE', apiKeyRequired=method.get('apiKeyRequired') or False ) # create integrations for this API resource/method integrations = method['integrations'] create_api_gateway_integrations(api_id, api_resource['id'], method, integrations, env=env, region_name=region_name) # deploy the API gateway client.create_deployment(restApiId=api_id, stageName=stage_name) return api def create_api_gateway_integrations(api_id, resource_id, method, integrations=[], env=None, region_name=None): client = connect_to_service('apigateway', env=env, region_name=region_name) for integration in integrations: req_templates = integration.get('requestTemplates') or {} res_templates = integration.get('responseTemplates') or {} success_code = integration.get('successCode') or '200' client_error_code = integration.get('clientErrorCode') or '400' server_error_code = integration.get('serverErrorCode') or '500' # create integration client.put_integration( restApiId=api_id, resourceId=resource_id, httpMethod=method['httpMethod'], integrationHttpMethod=method.get('integrationHttpMethod') or method['httpMethod'], type=integration['type'], uri=integration['uri'], requestTemplates=req_templates ) response_configs = [ {'pattern': '^2.*', 'code': success_code, 'res_templates': res_templates}, {'pattern': '^4.*', 'code': client_error_code, 'res_templates': {}}, {'pattern': '^5.*', 'code': server_error_code, 'res_templates': {}} ] # create response configs for response_config in response_configs: # create integration response client.put_integration_response( restApiId=api_id, resourceId=resource_id, httpMethod=method['httpMethod'], statusCode=response_config['code'], responseTemplates=response_config['res_templates'], selectionPattern=response_config['pattern'] ) # create method response client.put_method_response( restApiId=api_id, resourceId=resource_id, httpMethod=method['httpMethod'], statusCode=response_config['code'] ) def apigateway_invocations_arn(lambda_uri): return ('arn:aws:apigateway:%s:lambda:path/2015-03-31/functions/%s/invocations' % (get_region(), lambda_uri)) def get_elasticsearch_endpoint(domain=None, region_name=None): env = get_environment(region_name=region_name) if is_local_env(env): return os.environ['TEST_ELASTICSEARCH_URL'] # get endpoint from API es_client = connect_to_service(service_name='es', region_name=env.region) info = es_client.describe_elasticsearch_domain(DomainName=domain) endpoint = 'https://%s' % info['DomainStatus']['Endpoint'] return endpoint def connect_elasticsearch(endpoint=None, domain=None, region_name=None, env=None): from elasticsearch import Elasticsearch, RequestsHttpConnection from requests_aws4auth import AWS4Auth env = get_environment(env, region_name=region_name) verify_certs = False use_ssl = False if not endpoint and is_local_env(env): endpoint = 
os.environ['TEST_ELASTICSEARCH_URL'] if not endpoint and not is_local_env(env) and domain: endpoint = get_elasticsearch_endpoint(domain=domain, region_name=env.region) # use ssl? if 'https://' in endpoint: use_ssl = True if not is_local_env(env): verify_certs = True if CUSTOM_BOTO3_SESSION or (ENV_ACCESS_KEY in os.environ and ENV_SECRET_KEY in os.environ): access_key = os.environ.get(ENV_ACCESS_KEY) secret_key = os.environ.get(ENV_SECRET_KEY) session_token = os.environ.get(ENV_SESSION_TOKEN) if CUSTOM_BOTO3_SESSION: credentials = CUSTOM_BOTO3_SESSION.get_credentials() access_key = credentials.access_key secret_key = credentials.secret_key session_token = credentials.token awsauth = AWS4Auth(access_key, secret_key, env.region, 'es', session_token=session_token) connection_class = RequestsHttpConnection return Elasticsearch(hosts=[endpoint], verify_certs=verify_certs, use_ssl=use_ssl, connection_class=connection_class, http_auth=awsauth) return Elasticsearch(hosts=[endpoint], verify_certs=verify_certs, use_ssl=use_ssl) def create_kinesis_stream(stream_name, shards=1, env=None, delete=False): env = get_environment(env) # stream stream = KinesisStream(id=stream_name, num_shards=shards) conn = connect_to_service('kinesis', env=env) stream.connect(conn) if delete: run_safe(lambda: stream.destroy(), print_error=False) stream.create() # Note: Returning the stream without awaiting its creation (via wait_for()) to avoid API call timeouts/retries. return stream def kinesis_get_latest_records(stream_name, shard_id, count=10, env=None): kinesis = connect_to_service('kinesis', env=env) result = [] response = kinesis.get_shard_iterator(StreamName=stream_name, ShardId=shard_id, ShardIteratorType='TRIM_HORIZON') shard_iterator = response['ShardIterator'] while shard_iterator: records_response = kinesis.get_records(ShardIterator=shard_iterator) records = records_response['Records'] for record in records: try: record['Data'] = to_str(record['Data']) except Exception: pass result.extend(records) shard_iterator = records_response['NextShardIterator'] if records else False while len(result) > count: result.pop(0) return result def get_stack_details(stack_name): cloudformation = connect_to_service('cloudformation') stacks = cloudformation.describe_stacks(StackName=stack_name) for stack in stacks['Stacks']: if stack['StackName'] == stack_name: return stack def deploy_cf_stack(stack_name, template_body): cfn = connect_to_service('cloudformation') cfn.create_stack(StackName=stack_name, TemplateBody=template_body) # wait for deployment to finish return await_stack_completion(stack_name) def await_stack_status(stack_name, expected_statuses, retries=3, sleep=2): def check_stack(): stack = get_stack_details(stack_name) assert stack['StackStatus'] in expected_statuses return stack expected_statuses = expected_statuses if isinstance(expected_statuses, list) else [expected_statuses] return retry(check_stack, retries, sleep) def await_stack_completion(stack_name, retries=3, sleep=2, statuses=None): statuses = statuses or ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] return await_stack_status(stack_name, statuses, retries=retries, sleep=sleep) # TODO: move to aws_responses.py? def extract_tags(req_data): tags = [] for i in range(1, 200): k1 = 'Tags.member.%s.Key' % i k2 = 'Tags.member.%s.Value' % i key = req_data.get(k1) value = req_data.get(k2, '') if key is None: break tags.append({'Key': key, 'Value': value}) return tags
1
12,182
nit: the commented-out lines can be removed entirely before merging...
localstack-localstack
py
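A hedged sketch of what the nit amounts to: deleting the commented-out S3 path-style lines instead of leaving them behind. The `_build_client_config` wrapper is a hypothetical name used only to make the excerpt self-contained; in the real file this logic lives inside `connect_to_service`.

```python
import botocore.client

from localstack.constants import MAX_POOL_CONNECTIONS


def _build_client_config(config=None):
    """Hypothetical helper mirroring the config setup in connect_to_service,
    with the dead (commented-out) S3 path-style lines removed entirely."""
    config = config or botocore.client.Config()
    # To prevent the error "Connection pool is full, discarding connection ...",
    # set the environment variable MAX_POOL_CONNECTIONS. Default is 150.
    config.max_pool_connections = MAX_POOL_CONNECTIONS
    return config
```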
@@ -2,7 +2,7 @@ describe('ContextMenu', function () { var id = 'testContainer'; beforeEach(function () { - this.$container = $('<div id="' + id + '"></div>').appendTo('body'); + this.$container = $(`<div id="${id}"></div>`).appendTo('body'); }); afterEach(function () {
1
describe('ContextMenu', function () { var id = 'testContainer'; beforeEach(function () { this.$container = $('<div id="' + id + '"></div>').appendTo('body'); }); afterEach(function () { if (this.$container) { destroy(); this.$container.remove(); } }); describe('alignment', function() { it('should align text left', function (done) { var hot = handsontable({ data: Handsontable.helper.createSpreadsheetData(4, 4), contextMenu: true, height: 100 }); contextMenu(); var item = $('.htContextMenu .ht_master .htCore').find('tbody td').not('.htSeparator').eq(9); item.simulate('mouseover'); setTimeout(function () { var contextSubMenu = $('.htContextMenuSub_' + item.text()); var button = contextSubMenu.find('.ht_master .htCore tbody td').not('.htSeparator').eq(0); button.simulate('mousedown'); // Text left expect(getCellMeta(0, 0).className).toEqual('htLeft'); expect(getCell(0, 0).className).toContain('htLeft'); done(); }, 350); }); it('should align text center', function (done) { var hot = handsontable({ data: Handsontable.helper.createSpreadsheetData(4, 4), contextMenu: true, height: 100 }); contextMenu(); var item = $('.htContextMenu .ht_master .htCore').find('tbody td').not('.htSeparator').eq(9); item.simulate('mouseover'); setTimeout(function () { var contextSubMenu = $('.htContextMenuSub_' + item.text()); var button = contextSubMenu.find('.ht_master .htCore tbody td').not('.htSeparator').eq(1); button.simulate('mousedown'); // Text center expect(getCellMeta(0, 0).className).toEqual('htCenter'); expect(getCell(0, 0).className).toContain('htCenter'); done(); }, 350); }); it('should align text right', function (done) { var hot = handsontable({ data: Handsontable.helper.createSpreadsheetData(4, 4), contextMenu: true, height: 100 }); contextMenu(); var item = $('.htContextMenu .ht_master .htCore').find('tbody td').not('.htSeparator').eq(9); item.simulate('mouseover'); setTimeout(function () { var contextSubMenu = $('.htContextMenuSub_' + item.text()); var button = contextSubMenu.find('.ht_master .htCore tbody td').not('.htSeparator').eq(2); button.simulate('mousedown'); // Text right expect(getCellMeta(0, 0).className).toEqual('htRight'); expect(getCell(0, 0).className).toContain('htRight'); done(); }, 350); }); it('should justify text', function (done) { var hot = handsontable({ data: Handsontable.helper.createSpreadsheetData(4, 4), contextMenu: true, height: 100 }); contextMenu(); var item = $('.htContextMenu .ht_master .htCore').find('tbody td').not('.htSeparator').eq(9); item.simulate('mouseover'); setTimeout(function () { var contextSubMenu = $('.htContextMenuSub_' + item.text()); var button = contextSubMenu.find('.ht_master .htCore tbody td').not('.htSeparator').eq(3); button.simulate('mousedown'); // Text justify deselectCell(); expect(getCellMeta(0, 0).className).toEqual('htJustify'); expect(getCell(0, 0).className).toContain('htJustify'); done(); }, 350); // menu opens after 300ms }); it('should vertical align text top', function (done) { var hot = handsontable({ data: Handsontable.helper.createSpreadsheetData(4, 4), contextMenu: true, height: 100 }); contextMenu(); var item = $('.htContextMenu .ht_master .htCore').find('tbody td').not('.htSeparator').eq(9); item.simulate('mouseover'); setTimeout(function () { var contextSubMenu = $('.htContextMenuSub_' + item.text()); var button = contextSubMenu.find('.ht_master .htCore tbody td').not('.htSeparator').eq(4); button.simulate('mousedown'); // Text top deselectCell(); expect(getCellMeta(0, 0).className).toEqual('htTop'); expect(getCell(0, 
0).className).toContain('htTop'); done(); }, 350); // menu opens after 300ms }); it('should vertical align text middle', function (done) { var hot = handsontable({ data: Handsontable.helper.createSpreadsheetData(4, 4), contextMenu: true, height: 100 }); contextMenu(); var item = $('.htContextMenu .ht_master .htCore').find('tbody td').not('.htSeparator').eq(9); item.simulate('mouseover'); setTimeout(function () { var contextSubMenu = $('.htContextMenuSub_' + item.text()); var button = contextSubMenu.find('.ht_master .htCore tbody td').not('.htSeparator').eq(5); button.simulate('mousedown'); // Text middle deselectCell(); expect(getCellMeta(0, 0).className).toEqual('htMiddle'); expect(getCell(0, 0).className).toContain('htMiddle'); done(); }, 350); // menu opens after 300ms }); it('should vertical align text bottom', function (done) { var hot = handsontable({ data: Handsontable.helper.createSpreadsheetData(4, 4), contextMenu: true, height: 100 }); contextMenu(); var item = $('.htContextMenu .ht_master .htCore').find('tbody td').not('.htSeparator').eq(9); item.simulate('mouseover'); setTimeout(function () { var contextSubMenu = $('.htContextMenuSub_' + item.text()); var button = contextSubMenu.find('.ht_master .htCore tbody td').not('.htSeparator').eq(6); button.simulate('mousedown'); // Text bottom deselectCell(); expect(getCellMeta(0, 0).className).toEqual('htBottom'); expect(getCell(0, 0).className).toContain('htBottom'); done(); }, 350); // menu opens after 300ms }); it('should trigger `afterSetCellMeta` callback after changing alignment by context menu', function (done) { var afterSetCellMetaCallback = jasmine.createSpy('afterSetCellMetaCallback'); var hot = handsontable({ data: Handsontable.helper.createSpreadsheetData(5, 5), rowHeaders: true, colHeaders: true, contextMenu: true, afterSetCellMeta: afterSetCellMetaCallback }); selectCell(2, 3); contextMenu(); var item = $('.htContextMenu .ht_master .htCore').find('tbody td').not('.htSeparator').eq(9); item.simulate('mouseover'); setTimeout(function () { var contextSubMenu = $('.htContextMenuSub_' + item.text()); var button = contextSubMenu.find('.ht_master .htCore tbody td').not('.htSeparator').eq(2); button.simulate('mousedown'); // Text bottom deselectCell(); expect(afterSetCellMetaCallback).toHaveBeenCalledWith(2, 3, 'className', 'htRight', undefined, undefined); done(); }, 350); // menu opens after 300ms }); }); });
1
14,896
Maybe single quotes would be compatible with the Airbnb style.
handsontable-handsontable
js
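For reference, an illustrative comparison only (not a ruling on the Airbnb guide): the two spellings being discussed in the `beforeEach` hook, the template literal introduced by the patch and the single-quoted concatenation the reviewer appears to prefer. Both lines are copied from the row above.

```js
describe('ContextMenu quote style (illustration only)', function () {
  var id = 'testContainer';

  beforeEach(function () {
    // Patch version — template literal with interpolation:
    // this.$container = $(`<div id="${id}"></div>`).appendTo('body');

    // Reviewer's apparent suggestion — keep the single-quoted form:
    this.$container = $('<div id="' + id + '"></div>').appendTo('body');
  });
});
```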
@@ -81,6 +81,10 @@ public abstract class SessionMap implements HasReadyState, Routable { public abstract void remove(SessionId id); + public int getCount() { + return -10; + }; + public URI getUri(SessionId id) throws NoSuchSessionException { return get(id).getUri(); }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.sessionmap; import org.openqa.selenium.NoSuchSessionException; import org.openqa.selenium.grid.data.Session; import org.openqa.selenium.internal.Require; import org.openqa.selenium.json.Json; import org.openqa.selenium.remote.SessionId; import org.openqa.selenium.remote.http.HttpHandler; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.http.HttpResponse; import org.openqa.selenium.remote.http.Routable; import org.openqa.selenium.remote.http.Route; import org.openqa.selenium.remote.tracing.Tracer; import org.openqa.selenium.status.HasReadyState; import java.net.URI; import java.util.Map; import static org.openqa.selenium.remote.http.Route.combine; import static org.openqa.selenium.remote.http.Route.delete; import static org.openqa.selenium.remote.http.Route.post; /** * Provides a stable API for looking up where on the Grid a particular webdriver instance is * running. * <p> * This class responds to the following URLs: * <table summary="HTTP commands the SessionMap understands"> * <tr> * <th>Verb</th> * <th>URL Template</th> * <th>Meaning</th> * </tr> * <tr> * <td>DELETE</td> * <td>/se/grid/session/{sessionId}</td> * <td>Removes a {@link URI} from the session map. Calling this method more than once for the same * {@link SessionId} will not throw an error.</td> * </tr> * <tr> * <td>GET</td> * <td>/se/grid/session/{sessionId}</td> * <td>Retrieves the {@link URI} associated the {@link SessionId}, or throws a * {@link org.openqa.selenium.NoSuchSessionException} should the session not be present.</td> * </tr> * <tr> * <td>POST</td> * <td>/se/grid/session/{sessionId}</td> * <td>Registers the session with session map. In theory, the session map never expires a session * from its mappings, but realistically, sessions may end up being removed for many reasons. 
* </td> * </tr> * </table> */ public abstract class SessionMap implements HasReadyState, Routable { protected final Tracer tracer; private final Route routes; public abstract boolean add(Session session); public abstract Session get(SessionId id) throws NoSuchSessionException; public abstract void remove(SessionId id); public URI getUri(SessionId id) throws NoSuchSessionException { return get(id).getUri(); } public SessionMap(Tracer tracer) { this.tracer = Require.nonNull("Tracer", tracer); Json json = new Json(); routes = combine( Route.get("/se/grid/session/{sessionId}/uri") .to(params -> new GetSessionUri(this, sessionIdFrom(params))), post("/se/grid/session") .to(() -> new AddToSessionMap(tracer, json, this)), Route.get("/se/grid/session/{sessionId}") .to(params -> new GetFromSessionMap(tracer, this, sessionIdFrom(params))), delete("/se/grid/session/{sessionId}") .to(params -> new RemoveFromSession(tracer, this, sessionIdFrom(params)))); } private SessionId sessionIdFrom(Map<String, String> params) { return new SessionId(params.get("sessionId")); } @Override public boolean matches(HttpRequest req) { return routes.matches(req); } @Override public HttpResponse execute(HttpRequest req) { return routes.execute(req); } }
1
17,774
This is not the right approach. The `Distributor` maintains a model of the current state of the Grid. That model already contains the information about every active session. We don't need to modify `SessionMap` to expose it further.
SeleniumHQ-selenium
java
@@ -279,11 +279,9 @@ func TestFormatWithComments(t *testing.T) { if want := `// hi // there -{ - _time: r._time, - io_time: r._value, -// this is the end -} +{_time: r._time, io_time: r._value + // this is the end + } // minimal foo = (arg=[1, 2]) => 1
1
package astutil_test import ( "testing" "github.com/influxdata/flux/ast" "github.com/influxdata/flux/ast/astutil" "github.com/influxdata/flux/parser" ) func TestFormat(t *testing.T) { src := `x=1+2` pkg := parser.ParseSource(src) if ast.Check(pkg) > 0 { t.Fatalf("unexpected error: %s", ast.GetError(pkg)) } else if len(pkg.Files) != 1 { t.Fatalf("expected one file in the package, got %d", len(pkg.Files)) } got, err := astutil.Format(pkg.Files[0]) if err != nil { t.Fatal(err) } if want := `x = 1 + 2`; want != got { t.Errorf("unexpected formatted file -want/+got:\n\t- %q\n\t+ %q", want, got) } } func TestFormatWithCommentsBase(t *testing.T) { src := `// add two numbers x=1+2` pkg := parser.ParseSource(src) if ast.Check(pkg) > 0 { t.Fatalf("unexpected error: %s", ast.GetError(pkg)) } else if len(pkg.Files) != 1 { t.Fatalf("expected one file in the package, got %d", len(pkg.Files)) } got, err := astutil.Format(pkg.Files[0]) if err != nil { t.Fatal(err) } if want := `// add two numbers x = 1 + 2`; want != got { t.Errorf("unexpected formatted file -want/+got:\n\t- %q\n\t+ %q", want, got) } } func TestFormatWithCommentsDict(t *testing.T) { src := `[ "a": 0, //comment "b": 1, ]` pkg := parser.ParseSource(src) if ast.Check(pkg) > 0 { t.Fatalf("unexpected error: %s", ast.GetError(pkg)) } else if len(pkg.Files) != 1 { t.Fatalf("expected one file in the package, got %d", len(pkg.Files)) } got, err := astutil.Format(pkg.Files[0]) if err != nil { t.Fatal(err) } if want := `[ "a": 0, //comment "b": 1, ]`; want != got { t.Errorf("unexpected formatted file -want/+got:\n\t- %q\n\t+ %q", want, got) } } func TestFormatWithCommentsParens(t *testing.T) { src := `// comment\n(1 * 1)` pkg := parser.ParseSource(src) if ast.Check(pkg) > 0 { t.Fatalf("unexpected error: %s", ast.GetError(pkg)) } else if len(pkg.Files) != 1 { t.Fatalf("expected one file in the package, got %d", len(pkg.Files)) } got, err := astutil.Format(pkg.Files[0]) if err != nil { t.Fatal(err) } if want := `// comment\n(1 * 1)`; want != got { t.Errorf("unexpected formatted file -want/+got:\n\t- %q\n\t+ %q", want, got) } } func TestFormatWithCommentsColon(t *testing.T) { src := `// Comment builtin foo // colon comment : int` pkg := parser.ParseSource(src) if ast.Check(pkg) > 0 { t.Fatalf("unexpected error: %s", ast.GetError(pkg)) } else if len(pkg.Files) != 1 { t.Fatalf("expected one file in the package, got %d", len(pkg.Files)) } got, err := astutil.Format(pkg.Files[0]) if err != nil { t.Fatal(err) } if want := `// Comment builtin foo // colon comment : int`; want != got { t.Errorf("unexpected formatted file -want/+got:\n\t- %q\n\t+ %q", want, got) } } func TestFormatWithCommentsUnaryExpressions(t *testing.T) { src := `// define a a = 5.0 // eval this 10.0 * -a == -0.5 // or this or a == 6.0` pkg := parser.ParseSource(src) if ast.Check(pkg) > 0 { t.Fatalf("unexpected error: %s", ast.GetError(pkg)) } else if len(pkg.Files) != 1 { t.Fatalf("expected one file in the package, got %d", len(pkg.Files)) } got, err := astutil.Format(pkg.Files[0]) if err != nil { t.Fatal(err) } if want := `// define a a = 5.0 // eval this 10.0 * (-a) == -0.5 // or this or a == 6.0`; want != got { t.Errorf("unexpected formatted file -want/+got:\n\t- %q\n\t+ %q", want, got) } } func TestFormatWithCommentsBuiltin(t *testing.T) { src := `foo = 1 foo builtin bar : int builtin rab : int // comment builtin baz : int` pkg := parser.ParseSource(src) if ast.Check(pkg) > 0 { t.Fatalf("unexpected error: %s", ast.GetError(pkg)) } else if len(pkg.Files) != 1 { t.Fatalf("expected one file 
in the package, got %d", len(pkg.Files)) } got, err := astutil.Format(pkg.Files[0]) if err != nil { t.Fatal(err) } if want := `foo = 1 foo builtin bar : int builtin rab : int // comment builtin baz : int`; want != got { t.Errorf("unexpected formatted file -want/+got:\n\t- %q\n\t+ %q", want, got) } } func TestFormatWithTestCaseStmt(t *testing.T) { src := `testcase my_test { a = 1 }` pkg := parser.ParseSource(src) if ast.Check(pkg) > 0 { t.Fatalf("unexpected error: %s", ast.GetError(pkg)) } else if len(pkg.Files) != 1 { t.Fatalf("expected one file in the package, got %d", len(pkg.Files)) } got, err := astutil.Format(pkg.Files[0]) if err != nil { t.Fatal(err) } if want := "testcase my_test {\n a = 1\n}"; want != got { t.Errorf("unexpected formatted file -want/+got:\n\t- %q\n\t+ %q", want, got) } } func TestFormatWithComments(t *testing.T) { src := ` // hi // there {_time: r._time, io_time: r._value, // this is the end } // minimal foo = (arg=[1, 2]) => (1) // left left = from(bucket: "test") |> range(start: 2018-05-22T19:53:00Z // i write too many comments , stop: 2018-05-22T19:55:00Z) // and put them in strange places |> drop // this hurts my eyes (columns: ["_start", "_stop"]) // just terrible |> filter(fn: (r) => (r.user // (don't fire me, this is intentional) == "user1")) |> group(by // strange place for a comment : ["user"]) right = from(bucket: "test") |> range(start: 2018-05-22T19:53:00Z, // please stop stop: 2018-05-22T19:55:00Z) |> drop( // spare me the pain // this hurts columns: ["_start", "_stop"// what ]) |> filter( // just why fn: (r) => // user 2 is the best user (r.user == "user2")) |> group(by: //just stop ["_measurement"]) join(tables: {left: left, right: right}, on: ["_time", "_measurement"]) from(bucket, _option // friends ,// stick together ) i = // definitely not true // a // list // of // comments j // not lost` pkg := parser.ParseSource(src) if ast.Check(pkg) > 0 { t.Fatalf("unexpected error: %s", ast.GetError(pkg)) } else if len(pkg.Files) != 1 { t.Fatalf("expected one file in the package, got %d", len(pkg.Files)) } got, err := astutil.Format(pkg.Files[0]) if err != nil { t.Fatal(err) } if want := `// hi // there { _time: r._time, io_time: r._value, // this is the end } // minimal foo = (arg=[1, 2]) => 1 // left left = from(bucket: "test") |> range( start: 2018-05-22T19:53:00Z // i write too many comments , stop: 2018-05-22T19:55:00Z, ) // and put them in strange places |> drop // this hurts my eyes (columns: ["_start", "_stop"]) // just terrible |> filter( fn: (r) => r.user // (don't fire me, this is intentional) == "user1", ) |> group( by // strange place for a comment : ["user"], ) right = from(bucket: "test") |> range( start: 2018-05-22T19:53:00Z, // please stop stop: 2018-05-22T19:55:00Z, ) |> drop( // spare me the pain // this hurts columns: [ "_start", "_stop", // what ], ) |> filter( // just why fn: (r) => // user 2 is the best user (r.user == "user2"), ) |> group( by: //just stop ["_measurement"], ) join(tables: {left: left, right: right}, on: ["_time", "_measurement"]) from(bucket, _option // friends // stick together ) i = // definitely not true // a // list // of // comments j // not lost`; want != got { t.Errorf("unexpected formatted file -want/+got:\n\t- %q\n\t+ %q", want, got) } }
1
16,918
@Marwes @wolffcm This is the file where I made a change to the expected output. It's a little bit weird, but I think that the final output makes sense for the most part.
influxdata-flux
go
@@ -94,9 +94,11 @@ func IsValidAppType(apptype string) bool { func (app *DdevApp) CreateSettingsFile() (string, error) { app.SetApptypeSettingsPaths() - // If neither settings file options are set, then don't continue + // If neither settings file options are set, then don't continue. Return + // a nil error because this should not halt execution if the apptype + // does not have a settings definition. if app.SiteLocalSettingsPath == "" && app.SiteSettingsPath == "" { - return "", fmt.Errorf("Neither SiteLocalSettingsPath nor SiteSettingsPath is set") + return "", nil } // Drupal and WordPress love to change settings files to be unwriteable.
1
package ddevapp import ( "fmt" "os" "path/filepath" ) type settingsCreator func(*DdevApp) (string, error) type uploadDir func(*DdevApp) string // hookDefaultComments should probably change its arg from string to app when // config refactor is done. type hookDefaultComments func() []byte type apptypeSettingsPaths func(app *DdevApp) // appTypeDetect returns true if the app is of the specified type type appTypeDetect func(app *DdevApp) bool // postImportDBAction can take actions after import (like warning user about // required actions on Wordpress. type postImportDBAction func(app *DdevApp) error // configOverrideAction allows a particular apptype to override elements // of the config for that apptype. Key example is drupal6 needing php56 type configOverrideAction func(app *DdevApp) error // postConfigAction allows actions to take place at the end of ddev config type postConfigAction func(app *DdevApp) error // AppTypeFuncs struct defines the functions that can be called (if populated) // for a given appType. type AppTypeFuncs struct { settingsCreator uploadDir hookDefaultComments apptypeSettingsPaths appTypeDetect postImportDBAction configOverrideAction postConfigAction } // appTypeMatrix is a static map that defines the various functions to be called // for each apptype (CMS). // Example: appTypeMatrix["drupal"]["7"] == { settingsCreator etc } var appTypeMatrix map[string]AppTypeFuncs func init() { appTypeMatrix = map[string]AppTypeFuncs{ "php": {}, "drupal6": { createDrupal6SettingsFile, getDrupalUploadDir, getDrupal6Hooks, setDrupalSiteSettingsPaths, isDrupal6App, nil, drupal6ConfigOverrideAction, nil, }, "drupal7": { createDrupal7SettingsFile, getDrupalUploadDir, getDrupal7Hooks, setDrupalSiteSettingsPaths, isDrupal7App, nil, drupal7ConfigOverrideAction, nil, }, "drupal8": { createDrupal8SettingsFile, getDrupalUploadDir, getDrupal8Hooks, setDrupalSiteSettingsPaths, isDrupal8App, nil, nil, nil, }, "wordpress": { createWordpressSettingsFile, getWordpressUploadDir, getWordpressHooks, setWordpressSiteSettingsPaths, isWordpressApp, wordpressPostImportDBAction, nil, nil, }, "typo3": { createTypo3SettingsFile, getTypo3UploadDir, getTypo3Hooks, setTypo3SiteSettingsPaths, isTypo3App, nil, nil, nil, }, "backdrop": { createBackdropSettingsFile, getBackdropUploadDir, getBackdropHooks, setBackdropSiteSettingsPaths, isBackdropApp, backdropPostImportDBAction, nil, nil, }, } } // GetValidAppTypes returns the valid apptype keys from the appTypeMatrix func GetValidAppTypes() []string { keys := make([]string, 0, len(appTypeMatrix)) for k := range appTypeMatrix { keys = append(keys, k) } return keys } // IsValidAppType checks to see if the given apptype string is a valid configured // apptype. func IsValidAppType(apptype string) bool { if _, ok := appTypeMatrix[apptype]; ok { return true } return false } // CreateSettingsFile creates the settings file (like settings.php) for the // provided app is the apptype has a settingsCreator function. func (app *DdevApp) CreateSettingsFile() (string, error) { app.SetApptypeSettingsPaths() // If neither settings file options are set, then don't continue if app.SiteLocalSettingsPath == "" && app.SiteSettingsPath == "" { return "", fmt.Errorf("Neither SiteLocalSettingsPath nor SiteSettingsPath is set") } // Drupal and WordPress love to change settings files to be unwriteable. // Chmod them to something we can work with in the event that they already // exist. 
chmodTargets := []string{filepath.Dir(app.SiteSettingsPath), app.SiteLocalSettingsPath} for _, fp := range chmodTargets { if fileInfo, err := os.Stat(fp); !os.IsNotExist(err) { perms := 0644 if fileInfo.IsDir() { perms = 0755 } err = os.Chmod(fp, os.FileMode(perms)) if err != nil { return "", fmt.Errorf("could not change permissions on file %s to make it writeable: %v", fp, err) } } } // If we have a function to do the settings creation, do it, otherwise // just ignore. if appFuncs, ok := appTypeMatrix[app.GetType()]; ok && appFuncs.settingsCreator != nil { settingsPath, err := appFuncs.settingsCreator(app) return settingsPath, err } return "", nil } // GetUploadDir returns the upload (public files) directory for the given app func (app *DdevApp) GetUploadDir() string { if appFuncs, ok := appTypeMatrix[app.GetType()]; ok && appFuncs.uploadDir != nil { uploadDir := appFuncs.uploadDir(app) return uploadDir } return "" } // GetHookDefaultComments gets the actual text of the config.yaml hook suggestions // for a given apptype func (app *DdevApp) GetHookDefaultComments() []byte { if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.hookDefaultComments != nil { suggestions := appFuncs.hookDefaultComments() return suggestions } return []byte("") } // SetApptypeSettingsPaths chooses and sets the settings.php/settings.local.php // and related paths for a given app. func (app *DdevApp) SetApptypeSettingsPaths() { if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.apptypeSettingsPaths != nil { appFuncs.apptypeSettingsPaths(app) } } // DetectAppType calls each apptype's detector until it finds a match, // or returns 'php' as a last resort. func (app *DdevApp) DetectAppType() string { for appName, appFuncs := range appTypeMatrix { if appFuncs.appTypeDetect != nil && appFuncs.appTypeDetect(app) { return appName } } return "php" } // PostImportDBAction calls each apptype's detector until it finds a match, // or returns 'php' as a last resort. func (app *DdevApp) PostImportDBAction() error { if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.postImportDBAction != nil { return appFuncs.postImportDBAction(app) } return nil } // ConfigFileOverrideAction gives a chance for an apptype to override any element // of config.yaml that it needs to. func (app *DdevApp) ConfigFileOverrideAction() error { if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.configOverrideAction != nil { return appFuncs.configOverrideAction(app) } return nil } // PostConfigAction gives a chance for an apptype to override do something at // the end of ddev config. func (app *DdevApp) PostConfigAction() error { if appFuncs, ok := appTypeMatrix[app.Type]; ok && appFuncs.postConfigAction != nil { return appFuncs.postConfigAction(app) } return nil }
1
12,404
I think we probably need a util.Warning() here.
drud-ddev
go
@@ -0,0 +1,19 @@ +from quilt3.util import PhysicalKey, get_from_config, fix_url + +from .base import PackageRegistry +from .local import LocalPackageRegistryV1 +from .s3 import S3PackageRegistryV1 + + +def get_package_registry(path=None) -> PackageRegistry: + """ Returns the package registry for a given path """ + # TODO: Don't check if it's PackageRegistry? Then we need better separation + # to external functions that receive string and internal that receive + # PackageRegistry. + if isinstance(path, PackageRegistry): + return path + if not isinstance(path, PhysicalKey): + path = PhysicalKey.from_url( + get_from_config('default_local_registry') if path is None else fix_url(path) + ) + return (LocalPackageRegistryV1 if path.is_local() else S3PackageRegistryV1)(path)
1
1
18,695
Let's have a signature that's consistent with `PhysicalKey.from_path`. Users should also have access to PhysicalKey since that class is part of the API (e.g., `Package.resolve_hash`).
quiltdata-quilt
py
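The new helper in the diff above normalizes whatever it is given (None, a URL string, or a `PhysicalKey`) into a key and then dispatches to a local or an S3 registry class. Here is a self-contained sketch of that normalize-then-dispatch shape; `Key`, `Registry`, `LocalRegistry`, `S3Registry`, and the default path are stand-ins of mine, not quilt3 API:

```python
from dataclasses import dataclass


@dataclass
class Key:
    """Stand-in for a physical-key/URL wrapper (quilt3's PhysicalKey plays this role)."""
    url: str

    def is_local(self) -> bool:
        # Treat anything that is not an s3:// URL as local, just for the sketch.
        return not self.url.startswith("s3://")


class Registry:
    def __init__(self, key: Key):
        self.key = key


class LocalRegistry(Registry):
    pass


class S3Registry(Registry):
    pass


def get_registry(path=None) -> Registry:
    # Already-constructed registries pass straight through.
    if isinstance(path, Registry):
        return path
    # Strings (or None, meaning "use a default location") are normalized into a Key.
    if not isinstance(path, Key):
        path = Key("/tmp/registry" if path is None else path)
    # Dispatch on where the normalized key points.
    return (LocalRegistry if path.is_local() else S3Registry)(path)


print(type(get_registry("s3://bucket/registry")).__name__)  # S3Registry
print(type(get_registry()).__name__)                         # LocalRegistry
```

The reviewer's point that `PhysicalKey` is part of the public API maps onto the `isinstance(path, Key)` branch: a caller that already holds a key can pass it directly instead of round-tripping through a string.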
@@ -0,0 +1,10 @@ +/*eslint no-unused-vars: 0*/ +/* exported utils */ + +/** + * Namespace for imports which holds globals of external dependencies. + * @namespace imports + * @memberof axe + */ + +var imports = (axe.imports = {});
1
1
12,904
What is this directive for?
dequelabs-axe-core
js
@@ -155,6 +155,10 @@ class RefactoringChecker(checkers.BaseTokenChecker): 'if a key is present or a default if not, is simpler and considered ' 'more idiomatic, although sometimes a bit slower' ), + 'R1716': ('simplify chained comparison', + 'chained-comparison', + 'Chained comparisons like "a < b and b < c" can be simplified as "a < b < c"', + ), } options = (('max-nested-blocks', {'default': 5, 'type': 'int', 'metavar': '<int>',
1
# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Claudiu Popa <[email protected]> # Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]> # Copyright (c) 2016 Moises Lopez <[email protected]> # Copyright (c) 2016 Alexander Todorov <[email protected]> # Copyright (c) 2017 Hugo <[email protected]> # Copyright (c) 2017 Bryce Guinta <[email protected]> # Copyright (c) 2017 hippo91 <[email protected]> # Copyright (c) 2017 Łukasz Sznuk <[email protected]> # Copyright (c) 2017 Alex Hearn <[email protected]> # Copyright (c) 2017 Antonio Ossa <[email protected]> # Copyright (c) 2017 Ville Skyttä <[email protected]> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/master/COPYING """Looks for code which can be refactored.""" import builtins from functools import reduce import collections import itertools import tokenize import astroid from astroid import decorators from pylint import interfaces from pylint import checkers from pylint import utils as lint_utils from pylint.checkers import utils KNOWN_INFINITE_ITERATORS = { 'itertools.count', } def _all_elements_are_true(gen): values = list(gen) return values and all(values) def _if_statement_is_always_returning(if_node): def _has_return_node(elems, scope): for node in elems: if isinstance(node, astroid.If) and node.orelse: yield _if_statement_is_always_returning(node) if isinstance(node, astroid.Return): yield node.scope() is scope scope = if_node.scope() return _all_elements_are_true( _has_return_node(if_node.body, scope=scope) ) class RefactoringChecker(checkers.BaseTokenChecker): """Looks for code which can be refactored This checker also mixes the astroid and the token approaches in order to create knowledge about whether an "else if" node is a true "else if" node, or an "elif" node. """ __implements__ = (interfaces.ITokenChecker, interfaces.IAstroidChecker) name = 'refactoring' msgs = { 'R1701': ("Consider merging these isinstance calls to isinstance(%s, (%s))", "consider-merging-isinstance", "Used when multiple consecutive isinstance calls can be merged into one."), 'R1706': ("Consider using ternary (%s)", "consider-using-ternary", "Used when one of known pre-python 2.5 ternary syntax is used.",), 'R1709': ("Boolean expression may be simplified to %s", "simplify-boolean-expression", "Emitted when redundant pre-python 2.5 ternary syntax is used.",), 'R1702': ('Too many nested blocks (%s/%s)', 'too-many-nested-blocks', 'Used when a function or a method has too many nested ' 'blocks. This makes the code less understandable and ' 'maintainable.', {'old_names': [('R0101', 'too-many-nested-blocks')]}), 'R1703': ('The if statement can be replaced with %s', 'simplifiable-if-statement', 'Used when an if statement can be replaced with ' '\'bool(test)\'. ', {'old_names': [('R0102', 'simplifiable-if-statement')]}), 'R1704': ('Redefining argument with the local name %r', 'redefined-argument-from-local', 'Used when a local name is redefining an argument, which might ' 'suggest a potential error. This is taken in account only for ' 'a handful of name binding operations, such as for iteration, ' 'with statement assignment and exception handler assignment.' ), 'R1705': ('Unnecessary "else" after "return"', 'no-else-return', 'Used in order to highlight an unnecessary block of ' 'code following an if containing a return statement. ' 'As such, it will warn when it encounters an else ' 'following a chain of ifs, all of them containing a ' 'return statement.' 
), 'R1707': ('Disallow trailing comma tuple', 'trailing-comma-tuple', 'In Python, a tuple is actually created by the comma symbol, ' 'not by the parentheses. Unfortunately, one can actually create a ' 'tuple by misplacing a trailing comma, which can lead to potential ' 'weird bugs in your code. You should always use parentheses ' 'explicitly for creating a tuple.'), 'R1708': ('Do not raise StopIteration in generator, use return statement instead', 'stop-iteration-return', 'According to PEP479, the raise of StopIteration to end the loop of ' 'a generator may lead to hard to find bugs. This PEP specify that ' 'raise StopIteration has to be replaced by a simple return statement'), 'R1710': ('Either all return statements in a function should return an expression, ' 'or none of them should.', 'inconsistent-return-statements', 'According to PEP8, if any return statement returns an expression, ' 'any return statements where no value is returned should explicitly ' 'state this as return None, and an explicit return statement ' 'should be present at the end of the function (if reachable)' ), 'R1711': ("Useless return at end of function or method", 'useless-return', 'Emitted when a single "return" or "return None" statement is found ' 'at the end of function or method definition. This statement can safely be ' 'removed because Python will implicitly return None' ), 'R1712': ('Consider using tuple unpacking for swapping variables', 'consider-swap-variables', 'You do not have to use a temporary variable in order to ' 'swap variables. Using "tuple unpacking" to directly swap ' 'variables makes the intention more clear.' ), 'R1713': ('Consider using str.join(sequence) for concatenating ' 'strings from an iterable', 'consider-using-join', 'Using str.join(sequence) is faster, uses less memory ' 'and increases readability compared to for-loop iteration.' ), 'R1714': ('Consider merging these comparisons with "in" to %r', 'consider-using-in', 'To check if a variable is equal to one of many values,' 'combine the values into a tuple and check if the variable is contained "in" it ' 'instead of checking for equality against each of the values.' 'This is faster and less verbose.' ), 'R1715': ('Consider using dict.get for getting values from a dict ' 'if a key is present or a default if not', 'consider-using-get', 'Using the builtin dict.get for getting a value from a dictionary ' 'if a key is present or a default if not, is simpler and considered ' 'more idiomatic, although sometimes a bit slower' ), } options = (('max-nested-blocks', {'default': 5, 'type': 'int', 'metavar': '<int>', 'help': 'Maximum number of nested blocks for function / ' 'method body'} ), ('never-returning-functions', {'default': ('sys.exit',), 'type': 'csv', 'help': 'Complete name of functions that never returns. 
When checking ' 'for inconsistent-return-statements if a never returning function is ' 'called then it will be considered as an explicit return statement ' 'and no message will be printed.'} ),) priority = 0 def __init__(self, linter=None): checkers.BaseTokenChecker.__init__(self, linter) self._return_nodes = {} self._init() self._never_returning_functions = None def _init(self): self._nested_blocks = [] self._elifs = [] self._nested_blocks_msg = None self._reported_swap_nodes = set() def open(self): # do this in open since config not fully initialized in __init__ self._never_returning_functions = set(self.config.never_returning_functions) @decorators.cachedproperty def _dummy_rgx(self): return lint_utils.get_global_option( self, 'dummy-variables-rgx', default=None) @staticmethod def _is_bool_const(node): return (isinstance(node.value, astroid.Const) and isinstance(node.value.value, bool)) def _is_actual_elif(self, node): """Check if the given node is an actual elif This is a problem we're having with the builtin ast module, which splits `elif` branches into a separate if statement. Unfortunately we need to know the exact type in certain cases. """ if isinstance(node.parent, astroid.If): orelse = node.parent.orelse # current if node must directly follow an "else" if orelse and orelse == [node]: if (node.lineno, node.col_offset) in self._elifs: return True return False def _check_simplifiable_if(self, node): """Check if the given if node can be simplified. The if statement can be reduced to a boolean expression in some cases. For instance, if there are two branches and both of them return a boolean value that depends on the result of the statement's test, then this can be reduced to `bool(test)` without losing any functionality. """ if self._is_actual_elif(node): # Not interested in if statements with multiple branches. return if len(node.orelse) != 1 or len(node.body) != 1: return # Check if both branches can be reduced. first_branch = node.body[0] else_branch = node.orelse[0] if isinstance(first_branch, astroid.Return): if not isinstance(else_branch, astroid.Return): return first_branch_is_bool = self._is_bool_const(first_branch) else_branch_is_bool = self._is_bool_const(else_branch) reduced_to = "'return bool(test)'" elif isinstance(first_branch, astroid.Assign): if not isinstance(else_branch, astroid.Assign): return # Check if we assign to the same value first_branch_targets = [ target.name for target in first_branch.targets if isinstance(target, astroid.AssignName) ] else_branch_targets = [ target.name for target in else_branch.targets if isinstance(target, astroid.AssignName) ] if sorted(first_branch_targets) != sorted(else_branch_targets): return first_branch_is_bool = self._is_bool_const(first_branch) else_branch_is_bool = self._is_bool_const(else_branch) reduced_to = "'var = bool(test)'" else: return if not first_branch_is_bool or not else_branch_is_bool: return if not first_branch.value.value: # This is a case that can't be easily simplified and # if it can be simplified, it will usually result in a # code that's harder to understand and comprehend. # Let's take for instance `arg and arg <= 3`. 
This could theoretically be # reduced to `not arg or arg > 3`, but the net result is that now the # condition is harder to understand, because it requires understanding of # an extra clause: # * first, there is the negation of truthness with `not arg` # * the second clause is `arg > 3`, which occurs when arg has a # a truth value, but it implies that `arg > 3` is equivalent # with `arg and arg > 3`, which means that the user must # think about this assumption when evaluating `arg > 3`. # The original form is easier to grasp. return self.add_message('simplifiable-if-statement', node=node, args=(reduced_to,)) def process_tokens(self, tokens): # Process tokens and look for 'if' or 'elif' for index, token in enumerate(tokens): token_string = token[1] if token_string == 'elif': # AST exists by the time process_tokens is called, so # it's safe to assume tokens[index+1] # exists. tokens[index+1][2] is the elif's position as # reported by CPython and PyPy, # tokens[index][2] is the actual position and also is # reported by IronPython. self._elifs.extend([tokens[index][2], tokens[index+1][2]]) elif is_trailing_comma(tokens, index): if self.linter.is_message_enabled('trailing-comma-tuple'): self.add_message('trailing-comma-tuple', line=token.start[0]) def leave_module(self, _): self._init() @utils.check_messages('too-many-nested-blocks') def visit_tryexcept(self, node): self._check_nested_blocks(node) visit_tryfinally = visit_tryexcept visit_while = visit_tryexcept def _check_redefined_argument_from_local(self, name_node): if self._dummy_rgx and self._dummy_rgx.match(name_node.name): return if not name_node.lineno: # Unknown position, maybe it is a manually built AST? return scope = name_node.scope() if not isinstance(scope, astroid.FunctionDef): return for defined_argument in scope.args.nodes_of_class(astroid.AssignName): if defined_argument.name == name_node.name: self.add_message('redefined-argument-from-local', node=name_node, args=(name_node.name, )) @utils.check_messages('redefined-argument-from-local', 'too-many-nested-blocks') def visit_for(self, node): self._check_nested_blocks(node) for name in node.target.nodes_of_class(astroid.AssignName): self._check_redefined_argument_from_local(name) @utils.check_messages('redefined-argument-from-local') def visit_excepthandler(self, node): if node.name and isinstance(node.name, astroid.AssignName): self._check_redefined_argument_from_local(node.name) @utils.check_messages('redefined-argument-from-local') def visit_with(self, node): for _, names in node.items: if not names: continue for name in names.nodes_of_class(astroid.AssignName): self._check_redefined_argument_from_local(name) def _check_superfluous_else_return(self, node): if not node.orelse: # Not interested in if statements without else. 
return if _if_statement_is_always_returning(node) and not self._is_actual_elif(node): self.add_message('no-else-return', node=node) def _check_consider_get(self, node): def type_and_name_are_equal(node_a, node_b): for _type in [astroid.Name, astroid.AssignName]: if all(isinstance(_node, _type) for _node in [node_a, node_b]): return node_a.name == node_b.name if all(isinstance(_node, astroid.Const) for _node in [node_a, node_b]): return node_a.value == node_b.value return False if_block_ok = ( isinstance(node.test, astroid.Compare) and len(node.body) == 1 and isinstance(node.body[0], astroid.Assign) and isinstance(node.body[0].value, astroid.Subscript) and type_and_name_are_equal(node.body[0].value.value, node.test.ops[0][1]) and type_and_name_are_equal(node.body[0].value.slice.value, node.test.left) and len(node.body[0].targets) == 1 and isinstance(utils.safe_infer(node.test.ops[0][1]), astroid.Dict)) if if_block_ok and not node.orelse: self.add_message('consider-using-get', node=node) elif (if_block_ok and len(node.orelse) == 1 and isinstance(node.orelse[0], astroid.Assign) and type_and_name_are_equal(node.orelse[0].targets[0], node.body[0].targets[0]) and len(node.orelse[0].targets) == 1): self.add_message('consider-using-get', node=node) @utils.check_messages('too-many-nested-blocks', 'simplifiable-if-statement', 'no-else-return', 'consider-using-get') def visit_if(self, node): self._check_simplifiable_if(node) self._check_nested_blocks(node) self._check_superfluous_else_return(node) self._check_consider_get(node) @utils.check_messages('too-many-nested-blocks', 'inconsistent-return-statements', 'useless-return') def leave_functiondef(self, node): # check left-over nested blocks stack self._emit_nested_blocks_message_if_needed(self._nested_blocks) # new scope = reinitialize the stack of nested blocks self._nested_blocks = [] # check consistent return statements self._check_consistent_returns(node) # check for single return or return None at the end self._check_return_at_the_end(node) self._return_nodes[node.name] = [] @utils.check_messages('stop-iteration-return') def visit_raise(self, node): self._check_stop_iteration_inside_generator(node) def _check_stop_iteration_inside_generator(self, node): """Check if an exception of type StopIteration is raised inside a generator""" frame = node.frame() if not isinstance(frame, astroid.FunctionDef) or not frame.is_generator(): return if utils.node_ignores_exception(node, StopIteration): return if not node.exc: return exc = utils.safe_infer(node.exc) if exc is None or exc is astroid.Uninferable: return if self._check_exception_inherit_from_stopiteration(exc): self.add_message('stop-iteration-return', node=node) @staticmethod def _check_exception_inherit_from_stopiteration(exc): """Return True if the exception node in argument inherit from StopIteration""" stopiteration_qname = '{}.StopIteration'.format(utils.EXCEPTIONS_MODULE) return any(_class.qname() == stopiteration_qname for _class in exc.mro()) @utils.check_messages('stop-iteration-return') def visit_call(self, node): self._check_raising_stopiteration_in_generator_next_call(node) def _check_raising_stopiteration_in_generator_next_call(self, node): """Check if a StopIteration exception is raised by the call to next function If the next value has a default value, then do not add message. 
:param node: Check to see if this Call node is a next function :type node: :class:`astroid.node_classes.Call` """ def _looks_like_infinite_iterator(param): inferred = utils.safe_infer(param) if inferred is not None or inferred is not astroid.Uninferable: return inferred.qname() in KNOWN_INFINITE_ITERATORS return False inferred = utils.safe_infer(node.func) if getattr(inferred, 'name', '') == 'next': frame = node.frame() # The next builtin can only have up to two # positional arguments and no keyword arguments has_sentinel_value = len(node.args) > 1 if (isinstance(frame, astroid.FunctionDef) and frame.is_generator() and not has_sentinel_value and not utils.node_ignores_exception(node, StopIteration) and not _looks_like_infinite_iterator(node.args[0])): self.add_message('stop-iteration-return', node=node) def _check_nested_blocks(self, node): """Update and check the number of nested blocks """ # only check block levels inside functions or methods if not isinstance(node.scope(), astroid.FunctionDef): return # messages are triggered on leaving the nested block. Here we save the # stack in case the current node isn't nested in the previous one nested_blocks = self._nested_blocks[:] if node.parent == node.scope(): self._nested_blocks = [node] else: # go through ancestors from the most nested to the less for ancestor_node in reversed(self._nested_blocks): if ancestor_node == node.parent: break self._nested_blocks.pop() # if the node is an elif, this should not be another nesting level if isinstance(node, astroid.If) and self._is_actual_elif(node): if self._nested_blocks: self._nested_blocks.pop() self._nested_blocks.append(node) # send message only once per group of nested blocks if len(nested_blocks) > len(self._nested_blocks): self._emit_nested_blocks_message_if_needed(nested_blocks) def _emit_nested_blocks_message_if_needed(self, nested_blocks): if len(nested_blocks) > self.config.max_nested_blocks: self.add_message('too-many-nested-blocks', node=nested_blocks[0], args=(len(nested_blocks), self.config.max_nested_blocks)) @staticmethod def _duplicated_isinstance_types(node): """Get the duplicated types from the underlying isinstance calls. :param astroid.BoolOp node: Node which should contain a bunch of isinstance calls. :returns: Dictionary of the comparison objects from the isinstance calls, to duplicate values from consecutive calls. 
:rtype: dict """ duplicated_objects = set() all_types = collections.defaultdict(set) for call in node.values: if not isinstance(call, astroid.Call) or len(call.args) != 2: continue inferred = utils.safe_infer(call.func) if not inferred or not utils.is_builtin_object(inferred): continue if inferred.name != 'isinstance': continue isinstance_object = call.args[0].as_string() isinstance_types = call.args[1] if isinstance_object in all_types: duplicated_objects.add(isinstance_object) if isinstance(isinstance_types, astroid.Tuple): elems = [class_type.as_string() for class_type in isinstance_types.itered()] else: elems = [isinstance_types.as_string()] all_types[isinstance_object].update(elems) # Remove all keys which not duplicated return {key: value for key, value in all_types.items() if key in duplicated_objects} def _check_consider_merging_isinstance(self, node): """Check isinstance calls which can be merged together.""" if node.op != 'or': return first_args = self._duplicated_isinstance_types(node) for duplicated_name, class_names in first_args.items(): names = sorted(name for name in class_names) self.add_message('consider-merging-isinstance', node=node, args=(duplicated_name, ', '.join(names))) def _check_consider_using_in(self, node): allowed_ops = {'or': '==', 'and': '!='} if node.op not in allowed_ops or len(node.values) < 2: return for value in node.values: if (not isinstance(value, astroid.Compare) or len(value.ops) != 1 or value.ops[0][0] not in allowed_ops[node.op]): return for comparable in value.left, value.ops[0][1]: if isinstance(comparable, astroid.Call): return # Gather variables and values from comparisons variables, values = [], [] for value in node.values: variable_set = set() for comparable in value.left, value.ops[0][1]: if isinstance(comparable, astroid.Name): variable_set.add(comparable.as_string()) values.append(comparable.as_string()) variables.append(variable_set) # Look for (common-)variables that occur in all comparisons common_variables = reduce(lambda a, b: a.intersection(b), variables) if not common_variables: return # Gather information for the suggestion common_variable = sorted(list(common_variables))[0] comprehension = 'in' if node.op == 'or' else 'not in' values = list(collections.OrderedDict.fromkeys(values)) values.remove(common_variable) values_string = ', '.join(values) if len(values) != 1 else values[0] + ',' suggestion = "%s %s (%s)" % (common_variable, comprehension, values_string) self.add_message('consider-using-in', node=node, args=(suggestion,)) @utils.check_messages('consider-merging-isinstance', 'consider-using-in') def visit_boolop(self, node): self._check_consider_merging_isinstance(node) self._check_consider_using_in(node) @staticmethod def _is_simple_assignment(node): return (isinstance(node, astroid.Assign) and len(node.targets) == 1 and isinstance(node.targets[0], astroid.node_classes.AssignName) and isinstance(node.value, astroid.node_classes.Name)) def _check_swap_variables(self, node): if not node.next_sibling() or not node.next_sibling().next_sibling(): return assignments = [ node, node.next_sibling(), node.next_sibling().next_sibling() ] if not all(self._is_simple_assignment(node) for node in assignments): return if any(node in self._reported_swap_nodes for node in assignments): return left = [node.targets[0].name for node in assignments] right = [node.value.name for node in assignments] if left[0] == right[-1] and left[1:] == right[:-1]: self._reported_swap_nodes.update(assignments) message = 'consider-swap-variables' 
self.add_message(message, node=node) @utils.check_messages('simplify-boolean-expression', 'consider-using-ternary', 'consider-swap-variables') def visit_assign(self, node): self._check_swap_variables(node) if self._is_and_or_ternary(node.value): cond, truth_value, false_value = self._and_or_ternary_arguments(node.value) elif self._is_seq_based_ternary(node.value): cond, truth_value, false_value = self._seq_based_ternary_params(node.value) else: return if truth_value.bool_value() is False: message = 'simplify-boolean-expression' suggestion = false_value.as_string() else: message = 'consider-using-ternary' suggestion = '{truth} if {cond} else {false}'.format( truth=truth_value.as_string(), cond=cond.as_string(), false=false_value.as_string() ) self.add_message(message, node=node, args=(suggestion,)) visit_return = visit_assign def _check_consider_using_join(self, aug_assign): """ We start with the augmented assignment and work our way upwards. Names of variables for nodes if match successful: result = '' # assign for number in ['1', '2', '3'] # for_loop result += number # aug_assign """ for_loop = aug_assign.parent if not isinstance(for_loop, astroid.node_classes.For): return assign = for_loop.previous_sibling() if not isinstance(assign, astroid.node_classes.Assign): return result_assign_names = {target.name for target in assign.targets} is_concat_loop = (aug_assign.op == '+=' and isinstance(aug_assign.target, astroid.AssignName) and len(for_loop.body) == 1 and aug_assign.target.name in result_assign_names and isinstance(assign.value, astroid.node_classes.Const) and isinstance(assign.value.value, str) and isinstance(aug_assign.value, astroid.node_classes.Name) and aug_assign.value.name == for_loop.target.name) if is_concat_loop: self.add_message('consider-using-join', node=aug_assign) @utils.check_messages('consider-using-join') def visit_augassign(self, node): self._check_consider_using_join(node) @staticmethod def _is_and_or_ternary(node): """ Returns true if node is 'condition and true_value else false_value' form. All of: condition, true_value and false_value should not be a complex boolean expression """ return (isinstance(node, astroid.BoolOp) and node.op == 'or' and len(node.values) == 2 and isinstance(node.values[0], astroid.BoolOp) and not isinstance(node.values[1], astroid.BoolOp) and node.values[0].op == 'and' and not isinstance(node.values[0].values[1], astroid.BoolOp) and len(node.values[0].values) == 2) @staticmethod def _and_or_ternary_arguments(node): false_value = node.values[1] condition, true_value = node.values[0].values return condition, true_value, false_value @staticmethod def _is_seq_based_ternary(node): """Returns true if node is '[false_value,true_value][condition]' form""" return (isinstance(node, astroid.Subscript) and isinstance(node.value, (astroid.Tuple, astroid.List)) and len(node.value.elts) == 2 and isinstance(node.slice, astroid.Index)) @staticmethod def _seq_based_ternary_params(node): false_value, true_value = node.value.elts condition = node.slice.value return condition, true_value, false_value def visit_functiondef(self, node): self._return_nodes[node.name] = [] return_nodes = node.nodes_of_class(astroid.Return) self._return_nodes[node.name] = [_rnode for _rnode in return_nodes if _rnode.frame() == node.frame()] def _check_consistent_returns(self, node): """Check that all return statements inside a function are consistent. 
Return statements are consistent if: - all returns are explicit and if there is no implicit return; - all returns are empty and if there is, possibly, an implicit return. Args: node (astroid.FunctionDef): the function holding the return statements. """ # explicit return statements are those with a not None value explicit_returns = [_node for _node in self._return_nodes[node.name] if _node.value is not None] if not explicit_returns: return if (len(explicit_returns) == len(self._return_nodes[node.name]) and self._is_node_return_ended(node)): return self.add_message('inconsistent-return-statements', node=node) def _is_node_return_ended(self, node): """Check if the node ends with an explicit return statement. Args: node (astroid.NodeNG): node to be checked. Returns: bool: True if the node ends with an explicit statement, False otherwise. """ # Recursion base case if isinstance(node, astroid.Return): return True if isinstance(node, astroid.Call): try: funcdef_node = node.func.inferred()[0] if self._is_function_def_never_returning(funcdef_node): return True except astroid.InferenceError: pass # Avoid the check inside while loop as we don't know # if they will be completed if isinstance(node, astroid.While): return True if isinstance(node, astroid.Raise): # a Raise statement doesn't need to end with a return statement # but if the exception raised is handled, then the handler has to # ends with a return statement if not node.exc: # Ignore bare raises return True if not utils.is_node_inside_try_except(node): # If the raise statement is not inside a try/except statement # then the exception is raised and cannot be caught. No need # to infer it. return True exc = utils.safe_infer(node.exc) if exc is None or exc is astroid.Uninferable: return False exc_name = exc.pytype().split('.')[-1] handlers = utils.get_exception_handlers(node, exc_name) handlers = list(handlers) if handlers is not None else [] if handlers: # among all the handlers handling the exception at least one # must end with a return statement return any(self._is_node_return_ended(_handler) for _handler in handlers) # if no handlers handle the exception then it's ok return True if isinstance(node, astroid.If): # if statement is returning if there are exactly two return statements in its # children : one for the body part, the other for the orelse part # Do not check if inner function definition are return ended. return_stmts = [self._is_node_return_ended(_child) for _child in node.get_children() if not isinstance(_child, astroid.FunctionDef)] return sum(return_stmts) == 2 # recurses on the children of the node except for those which are except handler # because one cannot be sure that the handler will really be used return any(self._is_node_return_ended(_child) for _child in node.get_children() if not isinstance(_child, astroid.ExceptHandler)) def _is_function_def_never_returning(self, node): """Return True if the function never returns. False otherwise. Args: node (astroid.FunctionDef): function definition node to be analyzed. Returns: bool: True if the function never returns, False otherwise. """ try: return node.qname() in self._never_returning_functions except TypeError: return False def _check_return_at_the_end(self, node): """Check for presence of a *single* return statement at the end of a function. "return" or "return None" are useless because None is the default return type if they are missing. NOTE: produces a message only if there is a single return statement in the function body. 
Otherwise _check_consistent_returns() is called! Per its implementation and PEP8 we can have a "return None" at the end of the function body if there are other return statements before that! """ if len(self._return_nodes[node.name]) > 1: return if not node.body: return last = node.body[-1] if isinstance(last, astroid.Return): # e.g. "return" if last.value is None: self.add_message('useless-return', node=node) # return None" elif isinstance(last.value, astroid.Const) and (last.value.value is None): self.add_message('useless-return', node=node) class RecommandationChecker(checkers.BaseChecker): __implements__ = (interfaces.IAstroidChecker,) name = 'refactoring' msgs = {'C0200': ('Consider using enumerate instead of iterating with range and len', 'consider-using-enumerate', 'Emitted when code that iterates with range and len is ' 'encountered. Such code can be simplified by using the ' 'enumerate builtin.'), 'C0201': ('Consider iterating the dictionary directly instead of calling .keys()', 'consider-iterating-dictionary', 'Emitted when the keys of a dictionary are iterated through the .keys() ' 'method. It is enough to just iterate through the dictionary itself, as ' 'in "for key in dictionary".'), } @staticmethod def _is_builtin(node, function): inferred = utils.safe_infer(node) if not inferred: return False return utils.is_builtin_object(inferred) and inferred.name == function @utils.check_messages('consider-iterating-dictionary') def visit_call(self, node): inferred = utils.safe_infer(node.func) if not inferred: return if not isinstance(inferred, astroid.BoundMethod): return if not isinstance(inferred.bound, astroid.Dict) or inferred.name != 'keys': return if isinstance(node.parent, (astroid.For, astroid.Comprehension)): self.add_message('consider-iterating-dictionary', node=node) @utils.check_messages('consider-using-enumerate') def visit_for(self, node): """Emit a convention whenever range and len are used for indexing.""" # Verify that we have a `range([start], len(...), [stop])` call and # that the object which is iterated is used as a subscript in the # body of the for. # Is it a proper range call? if not isinstance(node.iter, astroid.Call): return if not self._is_builtin(node.iter.func, 'range'): return if len(node.iter.args) == 2 and not _is_constant_zero(node.iter.args[0]): return if len(node.iter.args) > 2: return # Is it a proper len call? if not isinstance(node.iter.args[-1], astroid.Call): return second_func = node.iter.args[-1].func if not self._is_builtin(second_func, 'len'): return len_args = node.iter.args[-1].args if not len_args or len(len_args) != 1: return iterating_object = len_args[0] if not isinstance(iterating_object, astroid.Name): return # Verify that the body of the for loop uses a subscript # with the object that was iterated. This uses some heuristics # in order to make sure that the same object is used in the # for body. for child in node.body: for subscript in child.nodes_of_class(astroid.Subscript): if not isinstance(subscript.value, astroid.Name): continue if not isinstance(subscript.slice, astroid.Index): continue if not isinstance(subscript.slice.value, astroid.Name): continue if subscript.slice.value.name != node.target.name: continue if iterating_object.name != subscript.value.name: continue if subscript.value.scope() != node.scope(): # Ignore this subscript if it's not in the same # scope. This means that in the body of the for # loop, another scope was created, where the same # name for the iterating object was used. 
continue self.add_message('consider-using-enumerate', node=node) return class NotChecker(checkers.BaseChecker): """checks for too many not in comparison expressions - "not not" should trigger a warning - "not" followed by a comparison should trigger a warning """ __implements__ = (interfaces.IAstroidChecker,) msgs = {'C0113': ('Consider changing "%s" to "%s"', 'unneeded-not', 'Used when a boolean expression contains an unneeded ' 'negation.'), } name = 'basic' reverse_op = {'<': '>=', '<=': '>', '>': '<=', '>=': '<', '==': '!=', '!=': '==', 'in': 'not in', 'is': 'is not'} # sets are not ordered, so for example "not set(LEFT_VALS) <= set(RIGHT_VALS)" is # not equivalent to "set(LEFT_VALS) > set(RIGHT_VALS)" skipped_nodes = (astroid.Set,) # 'builtins' py3, '__builtin__' py2 skipped_classnames = ['%s.%s' % (builtins.__name__, qname) for qname in ('set', 'frozenset')] @utils.check_messages('unneeded-not') def visit_unaryop(self, node): if node.op != 'not': return operand = node.operand if isinstance(operand, astroid.UnaryOp) and operand.op == 'not': self.add_message('unneeded-not', node=node, args=(node.as_string(), operand.operand.as_string())) elif isinstance(operand, astroid.Compare): left = operand.left # ignore multiple comparisons if len(operand.ops) > 1: return operator, right = operand.ops[0] if operator not in self.reverse_op: return # Ignore __ne__ as function of __eq__ frame = node.frame() if frame.name == '__ne__' and operator == '==': return for _type in (utils.node_type(left), utils.node_type(right)): if not _type: return if isinstance(_type, self.skipped_nodes): return if (isinstance(_type, astroid.Instance) and _type.qname() in self.skipped_classnames): return suggestion = '%s %s %s' % (left.as_string(), self.reverse_op[operator], right.as_string()) self.add_message('unneeded-not', node=node, args=(node.as_string(), suggestion)) def _is_len_call(node): """Checks if node is len(SOMETHING).""" return (isinstance(node, astroid.Call) and isinstance(node.func, astroid.Name) and node.func.name == 'len') def _is_constant_zero(node): return isinstance(node, astroid.Const) and node.value == 0 def _has_constant_value(node, value): return isinstance(node, astroid.Const) and node.value == value def _node_is_test_condition(node): """ Checks if node is an if, while, assert or if expression statement.""" return isinstance(node, (astroid.If, astroid.While, astroid.Assert, astroid.IfExp)) class LenChecker(checkers.BaseChecker): """Checks for incorrect usage of len() inside conditions. Pep8 states: For sequences, (strings, lists, tuples), use the fact that empty sequences are false. Yes: if not seq: if seq: No: if len(seq): if not len(seq): Problems detected: * if len(sequence): * if not len(sequence): * if len(sequence) == 0: * if len(sequence) != 0: * if len(sequence) > 0: * if len(sequence) < 1: * if len(sequence) <= 0: """ __implements__ = (interfaces.IAstroidChecker,) # configuration section name name = 'len' msgs = {'C1801': ('Do not use `len(SEQUENCE)` to determine if a sequence is empty', 'len-as-condition', 'Used when Pylint detects that len(sequence) is being used inside ' 'a condition to determine if a sequence is empty. Instead of ' 'comparing the length to 0, rely on the fact that empty sequences ' 'are false.'), } priority = -2 options = () @utils.check_messages('len-as-condition') def visit_call(self, node): # a len(S) call is used inside a test condition # could be if, while, assert or if expression statement # e.g. 
`if len(S):` if _is_len_call(node): # the len() call could also be nested together with other # boolean operations, e.g. `if z or len(x):` parent = node.parent while isinstance(parent, astroid.BoolOp): parent = parent.parent # we're finally out of any nested boolean operations so check if # this len() call is part of a test condition if not _node_is_test_condition(parent): return if not (node is parent.test or parent.test.parent_of(node)): return self.add_message('len-as-condition', node=node) @utils.check_messages('len-as-condition') def visit_unaryop(self, node): """`not len(S)` must become `not S` regardless if the parent block is a test condition or something else (boolean expression) e.g. `if not len(S):`""" if isinstance(node, astroid.UnaryOp) and node.op == 'not' and _is_len_call(node.operand): self.add_message('len-as-condition', node=node) @utils.check_messages('len-as-condition') def visit_compare(self, node): # compare nodes are trickier because the len(S) expression # may be somewhere in the middle of the node # note: astroid.Compare has the left most operand in node.left # while the rest are a list of tuples in node.ops # the format of the tuple is ('compare operator sign', node) # here we squash everything into `ops` to make it easier for processing later ops = [('', node.left)] ops.extend(node.ops) ops = list(itertools.chain(*ops)) for ops_idx in range(len(ops) - 2): op_1 = ops[ops_idx] op_2 = ops[ops_idx + 1] op_3 = ops[ops_idx + 2] error_detected = False # 0 ?? len() if _is_constant_zero(op_1) and op_2 in ['==', '!=', '<', '>='] and _is_len_call(op_3): error_detected = True # len() ?? 0 elif _is_len_call(op_1) and op_2 in ['==', '!=', '>', '<='] and _is_constant_zero(op_3): error_detected = True elif _has_constant_value(op_1, value=1) and op_2 == '>' and _is_len_call(op_3): error_detected = True elif _is_len_call(op_1) and op_2 == '<' and _has_constant_value(op_3, value=1): error_detected = True if error_detected: parent = node.parent # traverse the AST to figure out if this comparison was part of # a test condition while parent and not _node_is_test_condition(parent): parent = parent.parent # report only if this len() comparison is part of a test condition # for example: return len() > 0 should not report anything if _node_is_test_condition(parent): self.add_message('len-as-condition', node=node) def is_trailing_comma(tokens, index): """Check if the given token is a trailing comma :param tokens: Sequence of modules tokens :type tokens: list[tokenize.TokenInfo] :param int index: Index of token under check in tokens :returns: True if the token is a comma which trails an expression :rtype: bool """ token = tokens[index] if token.exact_type != tokenize.COMMA: return False # Must have remaining tokens on the same line such as NEWLINE left_tokens = itertools.islice(tokens, index + 1, None) same_line_remaining_tokens = list(itertools.takewhile( lambda other_token, _token=token: other_token.start[0] == _token.start[0], left_tokens )) # Note: If the newline is tokenize.NEWLINE and not tokenize.NL # then the newline denotes the end of expression is_last_element = all( other_token.type in (tokenize.NEWLINE, tokenize.COMMENT) for other_token in same_line_remaining_tokens ) if not same_line_remaining_tokens or not is_last_element: return False def get_curline_index_start(): """Get the index denoting the start of the current line""" for subindex, token in enumerate(reversed(tokens[:index])): # See Lib/tokenize.py and Lib/token.py in cpython for more info if token.type in 
(tokenize.NEWLINE, tokenize.NL): return index - subindex return 0 curline_start = get_curline_index_start() for prevtoken in tokens[curline_start:index]: if '=' in prevtoken.string: return True return False def register(linter): """Required method to auto register this checker.""" linter.register_checker(RefactoringChecker(linter)) linter.register_checker(NotChecker(linter)) linter.register_checker(RecommandationChecker(linter)) linter.register_checker(LenChecker(linter))
1
10103
I'd rephrase it as `Simplify chained comparison between the operands`.
PyCQA-pylint
py
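For context on the message being reworded in the preceding pylint record, the refactoring that message describes is pylint's chained-comparison suggestion. The snippet below is an illustrative sketch of the pattern being flagged and the rewrite being proposed; it is not code from the pylint patch itself.

```python
# Illustrative only: the kind of boolean expression the chained-comparison
# check flags, and the simplification its message suggests.

def in_range_verbose(a, b, c):
    # flagged: two comparisons joined with `and` share the operand `b`
    return a < b and b < c

def in_range_simplified(a, b, c):
    # suggested rewrite: "Simplify chained comparison between the operands"
    return a < b < c

assert in_range_verbose(1, 2, 3) == in_range_simplified(1, 2, 3)
```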
@@ -296,11 +296,10 @@ class ProductDataFixture $this->clearResources(); $this->productsByCatnum = []; - $onlyForFirstDomain = false; $this->productDataReferenceInjector->loadReferences( $this->productDataFixtureLoader, $this->persistentReferenceFacade, - $onlyForFirstDomain + 2 ); }
1
<?php namespace Shopsys\FrameworkBundle\DataFixtures\Performance; use Doctrine\ORM\EntityManagerInterface; use Faker\Generator as Faker; use Shopsys\FrameworkBundle\Component\Console\ProgressBarFactory; use Shopsys\FrameworkBundle\Component\DataFixture\PersistentReferenceFacade; use Shopsys\FrameworkBundle\Component\Doctrine\SqlLoggerFacade; use Shopsys\FrameworkBundle\DataFixtures\Demo\ProductDataFixtureCsvReader; use Shopsys\FrameworkBundle\DataFixtures\Demo\ProductDataFixtureLoader; use Shopsys\FrameworkBundle\DataFixtures\ProductDataFixtureReferenceInjector; use Shopsys\FrameworkBundle\Model\Category\Category; use Shopsys\FrameworkBundle\Model\Category\CategoryRepository; use Shopsys\FrameworkBundle\Model\Product\Availability\ProductAvailabilityRecalculationScheduler; use Shopsys\FrameworkBundle\Model\Product\Pricing\ProductPriceRecalculationScheduler; use Shopsys\FrameworkBundle\Model\Product\Product; use Shopsys\FrameworkBundle\Model\Product\ProductData; use Shopsys\FrameworkBundle\Model\Product\ProductFacade; use Shopsys\FrameworkBundle\Model\Product\ProductVariantFacade; use Symfony\Component\Console\Output\OutputInterface; class ProductDataFixture { const BATCH_SIZE = 1000; const FIRST_PERFORMANCE_PRODUCT = 'first_performance_product'; /** * @var int */ private $productTotalCount; /** * @var \Doctrine\ORM\EntityManagerInterface */ private $em; /** * @var \Shopsys\FrameworkBundle\Model\Product\ProductFacade */ private $productFacade; /** * @var \Shopsys\FrameworkBundle\DataFixtures\Demo\ProductDataFixtureLoader */ private $productDataFixtureLoader; /** * @var \Shopsys\FrameworkBundle\Component\Doctrine\SqlLoggerFacade */ private $sqlLoggerFacade; /** * @var \Shopsys\FrameworkBundle\Model\Product\ProductVariantFacade */ private $productVariantFacade; /** * @var \Shopsys\FrameworkBundle\DataFixtures\ProductDataFixtureReferenceInjector */ private $productDataReferenceInjector; /** * @var \Shopsys\FrameworkBundle\Component\DataFixture\PersistentReferenceFacade */ private $persistentReferenceFacade; /** * @var \Shopsys\FrameworkBundle\Model\Category\CategoryRepository */ private $categoryRepository; /** * @var int */ private $countImported; /** * @var int */ private $demoDataIterationCounter; /** * @var \Shopsys\FrameworkBundle\Model\Product\Product[] */ private $productsByCatnum; /** * @var \Faker\Generator */ private $faker; /** * @var \Shopsys\FrameworkBundle\Model\Product\Availability\ProductAvailabilityRecalculationScheduler */ private $productAvailabilityRecalculationScheduler; /** * @var \Shopsys\FrameworkBundle\Model\Product\Pricing\ProductPriceRecalculationScheduler */ private $productPriceRecalculationScheduler; /** * @var \Shopsys\FrameworkBundle\DataFixtures\Demo\ProductDataFixtureCsvReader */ private $productDataFixtureCsvReader; /** * @var \Shopsys\FrameworkBundle\Component\Console\ProgressBarFactory */ private $progressBarFactory; /** * @param int $productTotalCount * @param \Doctrine\ORM\EntityManagerInterface $em * @param \Shopsys\FrameworkBundle\Model\Product\ProductFacade $productFacade * @param \Shopsys\FrameworkBundle\DataFixtures\Demo\ProductDataFixtureLoader $productDataFixtureLoader * @param \Shopsys\FrameworkBundle\Component\Doctrine\SqlLoggerFacade $sqlLoggerFacade * @param \Shopsys\FrameworkBundle\Model\Product\ProductVariantFacade $productVariantFacade * @param \Shopsys\FrameworkBundle\DataFixtures\ProductDataFixtureReferenceInjector $productDataReferenceInjector * @param \Shopsys\FrameworkBundle\Component\DataFixture\PersistentReferenceFacade 
$persistentReferenceFacade * @param \Shopsys\FrameworkBundle\Model\Category\CategoryRepository $categoryRepository * @param \Faker\Generator $faker * @param \Shopsys\FrameworkBundle\Model\Product\Availability\ProductAvailabilityRecalculationScheduler $productAvailabilityRecalculationScheduler * @param \Shopsys\FrameworkBundle\Model\Product\Pricing\ProductPriceRecalculationScheduler $productPriceRecalculationScheduler * @param \Shopsys\FrameworkBundle\DataFixtures\Demo\ProductDataFixtureCsvReader $productDataFixtureCsvReader * @param \Shopsys\FrameworkBundle\Component\Console\ProgressBarFactory $progressBarFactory */ public function __construct( $productTotalCount, EntityManagerInterface $em, ProductFacade $productFacade, ProductDataFixtureLoader $productDataFixtureLoader, SqlLoggerFacade $sqlLoggerFacade, ProductVariantFacade $productVariantFacade, ProductDataFixtureReferenceInjector $productDataReferenceInjector, PersistentReferenceFacade $persistentReferenceFacade, CategoryRepository $categoryRepository, Faker $faker, ProductAvailabilityRecalculationScheduler $productAvailabilityRecalculationScheduler, ProductPriceRecalculationScheduler $productPriceRecalculationScheduler, ProductDataFixtureCsvReader $productDataFixtureCsvReader, ProgressBarFactory $progressBarFactory ) { $this->productTotalCount = $productTotalCount; $this->em = $em; $this->productFacade = $productFacade; $this->productDataFixtureLoader = $productDataFixtureLoader; $this->sqlLoggerFacade = $sqlLoggerFacade; $this->productVariantFacade = $productVariantFacade; $this->productDataReferenceInjector = $productDataReferenceInjector; $this->persistentReferenceFacade = $persistentReferenceFacade; $this->categoryRepository = $categoryRepository; $this->countImported = 0; $this->demoDataIterationCounter = 0; $this->faker = $faker; $this->productAvailabilityRecalculationScheduler = $productAvailabilityRecalculationScheduler; $this->productPriceRecalculationScheduler = $productPriceRecalculationScheduler; $this->productDataFixtureCsvReader = $productDataFixtureCsvReader; $this->progressBarFactory = $progressBarFactory; } /** * @param \Symfony\Component\Console\Output\OutputInterface $output */ public function load(OutputInterface $output) { // Sql logging during mass data import makes memory leak $this->sqlLoggerFacade->temporarilyDisableLogging(); $this->cleanAndLoadReferences(); $csvRows = $this->productDataFixtureCsvReader->getProductDataFixtureCsvRows(); $variantCatnumsByMainVariantCatnum = $this->productDataFixtureLoader->getVariantCatnumsIndexedByMainVariantCatnum( $csvRows ); $progressBar = $this->progressBarFactory->create($output, $this->productTotalCount); while ($this->countImported < $this->productTotalCount) { $row = next($csvRows); if ($row === false) { $this->createVariants($variantCatnumsByMainVariantCatnum); $row = reset($csvRows); $this->demoDataIterationCounter++; } $productData = $this->productDataFixtureLoader->createProductDataFromRowForFirstDomain($row); $this->productDataFixtureLoader->updateProductDataFromCsvRowForSecondDomain($productData, $row); $this->makeProductDataUnique($productData); $this->setRandomPerformanceCategoriesToProductData($productData); $product = $this->productFacade->create($productData); if ($this->countImported === 0) { $this->persistentReferenceFacade->persistReference(self::FIRST_PERFORMANCE_PRODUCT, $product); } if ($product->getCatnum() !== null) { $this->productsByCatnum[$product->getCatnum()] = $product; } if ($this->countImported % self::BATCH_SIZE === 0) { $currentKey = 
key($csvRows); $this->cleanAndLoadReferences(); $this->setArrayPointerByKey($csvRows, $currentKey); } $this->countImported++; $progressBar->setProgress($this->countImported); } $this->createVariants($variantCatnumsByMainVariantCatnum); $progressBar->finish(); $this->em->clear(); $this->sqlLoggerFacade->reenableLogging(); } /** * @param string[][] $variantCatnumsByMainVariantCatnum */ private function createVariants(array $variantCatnumsByMainVariantCatnum) { $uniqueIndex = $this->getUniqueIndex(); foreach ($variantCatnumsByMainVariantCatnum as $mainVariantCatnum => $variantsCatnums) { try { $mainProduct = $this->getProductByCatnum($mainVariantCatnum . $uniqueIndex); $variants = []; foreach ($variantsCatnums as $variantCatnum) { $variants[] = $this->getProductByCatnum($variantCatnum . $uniqueIndex); } $this->productVariantFacade->createVariant($mainProduct, $variants); } catch (\Doctrine\ORM\NoResultException $e) { continue; } } } /** * @param string $catnum * @return \Shopsys\FrameworkBundle\Model\Product\Product */ private function getProductByCatnum($catnum) { if (!array_key_exists($catnum, $this->productsByCatnum)) { $query = $this->em->createQuery('SELECT p FROM ' . Product::class . ' p WHERE p.catnum = :catnum') ->setParameter('catnum', $catnum); $this->productsByCatnum[$catnum] = $query->getSingleResult(); } return $this->productsByCatnum[$catnum]; } /** * @param \Shopsys\FrameworkBundle\Model\Product\ProductData $productData */ private function makeProductDataUnique(ProductData $productData) { $matches = []; $uniqueIndex = $this->getUniqueIndex(); if (preg_match('/^(.*) #\d+$/', $productData->catnum, $matches)) { $productData->catnum = $matches[1] . $uniqueIndex; } else { $productData->catnum .= $uniqueIndex; } foreach ($productData->name as $locale => $name) { if (preg_match('/^(.*) #\d+$/', $name, $matches)) { $productData->name[$locale] = $matches[1] . $uniqueIndex; } else { $productData->name[$locale] .= $uniqueIndex; } } } /** * @return string */ private function getUniqueIndex() { return ' #' . 
$this->demoDataIterationCounter; } private function clearResources() { $this->productAvailabilityRecalculationScheduler->cleanScheduleForImmediateRecalculation(); $this->productPriceRecalculationScheduler->cleanScheduleForImmediateRecalculation(); $this->em->clear(); gc_collect_cycles(); } private function cleanAndLoadReferences() { $this->clearResources(); $this->productsByCatnum = []; $onlyForFirstDomain = false; $this->productDataReferenceInjector->loadReferences( $this->productDataFixtureLoader, $this->persistentReferenceFacade, $onlyForFirstDomain ); } /** * @param \Shopsys\FrameworkBundle\Model\Product\ProductData $productData */ private function setRandomPerformanceCategoriesToProductData(ProductData $productData) { $this->cleanPerformanceCategoriesFromProductDataByDomainId($productData, 1); $this->cleanPerformanceCategoriesFromProductDataByDomainId($productData, 2); $this->addRandomPerformanceCategoriesToProductDataByDomainId($productData, 1); $this->addRandomPerformanceCategoriesToProductDataByDomainId($productData, 2); } /** * @param \Shopsys\FrameworkBundle\Model\Product\ProductData $productData * @param int $domainId */ private function cleanPerformanceCategoriesFromProductDataByDomainId(ProductData $productData, $domainId) { foreach ($productData->categoriesByDomainId[$domainId] as $key => $category) { if ($this->isPerformanceCategory($category)) { unset($productData->categoriesByDomainId[$domainId][$key]); } } } /** * @param \Shopsys\FrameworkBundle\Model\Product\ProductData $productData * @param int $domainId */ private function addRandomPerformanceCategoriesToProductDataByDomainId(ProductData $productData, $domainId) { $performanceCategoryIds = $this->getPerformanceCategoryIds(); $randomPerformanceCategoryIds = $this->faker->randomElements( $performanceCategoryIds, $this->faker->numberBetween(1, 4) ); $randomPerformanceCategories = $this->categoryRepository->getCategoriesByIds($randomPerformanceCategoryIds); foreach ($randomPerformanceCategories as $performanceCategory) { if (!in_array($performanceCategory, $productData->categoriesByDomainId[$domainId], true)) { $productData->categoriesByDomainId[$domainId][] = $performanceCategory; } } } /** * @return int[] */ private function getPerformanceCategoryIds() { $allCategoryIds = $this->categoryRepository->getAllIds(); $firstPerformanceCategory = $this->persistentReferenceFacade->getReference( CategoryDataFixture::FIRST_PERFORMANCE_CATEGORY ); $firstPerformanceCategoryKey = array_search($firstPerformanceCategory->getId(), $allCategoryIds, true); return array_slice($allCategoryIds, $firstPerformanceCategoryKey); } /** * @param \Shopsys\FrameworkBundle\Model\Category\Category $category * @return bool */ private function isPerformanceCategory(Category $category) { $firstPerformanceCategory = $this->persistentReferenceFacade->getReference( CategoryDataFixture::FIRST_PERFORMANCE_CATEGORY ); /* @var $firstPerformanceCategory \Shopsys\FrameworkBundle\Model\Category\Category */ return $category->getId() >= $firstPerformanceCategory->getId(); } /** * @param array $array * @param string|int $key */ private function setArrayPointerByKey(array &$array, $key) { reset($array); while (key($array) !== $key) { if (each($array) === false) { throw new \Shopsys\FrameworkBundle\DataFixtures\Performance\Exception\UndefinedArrayKeyException($key); } } } }
1
12509
So in the performance data fixtures, there will be references for the second domain only?
shopsys-shopsys
php
@@ -304,7 +304,7 @@ public class DefaultGridRegistry extends BaseGridRegistry implements GridRegistr if (proxy == null) { return; } - LOG.info("Registered a node " + proxy); + LOG.finest("Registered a node " + proxy); try { lock.lock();
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.grid.internal; import net.jcip.annotations.ThreadSafe; import org.openqa.grid.internal.listeners.RegistrationListener; import org.openqa.grid.internal.listeners.SelfHealingProxy; import org.openqa.grid.web.Hub; import org.openqa.grid.web.servlet.handler.RequestHandler; import org.openqa.selenium.remote.DesiredCapabilities; import org.openqa.selenium.remote.server.log.LoggingManager; import java.util.List; import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; import java.util.logging.Level; import java.util.logging.Logger; /** * Kernel of the grid. Keeps track of what's happening, what's free/used and assigns resources to * incoming requests. */ @ThreadSafe public class DefaultGridRegistry extends BaseGridRegistry implements GridRegistry { private static final Logger LOG = Logger.getLogger(DefaultGridRegistry.class.getName()); protected static class UncaughtExceptionHandler implements Thread.UncaughtExceptionHandler { @Override public void uncaughtException(Thread t, Throwable e) { LOG.log(Level.SEVERE, "Matcher thread dying due to unhandled exception.", e); } } // lock for anything modifying the tests session currently running on this // registry. private final ReentrantLock lock = new ReentrantLock(); private final Condition testSessionAvailable = lock.newCondition(); private final ProxySet proxies; private final ActiveTestSessions activeTestSessions = new ActiveTestSessions(); private final NewSessionRequestQueue newSessionQueue; private final Matcher matcherThread = new Matcher(); private final Set<RemoteProxy> registeringProxies = ConcurrentHashMap.newKeySet(); private volatile boolean stop = false; public DefaultGridRegistry() { this(null); } public DefaultGridRegistry(Hub hub) { super(hub); this.newSessionQueue = new NewSessionRequestQueue(); proxies = new ProxySet((hub != null) ? hub.getConfiguration().throwOnCapabilityNotPresent : true); this.matcherThread.setUncaughtExceptionHandler(new UncaughtExceptionHandler()); } @Override public void start() { matcherThread.start(); // freynaud : TODO // Grid registry is in a valid state when testSessionAvailable.await(); from // assignRequestToProxy is reached. Not before. try { Thread.sleep(250); } catch (InterruptedException e) { e.printStackTrace(); } } /** * Creates a new {@link GridRegistry} and starts it. 
* * @param hub the {@link Hub} to associate this registry with * @return the registry */ public static GridRegistry newInstance(Hub hub) { DefaultGridRegistry registry = new DefaultGridRegistry(hub); registry.start(); return registry; } /** * Ends this test session for the hub, releasing the resources in the hub / registry. It does not * release anything on the remote. The resources are released in a separate thread, so the call * returns immediately. It allows release with long duration not to block the test while the hub is * releasing the resource. * * @param session The session to terminate * @param reason the reason for termination */ @Override public void terminate(final TestSession session, final SessionTerminationReason reason) { // Thread safety reviewed new Thread(() -> _release(session.getSlot(), reason)).start(); } /** * Release the test slot. Free the resource on the slot itself and the registry. If also invokes * the {@link org.openqa.grid.internal.listeners.TestSessionListener#afterSession(TestSession)} if * applicable. * * @param testSlot The slot to release */ private void _release(TestSlot testSlot, SessionTerminationReason reason) { if (!testSlot.startReleaseProcess()) { return; } if (!testSlot.performAfterSessionEvent()) { return; } final String internalKey = testSlot.getInternalKey(); try { lock.lock(); testSlot.finishReleaseProcess(); release(internalKey, reason); } finally { lock.unlock(); } } void terminateSynchronousFOR_TEST_ONLY(TestSession testSession) { _release(testSession.getSlot(), SessionTerminationReason.CLIENT_STOPPED_SESSION); } /** * @see GridRegistry#removeIfPresent(RemoteProxy) */ @Override public void removeIfPresent(RemoteProxy proxy) { // Find the original proxy. While the supplied one is logically equivalent, it may be a fresh object with // an empty TestSlot list, which doesn't figure into the proxy equivalence check. Since we want to free up // those test sessions, we need to operate on that original object. if (proxies.contains(proxy)) { LOG.warning(String.format( "Cleaning up stale test sessions on the unregistered node %s", proxy)); final RemoteProxy p = proxies.remove(proxy); p.getTestSlots().forEach(testSlot -> forceRelease(testSlot, SessionTerminationReason.PROXY_REREGISTRATION) ); p.teardown(); } } /** * @see GridRegistry#forceRelease(TestSlot, SessionTerminationReason) */ @Override public void forceRelease(TestSlot testSlot, SessionTerminationReason reason) { if (testSlot.getSession() == null) { return; } String internalKey = testSlot.getInternalKey(); release(internalKey, reason); testSlot.doFinishRelease(); } /** * iterates the queue of incoming new session request and assign them to proxy after they've been * sorted by priority, with priority defined by the prioritizer. 
*/ class Matcher extends Thread { // Thread safety reviewed Matcher() { super("Matcher thread"); } @Override public void run() { try { lock.lock(); assignRequestToProxy(); } finally { lock.unlock(); } } } /** * @see GridRegistry#stop() */ @Override public void stop() { stop = true; matcherThread.interrupt(); newSessionQueue.stop(); proxies.teardown(); } /** * @see GridRegistry#addNewSessionRequest(RequestHandler) */ @Override public void addNewSessionRequest(RequestHandler handler) { try { lock.lock(); proxies.verifyAbilityToHandleDesiredCapabilities(handler.getRequest().getDesiredCapabilities()); newSessionQueue.add(handler); fireMatcherStateChanged(); } finally { lock.unlock(); } } /** * iterates the list of incoming session request to find a potential match in the list of proxies. * If something changes in the registry, the matcher iteration is stopped to account for that * change. */ private void assignRequestToProxy() { while (!stop) { try { testSessionAvailable.await(5, TimeUnit.SECONDS); newSessionQueue.processQueue( this::takeRequestHandler, getHub().getConfiguration().prioritizer); // Just make sure we delete anything that is logged on this thread from memory LoggingManager.perSessionLogHandler().clearThreadTempLogs(); } catch (InterruptedException e) { LOG.info("Shutting down registry."); } catch (Throwable t) { LOG.log(Level.SEVERE, "Unhandled exception in Matcher thread.", t); } } } private boolean takeRequestHandler(RequestHandler handler) { final TestSession session = proxies.getNewSession(handler.getRequest().getDesiredCapabilities()); final boolean sessionCreated = session != null; if (sessionCreated) { activeTestSessions.add(session); handler.bindSession(session); } return sessionCreated; } /** * mark the session as finished for the registry. 
The resources that were associated to it are now * free to be reserved by other tests * * @param session The session * @param reason the reason for the release */ private void release(TestSession session, SessionTerminationReason reason) { try { lock.lock(); boolean removed = activeTestSessions.remove(session, reason); if (removed) { fireMatcherStateChanged(); } } finally { lock.unlock(); } } private void release(String internalKey, SessionTerminationReason reason) { if (internalKey == null) { return; } final TestSession session1 = activeTestSessions.findSessionByInternalKey(internalKey); if (session1 != null) { release(session1, reason); return; } LOG.warning("Tried to release session with internal key " + internalKey + " but couldn't find it."); } /** * @see GridRegistry#add(RemoteProxy) */ @Override public void add(RemoteProxy proxy) { if (proxy == null) { return; } LOG.info("Registered a node " + proxy); try { lock.lock(); removeIfPresent(proxy); if (registeringProxies.contains(proxy)) { LOG.warning(String.format("Proxy '%s' is already queued for registration.", proxy)); return; } // Updating browserTimeout and timeout values in case a node sends null values proxy.getConfig().timeout = Optional .ofNullable(proxy.getConfig().timeout) .orElse(getHub().getConfiguration().timeout); proxy.getConfig().browserTimeout = Optional .ofNullable(proxy.getConfig().browserTimeout) .orElse(getHub().getConfiguration().browserTimeout); registeringProxies.add(proxy); fireMatcherStateChanged(); } finally { lock.unlock(); } boolean listenerOk = true; try { if (proxy instanceof RegistrationListener) { ((RegistrationListener) proxy).beforeRegistration(); } } catch (Throwable t) { LOG.severe("Error running the registration listener on " + proxy + ", " + t.getMessage()); t.printStackTrace(); listenerOk = false; } try { lock.lock(); registeringProxies.remove(proxy); if (listenerOk) { if (proxy instanceof SelfHealingProxy) { ((SelfHealingProxy) proxy).startPolling(); } proxies.add(proxy); fireMatcherStateChanged(); } } finally { lock.unlock(); } } /** * @see GridRegistry#setThrowOnCapabilityNotPresent(boolean) */ @Override public void setThrowOnCapabilityNotPresent(boolean throwOnCapabilityNotPresent) { proxies.setThrowOnCapabilityNotPresent(throwOnCapabilityNotPresent); } private void fireMatcherStateChanged() { testSessionAvailable.signalAll(); } /** * @see GridRegistry#getAllProxies() */ @Override public ProxySet getAllProxies() { return proxies; } /** * @see GridRegistry#getUsedProxies() */ @Override public List<RemoteProxy> getUsedProxies() { return proxies.getBusyProxies(); } /** * @see GridRegistry#getSession(ExternalSessionKey) */ @Override public TestSession getSession(ExternalSessionKey externalKey) { return activeTestSessions.findSessionByExternalKey(externalKey); } /** * @see GridRegistry#getExistingSession(ExternalSessionKey) */ @Override public TestSession getExistingSession(ExternalSessionKey externalKey) { return activeTestSessions.getExistingSession(externalKey); } /** * @see GridRegistry#getNewSessionRequestCount() */ @Override public int getNewSessionRequestCount() { // may race return newSessionQueue.getNewSessionRequestCount(); } /** * @see GridRegistry#clearNewSessionRequests() */ @Override public void clearNewSessionRequests() { newSessionQueue.clearNewSessionRequests(); } /** * @see GridRegistry#removeNewSessionRequest(RequestHandler) */ @Override public boolean removeNewSessionRequest(RequestHandler request) { return newSessionQueue.removeNewSessionRequest(request); } /** * @see 
GridRegistry#getDesiredCapabilities() */ @Override public Iterable<DesiredCapabilities> getDesiredCapabilities() { return newSessionQueue.getDesiredCapabilities(); } /** * @see GridRegistry#getActiveSessions() */ @Override public Set<TestSession> getActiveSessions() { return activeTestSessions.unmodifiableSet(); } /** * @see GridRegistry#getProxyById(String) */ @Override public RemoteProxy getProxyById(String id) { return proxies.getProxyById(id); } }
1
16452
This is wildly unhelpful to users; they need to know when a proxy has been registered.
SeleniumHQ-selenium
java
@@ -203,7 +203,7 @@ class ClientPlayback: # https://github.com/mitmproxy/mitmproxy/issues/2197 if hf.request.http_version == "HTTP/2.0": hf.request.http_version = "HTTP/1.1" - host = hf.request.headers.pop(":authority") + host = hf.request.headers.pop(":authority", hf.request.pretty_host) hf.request.headers.insert(0, "host", host) self.q.put(hf) ctx.master.addons.trigger("update", lst)
1
import queue import threading import typing import time from mitmproxy import log from mitmproxy import controller from mitmproxy import exceptions from mitmproxy import http from mitmproxy import flow from mitmproxy import options from mitmproxy import connections from mitmproxy.net import server_spec, tls from mitmproxy.net.http import http1 from mitmproxy.coretypes import basethread from mitmproxy.utils import human from mitmproxy import ctx from mitmproxy import io from mitmproxy import command import mitmproxy.types class RequestReplayThread(basethread.BaseThread): daemon = True def __init__( self, opts: options.Options, channel: controller.Channel, queue: queue.Queue, ) -> None: self.options = opts self.channel = channel self.queue = queue self.inflight = threading.Event() super().__init__("RequestReplayThread") def run(self): while True: f = self.queue.get() self.inflight.set() self.replay(f) self.inflight.clear() def replay(self, f): # pragma: no cover f.live = True r = f.request bsl = human.parse_size(self.options.body_size_limit) first_line_format_backup = r.first_line_format server = None try: f.response = None # If we have a channel, run script hooks. request_reply = self.channel.ask("request", f) if isinstance(request_reply, http.HTTPResponse): f.response = request_reply if not f.response: # In all modes, we directly connect to the server displayed if self.options.mode.startswith("upstream:"): server_address = server_spec.parse_with_mode(self.options.mode)[1].address server = connections.ServerConnection(server_address) server.connect() if r.scheme == "https": connect_request = http.make_connect_request((r.data.host, r.port)) server.wfile.write(http1.assemble_request(connect_request)) server.wfile.flush() resp = http1.read_response( server.rfile, connect_request, body_size_limit=bsl ) if resp.status_code != 200: raise exceptions.ReplayException( "Upstream server refuses CONNECT request" ) server.establish_tls( sni=f.server_conn.sni, **tls.client_arguments_from_options(self.options) ) r.first_line_format = "relative" else: r.first_line_format = "absolute" else: server_address = (r.host, r.port) server = connections.ServerConnection(server_address) server.connect() if r.scheme == "https": server.establish_tls( sni=f.server_conn.sni, **tls.client_arguments_from_options(self.options) ) r.first_line_format = "relative" server.wfile.write(http1.assemble_request(r)) server.wfile.flush() r.timestamp_start = r.timestamp_end = time.time() if f.server_conn: f.server_conn.close() f.server_conn = server f.response = http.HTTPResponse.wrap( http1.read_response(server.rfile, r, body_size_limit=bsl) ) response_reply = self.channel.ask("response", f) if response_reply == exceptions.Kill: raise exceptions.Kill() except (exceptions.ReplayException, exceptions.NetlibException) as e: f.error = flow.Error(str(e)) self.channel.ask("error", f) except exceptions.Kill: self.channel.tell("log", log.LogEntry("Connection killed", "info")) except Exception as e: self.channel.tell("log", log.LogEntry(repr(e), "error")) finally: r.first_line_format = first_line_format_backup f.live = False if server.connected(): server.finish() server.close() class ClientPlayback: def __init__(self): self.q = queue.Queue() self.thread: RequestReplayThread = None def check(self, f: http.HTTPFlow): if f.live: return "Can't replay live flow." if f.intercepted: return "Can't replay intercepted flow." if not f.request: return "Can't replay flow with missing request." 
if f.request.raw_content is None: return "Can't replay flow with missing content." def load(self, loader): loader.add_option( "client_replay", typing.Sequence[str], [], "Replay client requests from a saved file." ) def running(self): self.thread = RequestReplayThread( ctx.options, ctx.master.channel, self.q, ) self.thread.start() def configure(self, updated): if "client_replay" in updated and ctx.options.client_replay: try: flows = io.read_flows_from_paths(ctx.options.client_replay) except exceptions.FlowReadException as e: raise exceptions.OptionsError(str(e)) self.start_replay(flows) @command.command("replay.client.count") def count(self) -> int: """ Approximate number of flows queued for replay. """ inflight = 1 if self.thread and self.thread.inflight.is_set() else 0 return self.q.qsize() + inflight @command.command("replay.client.stop") def stop_replay(self) -> None: """ Clear the replay queue. """ with self.q.mutex: lst = list(self.q.queue) self.q.queue.clear() for f in lst: f.revert() ctx.master.addons.trigger("update", lst) ctx.log.alert("Client replay queue cleared.") @command.command("replay.client") def start_replay(self, flows: typing.Sequence[flow.Flow]) -> None: """ Add flows to the replay queue, skipping flows that can't be replayed. """ lst = [] for f in flows: hf = typing.cast(http.HTTPFlow, f) err = self.check(hf) if err: ctx.log.warn(err) continue lst.append(hf) # Prepare the flow for replay hf.backup() hf.request.is_replay = True hf.response = None hf.error = None # https://github.com/mitmproxy/mitmproxy/issues/2197 if hf.request.http_version == "HTTP/2.0": hf.request.http_version = "HTTP/1.1" host = hf.request.headers.pop(":authority") hf.request.headers.insert(0, "host", host) self.q.put(hf) ctx.master.addons.trigger("update", lst) @command.command("replay.client.file") def load_file(self, path: mitmproxy.types.Path) -> None: """ Load flows from file, and add them to the replay queue. """ try: flows = io.read_flows_from_paths([path]) except exceptions.FlowReadException as e: raise exceptions.CommandError(str(e)) self.start_replay(flows)
1
14492
If there is no authority header (i.e. someone intentionally deleted it), I would argue we probably don't want a Host header in the replay either. How about we only add it if it exists, and do nothing otherwise?
mitmproxy-mitmproxy
py
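A minimal sketch of what the reviewer in the mitmproxy record above seems to suggest: only move the HTTP/2 `:authority` pseudo-header into a `host` header when it is actually present, and leave the request untouched otherwise. The helper name is hypothetical, and this is not the code that was merged.

```python
def move_authority_to_host(headers) -> None:
    # `headers` is assumed to behave like the request headers in the flow
    # above (supporting `in`, `pop`, and positional `insert`).
    if ":authority" in headers:
        host = headers.pop(":authority")
        headers.insert(0, "host", host)
    # No :authority header (e.g. intentionally deleted): do nothing.
```

The diff shown in this record resolves it differently, falling back to `hf.request.pretty_host` when the pseudo-header is missing.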
@@ -65,12 +65,7 @@ func TaskMetadataHandler(state dockerstate.TaskEngineState, ecsClient api.ECSCli for _, containerResponse := range taskResponse.Containers { networks, err := GetContainerNetworkMetadata(containerResponse.ID, state) if err != nil { - errResponseJSON, err := json.Marshal(err.Error()) - if e := utils.WriteResponseIfMarshalError(w, err); e != nil { - return - } - utils.WriteJSONToResponse(w, http.StatusInternalServerError, errResponseJSON, utils.RequestTypeContainerMetadata) - return + seelog.Warnf("Error retrieving network metadata for container %s - %s", containerResponse.ID, err) } containerResponse.Networks = networks responses = append(responses, containerResponse)
1
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package v4 import ( "encoding/json" "fmt" "net/http" "github.com/aws/amazon-ecs-agent/agent/api" "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" "github.com/aws/amazon-ecs-agent/agent/handlers/utils" v3 "github.com/aws/amazon-ecs-agent/agent/handlers/v3" "github.com/cihub/seelog" ) // TaskMetadataPath specifies the relative URI path for serving task metadata. var TaskMetadataPath = "/v4/" + utils.ConstructMuxVar(v3.V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + "/task" // TaskWithTagsMetadataPath specifies the relative URI path for serving task metdata // with Container Instance and Task Tags retrieved through the ECS API var TaskWithTagsMetadataPath = "/v4/" + utils.ConstructMuxVar(v3.V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + "/taskWithTags" // TaskMetadataHandler returns the handler method for handling task metadata requests. func TaskMetadataHandler(state dockerstate.TaskEngineState, ecsClient api.ECSClient, cluster, az, containerInstanceArn string, propagateTags bool) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { var taskArn, err = v3.GetTaskARNByRequest(r, state) if err != nil { ResponseJSON, err := json.Marshal(fmt.Sprintf("V4 task metadata handler: unable to get task arn from request: %s", err.Error())) if e := utils.WriteResponseIfMarshalError(w, err); e != nil { return } utils.WriteJSONToResponse(w, http.StatusInternalServerError, ResponseJSON, utils.RequestTypeTaskMetadata) return } seelog.Infof("V4 taskMetadata handler: Writing response for task '%s'", taskArn) taskResponse, err := NewTaskResponse(taskArn, state, ecsClient, cluster, az, containerInstanceArn, propagateTags) if err != nil { errResponseJson, err := json.Marshal("Unable to generate metadata for v4 task: '" + taskArn + "'") if e := utils.WriteResponseIfMarshalError(w, err); e != nil { return } utils.WriteJSONToResponse(w, http.StatusInternalServerError, errResponseJson, utils.RequestTypeTaskMetadata) return } task, _ := state.TaskByArn(taskArn) // for non-awsvpc task mode if !task.IsNetworkModeAWSVPC() { // fill in non-awsvpc network details for container responses here responses := make([]ContainerResponse, 0) for _, containerResponse := range taskResponse.Containers { networks, err := GetContainerNetworkMetadata(containerResponse.ID, state) if err != nil { errResponseJSON, err := json.Marshal(err.Error()) if e := utils.WriteResponseIfMarshalError(w, err); e != nil { return } utils.WriteJSONToResponse(w, http.StatusInternalServerError, errResponseJSON, utils.RequestTypeContainerMetadata) return } containerResponse.Networks = networks responses = append(responses, containerResponse) } taskResponse.Containers = responses } responseJSON, err := json.Marshal(taskResponse) if e := utils.WriteResponseIfMarshalError(w, err); e != nil { return } utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeTaskMetadata) } }
1
25644
Is there any unit test that can be updated to verify this?
aws-amazon-ecs-agent
go
@@ -418,7 +418,18 @@ static int cb_lua_filter(const void *data, size_t bytes, lua_pushstring(ctx->lua->state, tag); lua_pushnumber(ctx->lua->state, ts); lua_pushmsgpack(ctx->lua->state, p); - lua_call(ctx->lua->state, 3, 3); + if (ctx->protected_mode) { + ret = lua_pcall(ctx->lua->state, 3, 3, 0); + if (ret != 0) { + flb_plg_error(ctx->ins, "error code %d: %s", + ret, lua_tostring(ctx->lua->state, -1)); + lua_pop(ctx->lua->state, 1); + return FLB_FILTER_NOTOUCH; + } + } + else { + lua_call(ctx->lua->state, 3, 3); + } /* Initialize Return values */ l_code = 0;
1
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* Fluent Bit * ========== * Copyright (C) 2019-2020 The Fluent Bit Authors * Copyright (C) 2015-2018 Treasure Data Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <fluent-bit/flb_info.h> #include <fluent-bit/flb_compat.h> #include <fluent-bit/flb_filter.h> #include <fluent-bit/flb_filter_plugin.h> #include <fluent-bit/flb_luajit.h> #include <fluent-bit/flb_utils.h> #include <fluent-bit/flb_pack.h> #include <fluent-bit/flb_sds.h> #include <fluent-bit/flb_time.h> #include <msgpack.h> #include "lua_config.h" static void lua_pushmsgpack(lua_State *l, msgpack_object *o) { int i; int size; lua_checkstack(l, 3); switch(o->type) { case MSGPACK_OBJECT_NIL: lua_pushnil(l); break; case MSGPACK_OBJECT_BOOLEAN: lua_pushboolean(l, o->via.boolean); break; case MSGPACK_OBJECT_POSITIVE_INTEGER: lua_pushnumber(l, (double) o->via.u64); break; case MSGPACK_OBJECT_NEGATIVE_INTEGER: lua_pushnumber(l, (double) o->via.i64); break; case MSGPACK_OBJECT_FLOAT32: case MSGPACK_OBJECT_FLOAT64: lua_pushnumber(l, (double) o->via.f64); break; case MSGPACK_OBJECT_STR: lua_pushlstring(l, o->via.str.ptr, o->via.str.size); break; case MSGPACK_OBJECT_BIN: lua_pushlstring(l, o->via.bin.ptr, o->via.bin.size); break; case MSGPACK_OBJECT_EXT: lua_pushlstring(l, o->via.ext.ptr, o->via.ext.size); break; case MSGPACK_OBJECT_ARRAY: size = o->via.array.size; lua_createtable(l, size, 0); if (size != 0) { msgpack_object *p = o->via.array.ptr; for (i = 0; i < size; i++) { lua_pushmsgpack(l, p+i); lua_rawseti (l, -2, i+1); } } break; case MSGPACK_OBJECT_MAP: size = o->via.map.size; lua_createtable(l, 0, size); if (size != 0) { msgpack_object_kv *p = o->via.map.ptr; for (i = 0; i < size; i++) { lua_pushmsgpack(l, &(p+i)->key); lua_pushmsgpack(l, &(p+i)->val); lua_settable(l, -3); } } break; } } static int lua_arraylength(lua_State *l) { lua_Integer n; int count = 0; int max = 0; lua_pushnil(l); while (lua_next(l, -2) != 0) { if (lua_type(l, -2) == LUA_TNUMBER) { n = lua_tonumber(l, -2); if (n > 0) { max = n > max ? 
n : max; count++; lua_pop(l, 1); continue; } } lua_pop(l, 2); return -1; } if (max != count) return -1; return max; } static void lua_tomsgpack(struct lua_filter *lf, msgpack_packer *pck, int index); static void try_to_convert_data_type(struct lua_filter *lf, msgpack_packer *pck, int index) { size_t len; const char *tmp = NULL; lua_State *l = lf->lua->state; struct mk_list *tmp_list = NULL; struct mk_list *head = NULL; struct l2c_type *l2c = NULL; if ((lua_type(l, -2) == LUA_TSTRING) && lua_type(l, -1) == LUA_TNUMBER){ tmp = lua_tolstring(l, -2, &len); mk_list_foreach_safe(head, tmp_list, &lf->l2c_types) { l2c = mk_list_entry(head, struct l2c_type, _head); if (!strncmp(l2c->key, tmp, len)) { lua_tomsgpack(lf, pck, -1); msgpack_pack_int64(pck, (int64_t)lua_tonumber(l, -1)); return; } } } /* not matched */ lua_tomsgpack(lf, pck, -1); lua_tomsgpack(lf, pck, 0); } static void lua_tomsgpack(struct lua_filter *lf, msgpack_packer *pck, int index) { int len; int i; lua_State *l = lf->lua->state; switch (lua_type(l, -1 + index)) { case LUA_TSTRING: { const char *str; size_t len; str = lua_tolstring(l, -1 + index, &len); msgpack_pack_str(pck, len); msgpack_pack_str_body(pck, str, len); } break; case LUA_TNUMBER: { double num = lua_tonumber(l, -1 + index); msgpack_pack_double(pck, num); } break; case LUA_TBOOLEAN: if (lua_toboolean(l, -1 + index)) msgpack_pack_true(pck); else msgpack_pack_false(pck); break; case LUA_TTABLE: len = lua_arraylength(l); if (len > 0) { msgpack_pack_array(pck, len); for (i = 1; i <= len; i++) { lua_rawgeti(l, -1, i); lua_tomsgpack(lf, pck, 0); lua_pop(l, 1); } } else { len = 0; lua_pushnil(l); while (lua_next(l, -2) != 0) { lua_pop(l, 1); len++; } msgpack_pack_map(pck, len); lua_pushnil(l); if (lf->l2c_types_num > 0) { /* type conversion */ while (lua_next(l, -2) != 0) { try_to_convert_data_type(lf, pck, index); lua_pop(l, 1); } } else { while (lua_next(l, -2) != 0) { lua_tomsgpack(lf, pck, -1); lua_tomsgpack(lf, pck, 0); lua_pop(l, 1); } } } break; case LUA_TNIL: msgpack_pack_nil(pck); break; case LUA_TLIGHTUSERDATA: if (lua_touserdata(l, -1 + index) == NULL) { msgpack_pack_nil(pck); break; } case LUA_TFUNCTION: case LUA_TUSERDATA: case LUA_TTHREAD: /* cannot serialize */ break; } } static int is_valid_func(lua_State *lua, flb_sds_t func) { int ret = FLB_FALSE; lua_getglobal(lua, func); if (lua_isfunction(lua, -1)) { ret = FLB_TRUE; } lua_pop(lua, -1); /* discard return value of isfunction */ return ret; } static int cb_lua_init(struct flb_filter_instance *f_ins, struct flb_config *config, void *data) { int ret; (void) data; struct lua_filter *ctx; struct flb_luajit *lj; /* Create context */ ctx = lua_config_create(f_ins, config); if (!ctx) { flb_error("[filter_lua] filter cannot be loaded"); return -1; } /* Create LuaJIT state/vm */ lj = flb_luajit_create(config); if (!lj) { lua_config_destroy(ctx); return -1; } ctx->lua = lj; /* Load Script */ ret = flb_luajit_load_script(ctx->lua, ctx->script); if (ret == -1) { lua_config_destroy(ctx); return -1; } lua_pcall(ctx->lua->state, 0, 0, 0); if (is_valid_func(ctx->lua->state, ctx->call) != FLB_TRUE) { flb_plg_error(ctx->ins, "function %s is not found", ctx->call); lua_config_destroy(ctx); return -1; } /* Set context */ flb_filter_set_context(f_ins, ctx); return 0; } static int pack_result (struct flb_time *ts, msgpack_packer *pck, msgpack_sbuffer *sbuf, char *data, size_t bytes) { int ret; int size; int i; size_t off = 0; msgpack_object root; msgpack_unpacked result; msgpack_unpacked_init(&result); ret = 
msgpack_unpack_next(&result, data, bytes, &off); if (ret != MSGPACK_UNPACK_SUCCESS) { msgpack_unpacked_destroy(&result); return FLB_FALSE; } root = result.data; /* check for array */ if (root.type == MSGPACK_OBJECT_ARRAY) { size = root.via.array.size; if (size > 0) { msgpack_object *map = root.via.array.ptr; for (i = 0; i < size; i++) { if ((map+i)->type != MSGPACK_OBJECT_MAP) { msgpack_unpacked_destroy(&result); return FLB_FALSE; } if ((map+i)->via.map.size <= 0) { msgpack_unpacked_destroy(&result); return FLB_FALSE; } /* main array */ msgpack_pack_array(pck, 2); /* timestamp: convert from double to Fluent Bit format */ flb_time_append_to_msgpack(ts, pck, 0); /* Pack lua table */ msgpack_pack_object(pck, *(map+i)); } msgpack_unpacked_destroy(&result); return FLB_TRUE; } else { msgpack_unpacked_destroy(&result); return FLB_FALSE; } } /* check for map */ if (root.type != MSGPACK_OBJECT_MAP) { msgpack_unpacked_destroy(&result); return FLB_FALSE; } if (root.via.map.size <= 0) { msgpack_unpacked_destroy(&result); return FLB_FALSE; } /* main array */ msgpack_pack_array(pck, 2); flb_time_append_to_msgpack(ts, pck, 0); /* Pack lua table */ msgpack_sbuffer_write(sbuf, data, bytes); msgpack_unpacked_destroy(&result); return FLB_TRUE; } static int cb_lua_filter(const void *data, size_t bytes, const char *tag, int tag_len, void **out_buf, size_t *out_bytes, struct flb_filter_instance *f_ins, void *filter_context, struct flb_config *config) { int ret; size_t off = 0; (void) f_ins; (void) config; double ts; msgpack_object *p; msgpack_object root; msgpack_unpacked result; msgpack_sbuffer tmp_sbuf; msgpack_packer tmp_pck; struct flb_time t_orig; struct flb_time t; struct lua_filter *ctx = filter_context; /* Lua return values */ int l_code; double l_timestamp; /* Create temporal msgpack buffer */ msgpack_sbuffer_init(&tmp_sbuf); msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write); msgpack_unpacked_init(&result); while (msgpack_unpack_next(&result, data, bytes, &off) == MSGPACK_UNPACK_SUCCESS) { msgpack_packer data_pck; msgpack_sbuffer data_sbuf; msgpack_sbuffer_init(&data_sbuf); msgpack_packer_init(&data_pck, &data_sbuf, msgpack_sbuffer_write); root = result.data; /* Get timestamp */ flb_time_pop_from_msgpack(&t, &result, &p); t_orig = t; ts = flb_time_to_double(&t); /* Prepare function call, pass 3 arguments, expect 3 return values */ lua_getglobal(ctx->lua->state, ctx->call); lua_pushstring(ctx->lua->state, tag); lua_pushnumber(ctx->lua->state, ts); lua_pushmsgpack(ctx->lua->state, p); lua_call(ctx->lua->state, 3, 3); /* Initialize Return values */ l_code = 0; l_timestamp = ts; lua_tomsgpack(ctx, &data_pck, 0); lua_pop(ctx->lua->state, 1); l_timestamp = (double) lua_tonumber(ctx->lua->state, -1); lua_pop(ctx->lua->state, 1); l_code = (int) lua_tointeger(ctx->lua->state, -1); lua_pop(ctx->lua->state, 1); if (l_code == -1) { /* Skip record */ msgpack_sbuffer_destroy(&data_sbuf); continue; } else if (l_code == 0) { /* Keep record, repack */ msgpack_pack_object(&tmp_pck, root); } else if (l_code == 1 || l_code == 2) { /* Modified, pack new data */ if (l_code == 1) { flb_time_from_double(&t, l_timestamp); } else if(l_code == 2) { /* Keep the timestamp */ t = t_orig; } ret = pack_result(&t, &tmp_pck, &tmp_sbuf, data_sbuf.data, data_sbuf.size); if (ret == FLB_FALSE) { flb_plg_error(ctx->ins, "invalid table returned at %s(), %s", ctx->call, ctx->script); msgpack_sbuffer_destroy(&tmp_sbuf); msgpack_sbuffer_destroy(&data_sbuf); msgpack_unpacked_destroy(&result); return FLB_FILTER_NOTOUCH; } } else { 
/* Unexpected return code, keep original content */ flb_plg_error(ctx->ins, "unexpected Lua script return code %i, " "original record will be kept." , l_code); msgpack_pack_object(&tmp_pck, root); } msgpack_sbuffer_destroy(&data_sbuf); } msgpack_unpacked_destroy(&result); /* link new buffers */ *out_buf = tmp_sbuf.data; *out_bytes = tmp_sbuf.size; return FLB_FILTER_MODIFIED; } static int cb_lua_exit(void *data, struct flb_config *config) { struct lua_filter *ctx; ctx = data; flb_luajit_destroy(ctx->lua); lua_config_destroy(ctx); return 0; } struct flb_filter_plugin filter_lua_plugin = { .name = "lua", .description = "Lua Scripting Filter", .cb_init = cb_lua_init, .cb_filter = cb_lua_filter, .cb_exit = cb_lua_exit, .flags = 0 };
1
11535
Exiting at this point leaks memory; take a look at the valgrind output. The sbuffer must be destroyed.
fluent-fluent-bit
c
@@ -0,0 +1,13 @@ +def pytest_addoption(parser): + parser.addoption( + '--poppler', + action='store_true', + dest='poppler', + default=False, + help="Indicates poppler tools (incl. pdftoppm) installed" + ) + + +def pytest_configure(config): + if not config.option.poppler: + setattr(config.option, 'markexpr', 'not poppler')
1
1
21440
I think you want a different flag and help text, like `--ffmpeg` or something; and you need to mark any tests you want skipped.
quiltdata-quilt
py
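A sketch of the `conftest.py` pattern the reviewer in the quilt record above appears to be asking for: a dedicated command-line flag plus an explicit marker, so unmarked tests always run and marked tests are skipped unless the flag is passed. The flag and marker names (`--ffmpeg` / `ffmpeg`) come from the reviewer's example, not from the project.

```python
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--ffmpeg",
        action="store_true",
        default=False,
        help="Indicates the external tool (e.g. ffmpeg) is installed",
    )


def pytest_configure(config):
    # register the marker so strict-marker runs don't complain
    config.addinivalue_line("markers", "ffmpeg: test requires the external tool")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--ffmpeg"):
        return  # flag given: run the marked tests as well
    skip_marker = pytest.mark.skip(reason="needs --ffmpeg to run")
    for item in items:
        if "ffmpeg" in item.keywords:
            item.add_marker(skip_marker)
```

Tests that depend on the tool would then be decorated with `@pytest.mark.ffmpeg`.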
@@ -32,9 +32,10 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/empty" - "github.com/google/knative-gcp/pkg/apis/events/v1alpha1" auditpb "google.golang.org/genproto/googleapis/cloud/audit" logpb "google.golang.org/genproto/googleapis/logging/v2" + + "github.com/google/knative-gcp/pkg/apis/events/v1alpha1" ) const (
1
/* Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package converters import ( "bytes" "context" errors "errors" "fmt" "log" "reflect" "regexp" "strings" cloudevents "github.com/cloudevents/sdk-go" cepubsub "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/pubsub" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/empty" "github.com/google/knative-gcp/pkg/apis/events/v1alpha1" auditpb "google.golang.org/genproto/googleapis/cloud/audit" logpb "google.golang.org/genproto/googleapis/logging/v2" ) const ( AuditLogConverter = "com.google.cloud.auditlogs" logEntrySchema = "type.googleapis.com/google.logging.v2.LogEntry" loggingSource = "logging.googleapis.com" parentResourcePattern = `^(:?projects|organizations|billingAccounts|folders)/[^/]+` serviceNameExtension = "servicename" methodNameExtension = "methodname" resourceNameExtension = "resourcename" ) var ( jsonpbUnmarshaller = jsonpb.Unmarshaler{ AllowUnknownFields: true, AnyResolver: resolver(resolveAnyUnknowns), } jsonpbMarshaler = jsonpb.Marshaler{} parentResourceRegexp *regexp.Regexp ) func init() { var err error if parentResourceRegexp, err = regexp.Compile(parentResourcePattern); err != nil { log.Fatal(err) } } // Resolver function type that can be used to resolve Any fields in a jsonpb.Unmarshaler. type resolver func(turl string) (proto.Message, error) func (r resolver) Resolve(turl string) (proto.Message, error) { return r(turl) } type UnknownMsg empty.Empty func (m *UnknownMsg) ProtoMessage() { (*empty.Empty)(m).ProtoMessage() } func (m *UnknownMsg) Reset() { (*empty.Empty)(m).Reset() } func (m *UnknownMsg) String() string { return "Unknown message" } // Resolves type URLs such as // type.googleapis.com/google.profile.Person to a proto message // type. Resolves unknown message types to empty.Empty. func resolveAnyUnknowns(typeURL string) (proto.Message, error) { // Only the part of typeUrl after the last slash is relevant. mname := typeURL if slash := strings.LastIndex(mname, "/"); slash >= 0 { mname = mname[slash+1:] } mt := proto.MessageType(mname) if mt == nil { return (*UnknownMsg)(&empty.Empty{}), nil } return reflect.New(mt.Elem()).Interface().(proto.Message), nil } func convertAuditLog(ctx context.Context, msg *cepubsub.Message, sendMode ModeType) (*cloudevents.Event, error) { if msg == nil { return nil, fmt.Errorf("nil pubsub message") } entry := logpb.LogEntry{} if err := jsonpbUnmarshaller.Unmarshal(bytes.NewReader(msg.Data), &entry); err != nil { return nil, fmt.Errorf("failed to decode LogEntry: %w", err) } parentResource := parentResourceRegexp.FindString(entry.LogName) if parentResource == "" { return nil, fmt.Errorf("invalid LogName: %q", entry.LogName) } // Make a new event and convert the message payload. 
event := cloudevents.NewEvent(cloudevents.VersionV1) event.SetID(entry.InsertId + entry.LogName + ptypes.TimestampString(entry.Timestamp)) if timestamp, err := ptypes.Timestamp(entry.Timestamp); err != nil { return nil, fmt.Errorf("invalid LogEntry timestamp: %w", err) } else { event.SetTime(timestamp) } event.SetData(msg.Data) event.SetDataSchema(logEntrySchema) event.SetDataContentType(cloudevents.ApplicationJSON) switch payload := entry.Payload.(type) { case *logpb.LogEntry_ProtoPayload: var unpacked ptypes.DynamicAny if err := ptypes.UnmarshalAny(payload.ProtoPayload, &unpacked); err != nil { return nil, fmt.Errorf("unrecognized proto payload: %w", err) } switch proto := unpacked.Message.(type) { case *auditpb.AuditLog: event.SetSource(fmt.Sprintf("%s/%s", proto.ServiceName, parentResource)) event.SetSubject(fmt.Sprintf("%s/%s", proto.ServiceName, proto.ResourceName)) event.SetType(v1alpha1.AuditLogEventType) event.SetExtension(serviceNameExtension, proto.ServiceName) event.SetExtension(methodNameExtension, proto.MethodName) event.SetExtension(resourceNameExtension, proto.ResourceName) default: return nil, fmt.Errorf("unhandled proto payload type: %T", proto) } default: return nil, errors.New("non-AuditLog log entry") } return &event, nil }
1
10359
Did this need to change, or was it just some formatting thing?
google-knative-gcp
go
@@ -278,10 +278,11 @@ public class ProcessBesuNodeRunner implements BesuNodeRunner { params.add("--auto-log-bloom-caching-enabled"); params.add("false"); - String level = System.getProperty("root.log.level"); - if (level != null) { - params.add("--logging=" + level); - } + // String level = System.getProperty("root.log.level"); + // if (level != null) { + // params.add("--logging=" + level); + // } + params.add("--logging=TRACE"); LOG.info("Creating besu process with params {}", params); final ProcessBuilder processBuilder =
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.tests.acceptance.dsl.node; import static com.google.common.base.Preconditions.checkState; import static java.nio.charset.StandardCharsets.UTF_8; import org.hyperledger.besu.cli.options.NetworkingOptions; import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi; import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis; import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration; import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration; import org.hyperledger.besu.plugin.services.metrics.MetricCategory; import org.hyperledger.besu.tests.acceptance.dsl.StaticNodesUtils; import java.io.BufferedReader; import java.io.File; import java.io.IOException; import java.io.InputStreamReader; import java.lang.ProcessBuilder.Redirect; import java.net.URI; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.ThreadContext; public class ProcessBesuNodeRunner implements BesuNodeRunner { private static final Logger LOG = LogManager.getLogger(); private static final Logger PROCESS_LOG = LogManager.getLogger("org.hyperledger.besu.SubProcessLog"); private final Map<String, Process> besuProcesses = new HashMap<>(); private final ExecutorService outputProcessorExecutor = Executors.newCachedThreadPool(); ProcessBesuNodeRunner() { Runtime.getRuntime().addShutdownHook(new Thread(this::shutdown)); } @Override public void startNode(final BesuNode node) { if (ThreadContext.containsKey("node")) { LOG.error("ThreadContext node is already set to {}", ThreadContext.get("node")); } ThreadContext.put("node", node.getName()); final Path dataDir = node.homeDirectory(); final List<String> params = new ArrayList<>(); params.add("build/install/besu/bin/besu"); params.add("--data-path"); params.add(dataDir.toAbsolutePath().toString()); node.getRunCommand().ifPresent(params::add); if (node.isDevMode()) { params.add("--network"); params.add("DEV"); } params.add("--discovery-enabled"); params.add(Boolean.toString(node.isDiscoveryEnabled())); params.add("--p2p-host"); params.add(node.p2pListenHost()); params.add("--p2p-port"); params.add("0"); if (node.getMiningParameters().isMiningEnabled()) { params.add("--miner-enabled"); params.add("--miner-coinbase"); params.add(node.getMiningParameters().getCoinbase().get().toString()); params.add("--miner-stratum-port"); params.add(Integer.toString(node.getMiningParameters().getStratumPort())); params.add("--miner-stratum-host"); params.add(node.getMiningParameters().getStratumNetworkInterface()); 
params.add("--min-gas-price"); params.add( Integer.toString(node.getMiningParameters().getMinTransactionGasPrice().intValue())); } if (node.getMiningParameters().isStratumMiningEnabled()) { params.add("--miner-stratum-enabled"); } if (node.getPrivacyParameters().isEnabled()) { params.add("--privacy-enabled"); params.add("--privacy-url"); params.add(node.getPrivacyParameters().getEnclaveUri().toString()); if (node.getPrivacyParameters().isMultiTenancyEnabled()) { params.add("--privacy-multi-tenancy-enabled"); } else { params.add("--privacy-public-key-file"); params.add(node.getPrivacyParameters().getEnclavePublicKeyFile().getAbsolutePath()); } params.add("--privacy-precompiled-address"); params.add(String.valueOf(node.getPrivacyParameters().getPrivacyAddress())); params.add("--privacy-marker-transaction-signing-key-file"); params.add(node.homeDirectory().resolve("key").toString()); if (node.getPrivacyParameters().isOnchainPrivacyGroupsEnabled()) { params.add("--privacy-onchain-groups-enabled"); } } params.add("--bootnodes"); if (!node.getBootnodes().isEmpty()) { params.add(node.getBootnodes().stream().map(URI::toString).collect(Collectors.joining(","))); } if (node.hasStaticNodes()) { createStaticNodes(node); } if (node.isJsonRpcEnabled()) { params.add("--rpc-http-enabled"); params.add("--rpc-http-host"); params.add(node.jsonRpcListenHost().get()); params.add("--rpc-http-port"); params.add(node.jsonRpcListenPort().map(Object::toString).get()); params.add("--rpc-http-api"); params.add(apiList(node.jsonRpcConfiguration().getRpcApis())); if (node.jsonRpcConfiguration().isAuthenticationEnabled()) { params.add("--rpc-http-authentication-enabled"); } if (node.jsonRpcConfiguration().getAuthenticationCredentialsFile() != null) { params.add("--rpc-http-authentication-credentials-file"); params.add(node.jsonRpcConfiguration().getAuthenticationCredentialsFile()); } if (node.jsonRpcConfiguration().getAuthenticationPublicKeyFile() != null) { params.add("--rpc-http-authentication-jwt-public-key-file"); params.add(node.jsonRpcConfiguration().getAuthenticationPublicKeyFile().getAbsolutePath()); } } if (node.wsRpcEnabled()) { params.add("--rpc-ws-enabled"); params.add("--rpc-ws-host"); params.add(node.wsRpcListenHost().get()); params.add("--rpc-ws-port"); params.add(node.wsRpcListenPort().map(Object::toString).get()); params.add("--rpc-ws-api"); params.add(apiList(node.webSocketConfiguration().getRpcApis())); if (node.webSocketConfiguration().isAuthenticationEnabled()) { params.add("--rpc-ws-authentication-enabled"); } if (node.webSocketConfiguration().getAuthenticationCredentialsFile() != null) { params.add("--rpc-ws-authentication-credentials-file"); params.add(node.webSocketConfiguration().getAuthenticationCredentialsFile()); } if (node.webSocketConfiguration().getAuthenticationPublicKeyFile() != null) { params.add("--rpc-ws-authentication-jwt-public-key-file"); params.add( node.webSocketConfiguration().getAuthenticationPublicKeyFile().getAbsolutePath()); } } if (node.isMetricsEnabled()) { final MetricsConfiguration metricsConfiguration = node.getMetricsConfiguration(); params.add("--metrics-enabled"); params.add("--metrics-host"); params.add(metricsConfiguration.getHost()); params.add("--metrics-port"); params.add(Integer.toString(metricsConfiguration.getPort())); for (final MetricCategory category : metricsConfiguration.getMetricCategories()) { params.add("--metrics-category"); params.add(((Enum<?>) category).name()); } if (metricsConfiguration.isPushEnabled()) { params.add("--metrics-push-enabled"); 
params.add("--metrics-push-host"); params.add(metricsConfiguration.getPushHost()); params.add("--metrics-push-port"); params.add(Integer.toString(metricsConfiguration.getPushPort())); params.add("--metrics-push-interval"); params.add(Integer.toString(metricsConfiguration.getPushInterval())); params.add("--metrics-push-prometheus-job"); params.add(metricsConfiguration.getPrometheusJob()); } } node.getGenesisConfig() .ifPresent( genesis -> { final Path genesisFile = createGenesisFile(node, genesis); params.add("--genesis-file"); params.add(genesisFile.toAbsolutePath().toString()); }); if (!node.isP2pEnabled()) { params.add("--p2p-enabled"); params.add("false"); } else { final List<String> networkConfigParams = NetworkingOptions.fromConfig(node.getNetworkingConfiguration()).getCLIOptions(); params.addAll(networkConfigParams); } if (node.isRevertReasonEnabled()) { params.add("--revert-reason-enabled"); } params.add("--Xsecp256k1-native-enabled=" + node.isSecp256k1Native()); params.add("--Xaltbn128-native-enabled=" + node.isAltbn128Native()); node.getPermissioningConfiguration() .flatMap(PermissioningConfiguration::getLocalConfig) .ifPresent( permissioningConfiguration -> { if (permissioningConfiguration.isNodeWhitelistEnabled()) { params.add("--permissions-nodes-config-file-enabled"); } if (permissioningConfiguration.getNodePermissioningConfigFilePath() != null) { params.add("--permissions-nodes-config-file"); params.add(permissioningConfiguration.getNodePermissioningConfigFilePath()); } if (permissioningConfiguration.isAccountWhitelistEnabled()) { params.add("--permissions-accounts-config-file-enabled"); } if (permissioningConfiguration.getAccountPermissioningConfigFilePath() != null) { params.add("--permissions-accounts-config-file"); params.add(permissioningConfiguration.getAccountPermissioningConfigFilePath()); } }); node.getPermissioningConfiguration() .flatMap(PermissioningConfiguration::getSmartContractConfig) .ifPresent( permissioningConfiguration -> { if (permissioningConfiguration.isSmartContractNodeWhitelistEnabled()) { params.add("--permissions-nodes-contract-enabled"); } if (permissioningConfiguration.getNodeSmartContractAddress() != null) { params.add("--permissions-nodes-contract-address"); params.add(permissioningConfiguration.getNodeSmartContractAddress().toString()); } if (permissioningConfiguration.isSmartContractAccountWhitelistEnabled()) { params.add("--permissions-accounts-contract-enabled"); } if (permissioningConfiguration.getAccountSmartContractAddress() != null) { params.add("--permissions-accounts-contract-address"); params.add(permissioningConfiguration.getAccountSmartContractAddress().toString()); } }); params.addAll(node.getExtraCLIOptions()); params.add("--key-value-storage"); params.add("rocksdb"); params.add("--auto-log-bloom-caching-enabled"); params.add("false"); String level = System.getProperty("root.log.level"); if (level != null) { params.add("--logging=" + level); } LOG.info("Creating besu process with params {}", params); final ProcessBuilder processBuilder = new ProcessBuilder(params) .directory(new File(System.getProperty("user.dir")).getParentFile().getParentFile()) .redirectErrorStream(true) .redirectInput(Redirect.INHERIT); if (!node.getPlugins().isEmpty()) { processBuilder .environment() .put( "BESU_OPTS", "-Dbesu.plugins.dir=" + dataDir.resolve("plugins").toAbsolutePath().toString()); } try { checkState( isNotAliveOrphan(node.getName()), "A live process with name: %s, already exists. 
Cannot create another with the same name as it would orphan the first", node.getName()); final Process process = processBuilder.start(); process.onExit().thenRun(() -> node.setExitCode(process.exitValue())); outputProcessorExecutor.execute(() -> printOutput(node, process)); besuProcesses.put(node.getName(), process); } catch (final IOException e) { LOG.error("Error starting BesuNode process", e); } if (node.getRunCommand().isEmpty()) { waitForFile(dataDir, "besu.ports"); waitForFile(dataDir, "besu.networks"); } ThreadContext.remove("node"); } private boolean isNotAliveOrphan(final String name) { final Process orphan = besuProcesses.get(name); return orphan == null || !orphan.isAlive(); } private void printOutput(final BesuNode node, final Process process) { try (final BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream(), UTF_8))) { String line = in.readLine(); while (line != null) { // would be nice to pass up the log level of the incoming log line PROCESS_LOG.info(line); line = in.readLine(); } } catch (final IOException e) { if (besuProcesses.containsKey(node.getName())) { LOG.error("Failed to read output from process for node " + node.getName(), e); } else { LOG.debug("Stdout from process {} closed", node.getName()); } } } private Path createGenesisFile(final BesuNode node, final String genesisConfig) { try { final Path genesisFile = Files.createTempFile(node.homeDirectory(), "genesis", ""); genesisFile.toFile().deleteOnExit(); Files.write(genesisFile, genesisConfig.getBytes(UTF_8)); return genesisFile; } catch (final IOException e) { throw new IllegalStateException(e); } } private void createStaticNodes(final BesuNode node) { StaticNodesUtils.createStaticNodesFile(node.homeDirectory(), node.getStaticNodes()); } private String apiList(final Collection<RpcApi> rpcApis) { return rpcApis.stream().map(RpcApis::getValue).collect(Collectors.joining(",")); } @Override public void stopNode(final BesuNode node) { node.stop(); if (besuProcesses.containsKey(node.getName())) { killBesuProcess(node.getName()); } else { LOG.error("There was a request to stop an unknown node: {}", node.getName()); } } @Override public synchronized void shutdown() { final Set<String> localMap = new HashSet<>(besuProcesses.keySet()); localMap.forEach(this::killBesuProcess); outputProcessorExecutor.shutdown(); try { if (!outputProcessorExecutor.awaitTermination(5, TimeUnit.SECONDS)) { LOG.error("Output processor executor did not shutdown cleanly."); } } catch (final InterruptedException e) { LOG.error("Interrupted while already shutting down", e); Thread.currentThread().interrupt(); } } @Override public boolean isActive(final String nodeName) { final Process process = besuProcesses.get(nodeName); return process != null && process.isAlive(); } private void killBesuProcess(final String name) { final Process process = besuProcesses.remove(name); if (process == null) { LOG.error("Process {} wasn't in our list", name); } if (!process.isAlive()) { LOG.info("Process {} already exited", name); return; } LOG.info("Killing {} process", name); process.destroy(); try { process.waitFor(2, TimeUnit.SECONDS); } catch (final InterruptedException e) { LOG.warn("Wait for death of process {} was interrupted", name, e); } if (process.isAlive()) { LOG.warn("Process {} still alive, destroying forcibly now", name); try { process.destroyForcibly().waitFor(2, TimeUnit.SECONDS); } catch (final Exception e) { // just die already } LOG.info("Process exited with code {}", process.exitValue()); } } }
1
22642
What's the advantage of doing it this way over, say, changing the `.circleci/config.yaml` to have `TRACE` as the `root.log.level`?
hyperledger-besu
java
@@ -49,6 +49,15 @@ class ApiClient(object): dataset_id (str): id of the dataset to query. """ + @abc.abstractmethod + def fetch_bigquery_iam_policy(self, project_number, dataset_id): + """Gets IAM policy if a bigquery dataset from gcp API call. + + Args: + project_number (str): number of the project to query. + dataset_id (str): id of the dataset to query. + """ + @abc.abstractmethod def iter_bigquery_datasets(self, project_number): """Iterate Datasets from GCP API.
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """GCP API client fassade.""" # pylint: disable=invalid-name,too-many-lines # pylint: disable=too-many-public-methods,too-many-instance-attributes import abc from google.cloud.forseti.common.gcp_api import admin_directory from google.cloud.forseti.common.gcp_api import appengine from google.cloud.forseti.common.gcp_api import bigquery from google.cloud.forseti.common.gcp_api import cloud_resource_manager from google.cloud.forseti.common.gcp_api import cloudbilling from google.cloud.forseti.common.gcp_api import cloudsql from google.cloud.forseti.common.gcp_api import compute from google.cloud.forseti.common.gcp_api import container from google.cloud.forseti.common.gcp_api import iam from google.cloud.forseti.common.gcp_api import servicemanagement from google.cloud.forseti.common.gcp_api import stackdriver_logging from google.cloud.forseti.common.gcp_api import storage class ResourceNotSupported(Exception): """Exception raised for resources not supported by the API client.""" class ApiClient(object): """The gcp api client interface""" __metaclass__ = abc.ABCMeta @abc.abstractmethod def fetch_bigquery_dataset_policy(self, project_number, dataset_id): """Dataset policy Iterator for a dataset from gcp API call. Args: project_number (str): number of the project to query. dataset_id (str): id of the dataset to query. """ @abc.abstractmethod def iter_bigquery_datasets(self, project_number): """Iterate Datasets from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def fetch_billing_account_iam_policy(self, account_id): """Gets IAM policy of a Billing Account from GCP API. Args: account_id (str): id of the billing account to get policy. """ @abc.abstractmethod def fetch_billing_project_info(self, project_number): """Project Billing Info from gcp API call. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_billing_accounts(self): """Iterate visible Billing Accounts in an organization from GCP API.""" @abc.abstractmethod def iter_cloudsql_instances(self, project_number): """Iterate Cloud sql instances from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def is_compute_api_enabled(self, project_number): """Verifies the Compute API is enabled on a project. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def fetch_compute_ig_instances(self, project_number, instance_group_name, region=None, zone=None): """Get the instances for an instance group from GCP API. One and only one of zone (for zonal instance groups) and region (for regional instance groups) must be specified. Args: project_number (str): number of the project to query. instance_group_name (str): The instance group's name. region (str): The regional instance group's region. zone (str): The zonal instance group's zone. 
""" @abc.abstractmethod def fetch_compute_project(self, project_number): """Fetch compute project data from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_autoscalers(self, project_number): """Iterate Autoscalers from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_backendbuckets(self, project_number): """Iterate Backend buckets from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_backendservices(self, project_number): """Iterate Backend services from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_disks(self, project_number): """Iterate Compute Engine disks from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_firewalls(self, project_number): """Iterate Compute Engine Firewalls from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_forwardingrules(self, project_number): """Iterate Forwarding Rules from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_healthchecks(self, project_number): """Iterate Health checks from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_httphealthchecks(self, project_number): """Iterate HTTP Health checks from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_httpshealthchecks(self, project_number): """Iterate HTTPS Health checks from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_ig_managers(self, project_number): """Iterate Instance Group Manager from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_images(self, project_number): """Iterate Images from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_instancegroups(self, project_number): """Iterate Compute Engine groups from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_instances(self, project_number): """Iterate compute engine instance from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_instancetemplates(self, project_number): """Iterate Instance Templates from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_licenses(self, project_number): """Iterate Licenses from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_networks(self, project_number): """Iterate Networks from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_routers(self, project_number): """Iterate Compute Engine routers from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_snapshots(self, project_number): """Iterate Compute Engine snapshots from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_sslcertificates(self, project_number): """Iterate SSL Certificates from GCP API. 
Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_subnetworks(self, project_number): """Iterate Subnetworks from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_targethttpproxies(self, project_number): """Iterate Target HTTP proxies from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_targethttpsproxies(self, project_number): """Iterate Target HTTPS proxies from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_targetinstances(self, project_number): """Iterate Target Instances from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_targetpools(self, project_number): """Iterate Target Pools from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_targetsslproxies(self, project_number): """Iterate Target SSL proxies from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_targettcpproxies(self, project_number): """Iterate Target TCP proxies from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_compute_urlmaps(self, project_number): """Iterate URL maps from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def fetch_container_serviceconfig(self, project_id, zone=None, location=None): """Fetch Kubernetes Engine per zone service config from GCP API. Args: project_id (str): id of the project to query. zone (str): zone of the Kubernetes Engine. location (str): location of the Kubernetes Engine. """ @abc.abstractmethod def iter_container_clusters(self, project_number): """Iterate Kubernetes Engine Cluster from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def fetch_crm_folder(self, folder_id): """Fetch Folder data from GCP API. Args: folder_id (str): id of the folder to query. """ @abc.abstractmethod def fetch_crm_folder_iam_policy(self, folder_id): """Folder IAM policy in a folder from gcp API call. Args: folder_id (str): id of the folder to get policy. """ @abc.abstractmethod def fetch_crm_organization(self, org_id): """Fetch Organization data from GCP API. Args: org_id (str): id of the organization to get. """ @abc.abstractmethod def fetch_crm_organization_iam_policy(self, org_id): """Organization IAM policy from gcp API call. Args: org_id (str): id of the organization to get policy. """ @abc.abstractmethod def fetch_crm_project(self, project_number): """Fetch Project data from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def fetch_crm_project_iam_policy(self, project_number): """Project IAM policy from gcp API call. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_crm_folder_org_policies(self, folder_id): """Folder organization policies from gcp API call. Args: folder_id (str): id of the folder to get policy. """ @abc.abstractmethod def iter_crm_folders(self, parent_id): """Iterate Folders from GCP API. Args: parent_id (str): id of the parent of the folder. """ @abc.abstractmethod def iter_crm_organization_org_policies(self, org_id): """Organization organization policies from gcp API call. Args: org_id (str): id of the organization to get policy. 
""" @abc.abstractmethod def iter_crm_project_liens(self, project_number): """Iterate Liens from GCP API. Args: project_number (str): number of the parent project of the lien. """ @abc.abstractmethod def iter_crm_project_org_policies(self, project_number): """Project organization policies from gcp API call. Args: project_number (str): number of the parent project of the policy. """ @abc.abstractmethod def iter_crm_projects(self, parent_type, parent_id): """Iterate Projects from GCP API. Args: parent_type (str): type of the parent, "folder" or "organization". parent_id (str): id of the parent of the folder. """ @abc.abstractmethod def iter_dns_managedzones(self, project_number): """Iterate CloudDNS Managed Zones from GCP API. Args: project_number (str): number of the parent project. """ @abc.abstractmethod def iter_dns_policies(self, project_number): """Iterate CloudDNS Policies from GCP API. Args: project_number (str): number of the parent project of the policy. """ @abc.abstractmethod def fetch_gae_app(self, project_id): """Fetch the AppEngine App. Args: project_id (str): id of the project to query. """ @abc.abstractmethod def iter_gae_instances(self, project_id, service_id, version_id): """Iterate gae instances from GCP API. Args: project_id (str): id of the project to query. service_id (str): id of the appengine service. version_id (str): id of the appengine version. """ @abc.abstractmethod def iter_gae_services(self, project_id): """Iterate gae services from GCP API. Args: project_id (str): id of the project to query. """ @abc.abstractmethod def iter_gae_versions(self, project_id, service_id): """Iterate gae versions from GCP API. Args: project_id (str): id of the project to query. service_id (str): id of the appengine service. """ @abc.abstractmethod def iter_gsuite_group_members(self, group_key): """Iterate Gsuite group members from GCP API. Args: group_key (str): key of the group to get. """ @abc.abstractmethod def iter_gsuite_groups(self, gsuite_id): """Iterate Gsuite groups from GCP API. Args: gsuite_id (str): Gsuite id. """ @abc.abstractmethod def iter_gsuite_users(self, gsuite_id): """Iterate Gsuite users from GCP API. Args: gsuite_id (str): Gsuite id. """ @abc.abstractmethod def fetch_iam_serviceaccount_iam_policy(self, name, unique_id): """Service Account IAM policy from gcp API call. Args: name (str): The service account name to query, must be in the format projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL} unique_id (str): The unique id of the service account. """ @abc.abstractmethod def iter_iam_curated_roles(self): """Iterate Curated roles in an organization from GCP API. """ @abc.abstractmethod def iter_iam_organization_roles(self, org_id): """Iterate Organization roles from GCP API. Args: org_id (str): id of the organization to get. """ @abc.abstractmethod def iter_iam_project_roles(self, project_id, project_number): """Iterate Project roles in a project from GCP API. Args: project_id (str): id of the project to query. project_number (str): number of the project to query. """ @abc.abstractmethod def iter_iam_serviceaccount_exported_keys(self, name): """Iterate Service Account User Managed Keys from GCP API. Args: name (str): name of the service account. """ @abc.abstractmethod def iter_iam_serviceaccounts(self, project_id, project_number): """Iterate Service Accounts in a project from GCP API. Args: project_id (str): id of the project to query. project_number (str): number of the project to query. 
""" @abc.abstractmethod def fetch_kms_cryptokey_iam_policy(self, cryptokey): """Fetch KMS Cryptokey IAM Policy from GCP API. Args: cryptokey (str): The KMS cryptokey to query, must be in the format projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}/ cryptoKeys/{CRYPTOKEY_NAME} """ @abc.abstractmethod def fetch_kms_keyring_iam_policy(self, keyring): """Fetch KMS Keyring IAM Policy from GCP API. Args: keyring (str): The KMS keyring to query, must be in the format projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME} """ @abc.abstractmethod def iter_kms_cryptokeys(self, parent): """Iterate KMS Cryptokeys in a keyring from GCP API. Args: parent (str): The KMS keyring to query, must be in the format projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME} """ @abc.abstractmethod def iter_kms_cryptokeyversions(self, parent): """Iterate KMS Cryptokey Versions from GCP API. Args: parent (str): The KMS keyring to query, must be in the format projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}/ cryptoKeys/{CRYPTOKEY_NAME} """ @abc.abstractmethod def iter_kms_keyrings(self, project_id, location=None): """Iterate KMS Keyrings in a project from GCP API. Args: project_id (str): id of the project to query. location (str): The location to query. Not required when using Cloud Asset API. """ @abc.abstractmethod def fetch_pubsub_topic_iam_policy(self, name): """PubSub Topic IAM policy from gcp API call. Args: name (str): The pubsub topic to query, must be in the format projects/{PROJECT_ID}/topics/{TOPIC_NAME} """ @abc.abstractmethod def iter_pubsub_topics(self, project_id, project_number): """Iterate PubSub topics from GCP API. Args: project_id (str): id of the project to query. project_number (str): number of the project to query. """ @abc.abstractmethod def fetch_services_enabled_apis(self, project_number): """Project enabled API services from gcp API call. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_spanner_instances(self, project_number): """Iterate Spanner Instances from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_spanner_databases(self, parent): """Iterate Spanner Databases from GCP API. Args: parent (str): parent spanner instance to query. """ @abc.abstractmethod def iter_stackdriver_billing_account_sinks(self, acct_id): """Iterate Billing Account logging sinks from GCP API. Args: acct_id (str): id of the billing account to query. """ @abc.abstractmethod def iter_stackdriver_folder_sinks(self, folder_id): """Iterate Folder logging sinks from GCP API. Args: folder_id (str): id of the folder to query. """ @abc.abstractmethod def iter_stackdriver_organization_sinks(self, org_id): """Iterate Organization logging sinks from GCP API. Args: org_id (str): id of the organization to query. """ @abc.abstractmethod def iter_stackdriver_project_sinks(self, project_number): """Iterate Project logging sinks from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def fetch_storage_bucket_acls(self, bucket_id, project_id, project_number): """Bucket Access Controls from GCP API. Args: bucket_id (str): id of the bucket to query. project_id (str): id of the project to query. project_number (str): number of the project to query. """ @abc.abstractmethod def fetch_storage_bucket_iam_policy(self, bucket_id): """Bucket IAM policy Iterator from gcp API call. Args: bucket_id (str): id of the bucket to query. 
""" @abc.abstractmethod def fetch_storage_object_iam_policy(self, bucket_name, object_name): """Object IAM policy Iterator for an object from gcp API call. Args: bucket_name (str): name of the bucket. object_name (str): name of the object. """ @abc.abstractmethod def iter_storage_buckets(self, project_number): """Iterate Buckets from GCP API. Args: project_number (str): number of the project to query. """ @abc.abstractmethod def iter_storage_objects(self, bucket_id): """Iterate Objects from GCP API. Args: bucket_id (str): id of the bucket to get. """ def create_lazy(attribute, factory): """Create attributes right before they are needed. Args: attribute (str): Attribute name to check/create. factory (function): Factory to create object. Returns: function: Decorator. """ def f_wrapper(func): """Create decorator. Args: func (function): Function to wrap. Returns: function: Decorator. """ def wrapper(*args, **kwargs): """Decorator implementation. Args: *args (list): Original func arguments. **kwargs (dict): Original func arguments. Returns: object: Result produced by the wrapped func. """ this = args[0] if not hasattr(this, attribute) or not getattr(this, attribute): setattr(this, attribute, factory(this)) return func(*args, **kwargs) return wrapper return f_wrapper def is_api_disabled(config, api_name): """Check if api_name is disabled in the config. Args: config (dict): GCP API client configuration. api_name (str): The name of the GCP api to check. Returns: bool: True if the API is disabled in the configuration, else False. """ return config.get(api_name, {}).get('disable_polling', False) class ApiClientImpl(ApiClient): """The gcp api client Implementation""" def __init__(self, config): """Initialize. Args: config (dict): GCP API client configuration. """ self.ad = None self.appengine = None self.bigquery = None self.crm = None self.cloudbilling = None self.cloudsql = None self.compute = None self.container = None self.iam = None self.servicemanagement = None self.stackdriver_logging = None self.storage = None self.config = config def _create_ad(self): """Create admin directory API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. """ if is_api_disabled(self.config, admin_directory.API_NAME): raise ResourceNotSupported('Admin API disabled by server ' 'configuration.') return admin_directory.AdminDirectoryClient(self.config) def _create_appengine(self): """Create AppEngine API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. """ if is_api_disabled(self.config, appengine.API_NAME): raise ResourceNotSupported('AppEngine API disabled by server ' 'configuration.') return appengine.AppEngineClient(self.config) def _create_bq(self): """Create bigquery API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. """ if is_api_disabled(self.config, bigquery.API_NAME): raise ResourceNotSupported('Bigquery API disabled by server ' 'configuration.') return bigquery.BigQueryClient(self.config) def _create_crm(self): """Create resource manager API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. 
""" if is_api_disabled(self.config, cloud_resource_manager.API_NAME): raise ResourceNotSupported('Resource Manager API disabled by ' 'server configuration.') return cloud_resource_manager.CloudResourceManagerClient(self.config) def _create_cloudbilling(self): """Create cloud billing API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. """ if is_api_disabled(self.config, cloudbilling.API_NAME): raise ResourceNotSupported('Cloud Billing API disabled by server ' 'configuration.') return cloudbilling.CloudBillingClient(self.config) def _create_cloudsql(self): """Create cloud sql API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. """ if is_api_disabled(self.config, cloudsql.API_NAME): raise ResourceNotSupported('CloudSQL Admin API disabled by server ' 'configuration.') return cloudsql.CloudsqlClient(self.config) def _create_compute(self): """Create compute API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. """ if is_api_disabled(self.config, compute.API_NAME): raise ResourceNotSupported('Compute Engine API disabled by server ' 'configuration.') return compute.ComputeClient(self.config) def _create_container(self): """Create Kubernetes Engine API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. """ if is_api_disabled(self.config, container.API_NAME): raise ResourceNotSupported('Kubernetes Engine API disabled by ' 'server configuration.') return container.ContainerClient(self.config) def _create_iam(self): """Create IAM API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. """ if is_api_disabled(self.config, iam.API_NAME): raise ResourceNotSupported('IAM API disabled by server ' 'configuration.') return iam.IAMClient(self.config) def _create_servicemanagement(self): """Create servicemanagement API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. """ if is_api_disabled(self.config, servicemanagement.API_NAME): raise ResourceNotSupported('Service Management API disabled by ' 'server configuration.') return servicemanagement.ServiceManagementClient(self.config) def _create_stackdriver_logging(self): """Create stackdriver_logging API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. """ if is_api_disabled(self.config, stackdriver_logging.API_NAME): raise ResourceNotSupported('Stackdriver Logging API disabled by ' 'server configuration.') return stackdriver_logging.StackdriverLoggingClient(self.config) def _create_storage(self): """Create storage API client. Returns: object: Client. Raises: ResourceNotSupported: Raised if polling is disabled for this API in the GCP API client configuration. """ if is_api_disabled(self.config, storage.API_NAME): raise ResourceNotSupported('Storage API disabled by server ' 'configuration.') return storage.StorageClient(self.config) @create_lazy('bigquery', _create_bq) def fetch_bigquery_dataset_policy(self, project_number, dataset_id): """Dataset policy Iterator for a dataset from gcp API call. 
Args: project_number (str): number of the project to query. dataset_id (str): id of the dataset to query. Returns: dict: Dataset Policy. """ return self.bigquery.get_dataset_access(project_number, dataset_id) @create_lazy('bigquery', _create_bq) def iter_bigquery_datasets(self, project_number): """Iterate Datasets from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of datasets. """ for dataset in self.bigquery.get_datasets_for_projectid(project_number): yield dataset @create_lazy('cloudbilling', _create_cloudbilling) def fetch_billing_account_iam_policy(self, account_id): """Gets IAM policy of a Billing Account from GCP API. Args: account_id (str): id of the billing account to get policy. Returns: dict: Billing Account IAM policy. """ return self.cloudbilling.get_billing_acct_iam_policies(account_id) @create_lazy('cloudbilling', _create_cloudbilling) def fetch_billing_project_info(self, project_number): """Project Billing Info from gcp API call. Args: project_number (str): number of the project to query. Returns: dict: Project Billing Info resource. """ return self.cloudbilling.get_billing_info(project_number) @create_lazy('cloudbilling', _create_cloudbilling) def iter_billing_accounts(self): """Iterate visible Billing Accounts in an organization from GCP API. Yields: dict: Generator of billing accounts. """ for account in self.cloudbilling.get_billing_accounts(): yield account @create_lazy('cloudsql', _create_cloudsql) def iter_cloudsql_instances(self, project_number): """Iterate Cloud sql instances from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of cloudsql instance. """ for item in self.cloudsql.get_instances(project_number): yield item @create_lazy('compute', _create_compute) def is_compute_api_enabled(self, project_number): """Verifies the Compute API is enabled on a project. Args: project_number (str): number of the project to query. Returns: bool: True if API is enabled, else False. """ return self.compute.is_api_enabled(project_number) @create_lazy('compute', _create_compute) def fetch_compute_ig_instances(self, project_number, instance_group_name, region=None, zone=None): """Get the instances for an instance group from GCP API. One and only one of zone (for zonal instance groups) and region (for regional instance groups) must be specified. Args: project_number (str): number of the project to query. instance_group_name (str): The instance group's name. region (str): The regional instance group's region. zone (str): The zonal instance group's zone. Returns: list: instance URLs for this instance group. """ return self.compute.get_instance_group_instances(project_number, instance_group_name, region, zone) @create_lazy('compute', _create_compute) def fetch_compute_project(self, project_number): """Fetch compute project data from GCP API. Args: project_number (str): number of the project to query. Returns: dict: Compute project metadata resource. """ return self.compute.get_project(project_number) def iter_compute_autoscalers(self, project_number): """Iterate Autoscalers from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute Autoscalers are not supported by ' 'this API client') def iter_compute_backendbuckets(self, project_number): """Iterate Backend buckets from GCP API. Args: project_number (str): number of the project to query. 
Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute BackendBuckets are not supported ' 'by this API client') @create_lazy('compute', _create_compute) def iter_compute_backendservices(self, project_number): """Iterate Backend services from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of backend service. """ for backendservice in self.compute.get_backend_services(project_number): yield backendservice @create_lazy('compute', _create_compute) def iter_compute_disks(self, project_number): """Iterate Compute Engine disks from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of Compute Disk. """ for disk in self.compute.get_disks(project_number): yield disk @create_lazy('compute', _create_compute) def iter_compute_firewalls(self, project_number): """Iterate Compute Engine Firewalls from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of Compute Engine Firewall. """ for rule in self.compute.get_firewall_rules(project_number): yield rule @create_lazy('compute', _create_compute) def iter_compute_forwardingrules(self, project_number): """Iterate Forwarding Rules from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of forwarding rule resources. """ for forwardingrule in self.compute.get_forwarding_rules(project_number): yield forwardingrule def iter_compute_healthchecks(self, project_number): """Iterate Health checks from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute HealthChecks are not supported by ' 'this API client') def iter_compute_httphealthchecks(self, project_number): """Iterate HTTP Health checks from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute HttpHealthChecks are not supported ' 'by this API client') def iter_compute_httpshealthchecks(self, project_number): """Iterate HTTPS Health checks from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute HttpsHealthChecks are not ' 'supported by this API client') @create_lazy('compute', _create_compute) def iter_compute_ig_managers(self, project_number): """Iterate Instance Group Manager from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of instance group manager resources. """ for igmanager in self.compute.get_instance_group_managers( project_number): yield igmanager @create_lazy('compute', _create_compute) def iter_compute_images(self, project_number): """Iterate Images from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of image resources. """ for image in self.compute.get_images(project_number): yield image @create_lazy('compute', _create_compute) def iter_compute_instancegroups(self, project_number): """Iterate Compute Engine groups from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of Compute Instance group. 
""" for instancegroup in self.compute.get_instance_groups( project_number, include_instance_urls=False): yield instancegroup @create_lazy('compute', _create_compute) def iter_compute_instances(self, project_number): """Iterate compute engine instance from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of Compute Engine Instance. """ for instance in self.compute.get_instances(project_number): yield instance @create_lazy('compute', _create_compute) def iter_compute_instancetemplates(self, project_number): """Iterate Instance Templates from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of instance template resources. """ for instancetemplate in self.compute.get_instance_templates( project_number): yield instancetemplate def iter_compute_licenses(self, project_number): """Iterate Licenses from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute Licenses are not supported by ' 'this API client') @create_lazy('compute', _create_compute) def iter_compute_networks(self, project_number): """Iterate Networks from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of network resources. """ for network in self.compute.get_networks(project_number): yield network def iter_compute_routers(self, project_number): """Iterate Compute Engine routers from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute Routers are not supported ' 'by this API client') @create_lazy('compute', _create_compute) def iter_compute_snapshots(self, project_number): """Iterate Compute Engine snapshots from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of Compute Snapshots. """ for snapshot in self.compute.get_snapshots(project_number): yield snapshot def iter_compute_sslcertificates(self, project_number): """Iterate SSL Certificates from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute SslCertificates are not supported ' 'by this API client') @create_lazy('compute', _create_compute) def iter_compute_subnetworks(self, project_number): """Iterate Subnetworks from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of subnetwork resources. """ for subnetwork in self.compute.get_subnetworks(project_number): yield subnetwork def iter_compute_targethttpproxies(self, project_number): """Iterate Target HTTP proxies from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetHttpProxies are not ' 'supported by this API client') def iter_compute_targethttpsproxies(self, project_number): """Iterate Target HTTPS proxies from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetHttpsProxies are not ' 'supported by this API client') def iter_compute_targetinstances(self, project_number): """Iterate Target Instances from GCP API. Args: project_number (str): number of the project to query. 
Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetInstances are not ' 'supported by this API client') def iter_compute_targetpools(self, project_number): """Iterate Target Pools from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetPools are not ' 'supported by this API client') def iter_compute_targetsslproxies(self, project_number): """Iterate Target SSL proxies from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetSslProxies are not ' 'supported by this API client') def iter_compute_targettcpproxies(self, project_number): """Iterate Target TCP proxies from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetTcpProxies are not ' 'supported by this API client') def iter_compute_urlmaps(self, project_number): """Iterate URL maps from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute UrlMaps are not supported by this ' 'API client') @create_lazy('container', _create_container) def fetch_container_serviceconfig(self, project_id, zone=None, location=None): """Fetch Kubernetes Engine per zone service config from GCP API. Args: project_id (str): id of the project to query. zone (str): zone of the Kubernetes Engine. location (str): location of the Kubernetes Engine. Returns: dict: Generator of Kubernetes Engine Cluster resources. """ return self.container.get_serverconfig(project_id, zone=zone, location=location) @create_lazy('container', _create_container) def iter_container_clusters(self, project_number): """Iterate Kubernetes Engine Cluster from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of Kubernetes Engine Cluster resources. """ for cluster in self.container.get_clusters(project_number): # Don't store the master auth data in the database. if 'masterAuth' in cluster: cluster['masterAuth'] = { k: '[redacted]' for k in cluster['masterAuth'].keys()} yield cluster @create_lazy('crm', _create_crm) def fetch_crm_folder(self, folder_id): """Fetch Folder data from GCP API. Args: folder_id (str): id of the folder to query. Returns: dict: Generator of folder. """ return self.crm.get_folder(folder_id) @create_lazy('crm', _create_crm) def fetch_crm_folder_iam_policy(self, folder_id): """Folder IAM policy in a folder from gcp API call. Args: folder_id (str): id of the folder to get policy. Returns: dict: Folder IAM policy. """ return self.crm.get_folder_iam_policies(folder_id) @create_lazy('crm', _create_crm) def fetch_crm_organization(self, org_id): """Fetch Organization data from GCP API. Args: org_id (str): id of the organization to get. Returns: dict: Generator of organization. """ return self.crm.get_organization(org_id) @create_lazy('crm', _create_crm) def fetch_crm_organization_iam_policy(self, org_id): """Organization IAM policy from gcp API call. Args: org_id (str): id of the organization to get policy. Returns: dict: Organization IAM policy. 
""" return self.crm.get_org_iam_policies(org_id) @create_lazy('crm', _create_crm) def fetch_crm_project(self, project_number): """Fetch Project data from GCP API. Args: project_number (str): number of the project to query. Returns: dict: Generator of project. """ return self.crm.get_project(project_number) @create_lazy('crm', _create_crm) def fetch_crm_project_iam_policy(self, project_number): """Project IAM policy from gcp API call. Args: project_number (str): number of the project to query. Returns: dict: Project IAM Policy. """ return self.crm.get_project_iam_policies(project_number) @create_lazy('crm', _create_crm) def iter_crm_folder_org_policies(self, folder_id): """Folder organization policies from gcp API call. Args: folder_id (str): id of the folder to get policy. Yields: dict: Generator of org policies. """ for org_policy in self.crm.get_folder_org_policies(folder_id): yield org_policy @create_lazy('crm', _create_crm) def iter_crm_folders(self, parent_id): """Iterate Folders from GCP API. Args: parent_id (str): id of the parent of the folder. Yields: dict: Generator of folders. """ for folder in self.crm.get_folders(parent_id): yield folder @create_lazy('crm', _create_crm) def iter_crm_organization_org_policies(self, org_id): """Organization organization policies from gcp API call. Args: org_id (str): id of the organization to get policy. Yields: dict: Generator of org policies. """ for org_policy in self.crm.get_org_org_policies(org_id): yield org_policy @create_lazy('crm', _create_crm) def iter_crm_project_liens(self, project_number): """Iterate Liens from GCP API. Args: project_number (str): number of the parent project of the lien. Yields: dict: Generator of liens. """ for lien in self.crm.get_project_liens(project_number): yield lien @create_lazy('crm', _create_crm) def iter_crm_project_org_policies(self, project_number): """Project organization policies from gcp API call. Args: project_number (str): number of the parent project of the policy. Yields: dict: Generator of org policies. """ for org_policy in self.crm.get_project_org_policies(project_number): yield org_policy @create_lazy('crm', _create_crm) def iter_crm_projects(self, parent_type, parent_id): """Iterate Projects from GCP API. Args: parent_type (str): type of the parent, "folder" or "organization". parent_id (str): id of the parent of the folder. Yields: dict: Generator of projects. """ for page in self.crm.get_projects(parent_id=parent_id, parent_type=parent_type): for project in page.get('projects', []): yield project def iter_dns_managedzones(self, project_number): """Iterate CloudDNS Managed Zones from GCP API. Args: project_number (str): number of the parent project. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Cloud DNS Managed Zones are not supported ' 'by this API client') def iter_dns_policies(self, project_number): """Iterate CloudDNS Policies from GCP API. Args: project_number (str): number of the parent project of the policy. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Cloud DNS Policies are not supported by ' 'this API client') @create_lazy('appengine', _create_appengine) def fetch_gae_app(self, project_id): """Fetch the AppEngine App. Args: project_id (str): id of the project to query. Returns: dict: AppEngine App resource. 
""" return self.appengine.get_app(project_id) @create_lazy('appengine', _create_appengine) def iter_gae_instances(self, project_id, service_id, version_id): """Iterate gae instances from GCP API. Args: project_id (str): id of the project to query. service_id (str): id of the appengine service. version_id (str): version id of the appengine. Yields: dict: Generator of AppEngine Instance resources. """ for instance in self.appengine.list_instances( project_id, service_id, version_id): yield instance @create_lazy('appengine', _create_appengine) def iter_gae_services(self, project_id): """Iterate gae services from GCP API. Args: project_id (str): id of the project to query. Yields: dict: Generator of AppEngine Service resources. """ for service in self.appengine.list_services(project_id): yield service @create_lazy('appengine', _create_appengine) def iter_gae_versions(self, project_id, service_id): """Iterate gae versions from GCP API. Args: project_id (str): id of the project to query. service_id (str): id of the appengine service. Yields: dict: Generator of AppEngine Version resources. """ for version in self.appengine.list_versions(project_id, service_id): yield version @create_lazy('ad', _create_ad) def iter_gsuite_group_members(self, group_key): """Iterate Gsuite group members from GCP API. Args: group_key (str): key of the group to get. Yields: dict: Generator of group_member """ for member in self.ad.get_group_members(group_key): yield member @create_lazy('ad', _create_ad) def iter_gsuite_groups(self, gsuite_id): """Iterate Gsuite groups from GCP API. Args: gsuite_id (str): Gsuite id. Yields: dict: Generator of groups. """ result = self.ad.get_groups(gsuite_id) for group in result: yield group @create_lazy('ad', _create_ad) def iter_gsuite_users(self, gsuite_id): """Iterate Gsuite users from GCP API. Args: gsuite_id (str): Gsuite id. Yields: dict: Generator of user. """ for user in self.ad.get_users(gsuite_id): yield user @create_lazy('iam', _create_iam) def fetch_iam_serviceaccount_iam_policy(self, name, unique_id): """Service Account IAM policy from gcp API call. Args: name (str): The service account name to query, must be in the format projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL} unique_id (str): The unique id of the service account. Returns: dict: Service Account IAM policy. """ del unique_id # Used by CAI, not the API. return self.iam.get_service_account_iam_policy(name) @create_lazy('iam', _create_iam) def iter_iam_curated_roles(self): """Iterate Curated roles in an organization from GCP API. Yields: dict: Generator of curated roles. """ for role in self.iam.get_curated_roles(): yield role @create_lazy('iam', _create_iam) def iter_iam_organization_roles(self, org_id): """Iterate Organization roles from GCP API. Args: org_id (str): id of the organization to get. Yields: dict: Generator of organization role. """ for role in self.iam.get_organization_roles(org_id): yield role @create_lazy('iam', _create_iam) def iter_iam_project_roles(self, project_id, project_number): """Iterate Project roles in a project from GCP API. Args: project_id (str): id of the project to query. project_number (str): number of the project to query. Yields: dict: Generator of project roles. """ del project_number # Used by CAI, not the API. for role in self.iam.get_project_roles(project_id): yield role @create_lazy('iam', _create_iam) def iter_iam_serviceaccount_exported_keys(self, name): """Iterate Service Account User Managed Keys from GCP API. Args: name (str): name of the service account. 
Yields: dict: Generator of service account user managed (exported) keys """ for key in self.iam.get_service_account_keys( name, key_type=iam.IAMClient.USER_MANAGED): yield key @create_lazy('iam', _create_iam) def iter_iam_serviceaccounts(self, project_id, project_number): """Iterate Service Accounts in a project from GCP API. Args: project_id (str): id of the project to query. project_number (str): number of the project to query. Yields: dict: Generator of service account. """ del project_number # Used by CAI, not the API. for serviceaccount in self.iam.get_service_accounts(project_id): yield serviceaccount def fetch_kms_cryptokey_iam_policy(self, cryptokey): """Fetch KMS Cryptokey IAM Policy from GCP API. Args: cryptokey (str): The KMS cryptokey to query, must be in the format projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}/ cryptoKeys/{CRYPTOKEY_NAME} Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Key Management Service is not supported by ' 'this API client') def fetch_kms_keyring_iam_policy(self, keyring): """Fetch KMS Keyring IAM Policy from GCP API. Args: keyring (str): The KMS keyring to query, must be in the format projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME} Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Key Management Service is not supported by ' 'this API client') def iter_kms_cryptokeys(self, parent): """Iterate KMS Cryptokeys in a keyring from GCP API. Args: parent (str): The KMS keyring to query, must be in the format projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME} Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Key Management Service is not supported by ' 'this API client') def iter_kms_cryptokeyversions(self, parent): """Iterate KMS Cryptokey Versions from GCP API. Args: parent (str): The KMS keyring to query, must be in the format projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}/ cryptoKeys/{CRYPTOKEY_NAME} Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Key Management Service is not supported by ' 'this API client') def iter_kms_keyrings(self, project_id, location=None): """Iterate KMS Keyrings in a project from GCP API. Args: project_id (str): id of the project to query. location (str): The location to query. Not required when using Cloud Asset API. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Key Management Service is not supported by ' 'this API client') def fetch_pubsub_topic_iam_policy(self, name): """PubSub Topic IAM policy from gcp API call. Args: name (str): The pubsub topic to query, must be in the format projects/{PROJECT_ID}/topics/{TOPIC_NAME} Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('PubSub Topics are not supported by this ' 'API client') def iter_pubsub_topics(self, project_id, project_number): """Iterate PubSub topics from GCP API. Args: project_id (str): id of the project to query. project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('PubSub Topics are not supported by this ' 'API client') @create_lazy('servicemanagement', _create_servicemanagement) def fetch_services_enabled_apis(self, project_number): """Project enabled API services from gcp API call. 
Args: project_number (str): number of the project to query. Returns: list: A list of ManagedService resource dicts. """ return self.servicemanagement.get_enabled_apis(project_number) def iter_spanner_instances(self, project_number): """Iterate Spanner Instances from GCP API. Args: project_number (str): number of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Spanner Instances are not supported by ' 'this API client') def iter_spanner_databases(self, parent): """Iterate Spanner Databases from GCP API. Args: parent (str): parent spanner instance to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Spanner Databases are not supported by ' 'this API client') @create_lazy('stackdriver_logging', _create_stackdriver_logging) def iter_stackdriver_billing_account_sinks(self, acct_id): """Iterate Billing Account logging sinks from GCP API. Args: acct_id (str): id of the billing account to query. Yields: dict: Generator of billing account logging sinks. """ for sink in self.stackdriver_logging.get_billing_account_sinks(acct_id): yield sink @create_lazy('stackdriver_logging', _create_stackdriver_logging) def iter_stackdriver_folder_sinks(self, folder_id): """Iterate Folder logging sinks from GCP API. Args: folder_id (str): id of the folder to query. Yields: dict: Generator of folder logging sinks. """ for sink in self.stackdriver_logging.get_folder_sinks(folder_id): yield sink @create_lazy('stackdriver_logging', _create_stackdriver_logging) def iter_stackdriver_organization_sinks(self, org_id): """Iterate Organization logging sinks from GCP API. Args: org_id (str): id of the organization to query. Yields: dict: Generator of organization logging sinks. """ for sink in self.stackdriver_logging.get_organization_sinks(org_id): yield sink @create_lazy('stackdriver_logging', _create_stackdriver_logging) def iter_stackdriver_project_sinks(self, project_number): """Iterate Project logging sinks from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of project logging sinks. """ for sink in self.stackdriver_logging.get_project_sinks(project_number): yield sink @create_lazy('storage', _create_storage) def fetch_storage_bucket_acls(self, bucket_id, project_id, project_number): """Bucket Access Controls from GCP API. Args: bucket_id (str): id of the bucket to query. project_id (str): id of the project to query. project_number (str): number of the project to query. Returns: list: Bucket Access Controls. """ del project_id, project_number return self.storage.get_bucket_acls(bucket_id) @create_lazy('storage', _create_storage) def fetch_storage_bucket_iam_policy(self, bucket_id): """Bucket IAM policy Iterator from gcp API call. Args: bucket_id (str): id of the bucket to query. Returns: dict: Bucket IAM policy. """ return self.storage.get_bucket_iam_policy(bucket_id) @create_lazy('storage', _create_storage) def fetch_storage_object_iam_policy(self, bucket_name, object_name): """Object IAM policy Iterator for an object from gcp API call. Args: bucket_name (str): name of the bucket. object_name (str): name of the object. Returns: dict: Object IAM policy. """ return self.storage.get_storage_object_iam_policy(bucket_name, object_name) @create_lazy('storage', _create_storage) def iter_storage_buckets(self, project_number): """Iterate Buckets from GCP API. Args: project_number (str): number of the project to query. Yields: dict: Generator of buckets. 
""" for bucket in self.storage.get_buckets(project_number): yield bucket @create_lazy('storage', _create_storage) def iter_storage_objects(self, bucket_id): """Iterate Objects from GCP API. Args: bucket_id (str): id of the bucket to get. Yields: dict: Generator of objects. """ for gcs_object in self.storage.get_objects(bucket_name=bucket_id): yield gcs_object
1
32,870
typo: if -> of
forseti-security-forseti-security
py
@@ -270,7 +270,10 @@ func (templateContext) funcMarkdown(input interface{}) (string, error) { buf.Reset() defer bufPool.Put(buf) - md.Convert([]byte(inputStr), buf) + err := md.Convert([]byte(inputStr), buf) + if err != nil { + return "", err + } return buf.String(), nil }
1
// Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package templates import ( "bytes" "fmt" "io" "net" "net/http" "os" "path" "strconv" "strings" "sync" "text/template" "github.com/Masterminds/sprig/v3" "github.com/alecthomas/chroma/formatters/html" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddyhttp" "github.com/yuin/goldmark" highlighting "github.com/yuin/goldmark-highlighting" "github.com/yuin/goldmark/extension" "github.com/yuin/goldmark/parser" gmhtml "github.com/yuin/goldmark/renderer/html" ) // templateContext is the templateContext with which HTTP templates are executed. type templateContext struct { Root http.FileSystem Req *http.Request Args []interface{} // defined by arguments to funcInclude RespHeader tplWrappedHeader config *Templates } // OriginalReq returns the original, unmodified, un-rewritten request as // it originally came in over the wire. func (c templateContext) OriginalReq() http.Request { or, _ := c.Req.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request) return or } // funcInclude returns the contents of filename relative to the site root. // Note that included files are NOT escaped, so you should only include // trusted files. If it is not trusted, be sure to use escaping functions // in your template. func (c templateContext) funcInclude(filename string, args ...interface{}) (string, error) { if c.Root == nil { return "", fmt.Errorf("root file system not specified") } file, err := c.Root.Open(filename) if err != nil { return "", err } defer file.Close() bodyBuf := bufPool.Get().(*bytes.Buffer) bodyBuf.Reset() defer bufPool.Put(bodyBuf) _, err = io.Copy(bodyBuf, file) if err != nil { return "", err } c.Args = args err = c.executeTemplateInBuffer(filename, bodyBuf) if err != nil { return "", err } return bodyBuf.String(), nil } // funcHTTPInclude returns the body of a virtual (lightweight) request // to the given URI on the same server. Note that included bodies // are NOT escaped, so you should only include trusted resources. // If it is not trusted, be sure to use escaping functions yourself. 
func (c templateContext) funcHTTPInclude(uri string) (string, error) { // prevent virtual request loops by counting how many levels // deep we are; and if we get too deep, return an error recursionCount := 1 if numStr := c.Req.Header.Get(recursionPreventionHeader); numStr != "" { num, err := strconv.Atoi(numStr) if err != nil { return "", fmt.Errorf("parsing %s: %v", recursionPreventionHeader, err) } if num >= 3 { return "", fmt.Errorf("virtual request cycle") } recursionCount = num + 1 } buf := bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) virtReq, err := http.NewRequest("GET", uri, nil) if err != nil { return "", err } virtReq.Host = c.Req.Host virtReq.Header = c.Req.Header.Clone() virtReq.Trailer = c.Req.Trailer.Clone() virtReq.Header.Set(recursionPreventionHeader, strconv.Itoa(recursionCount)) vrw := &virtualResponseWriter{body: buf, header: make(http.Header)} server := c.Req.Context().Value(caddyhttp.ServerCtxKey).(http.Handler) server.ServeHTTP(vrw, virtReq) if vrw.status >= 400 { return "", fmt.Errorf("http %d", vrw.status) } err = c.executeTemplateInBuffer(uri, buf) if err != nil { return "", err } return buf.String(), nil } func (c templateContext) executeTemplateInBuffer(tplName string, buf *bytes.Buffer) error { tpl := template.New(tplName) if len(c.config.Delimiters) == 2 { tpl.Delims(c.config.Delimiters[0], c.config.Delimiters[1]) } tpl.Funcs(sprigFuncMap) tpl.Funcs(template.FuncMap{ "include": c.funcInclude, "httpInclude": c.funcHTTPInclude, "stripHTML": c.funcStripHTML, "markdown": c.funcMarkdown, "splitFrontMatter": c.funcSplitFrontMatter, "listFiles": c.funcListFiles, "env": c.funcEnv, "placeholder": c.placeholder, }) parsedTpl, err := tpl.Parse(buf.String()) if err != nil { return err } buf.Reset() // reuse buffer for output return parsedTpl.Execute(buf, c) } func (c templateContext) placeholder(name string) string { repl := c.Req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) value, _ := repl.GetString(name) return value } func (templateContext) funcEnv(varName string) string { return os.Getenv(varName) } // Cookie gets the value of a cookie with name name. func (c templateContext) Cookie(name string) string { cookies := c.Req.Cookies() for _, cookie := range cookies { if cookie.Name == name { return cookie.Value } } return "" } // RemoteIP gets the IP address of the client making the request. func (c templateContext) RemoteIP() string { ip, _, err := net.SplitHostPort(c.Req.RemoteAddr) if err != nil { return c.Req.RemoteAddr } return ip } // Host returns the hostname portion of the Host header // from the HTTP request. func (c templateContext) Host() (string, error) { host, _, err := net.SplitHostPort(c.Req.Host) if err != nil { if !strings.Contains(c.Req.Host, ":") { // common with sites served on the default port 80 return c.Req.Host, nil } return "", err } return host, nil } // funcStripHTML returns s without HTML tags. It is fairly naive // but works with most valid HTML inputs. func (templateContext) funcStripHTML(s string) string { var buf bytes.Buffer var inTag, inQuotes bool var tagStart int for i, ch := range s { if inTag { if ch == '>' && !inQuotes { inTag = false } else if ch == '<' && !inQuotes { // false start buf.WriteString(s[tagStart:i]) tagStart = i } else if ch == '"' { inQuotes = !inQuotes } continue } if ch == '<' { inTag = true tagStart = i continue } buf.WriteRune(ch) } if inTag { // false start buf.WriteString(s[tagStart:]) } return buf.String() } // funcMarkdown renders the markdown body as HTML. 
The resulting // HTML is NOT escaped so that it can be rendered as HTML. func (templateContext) funcMarkdown(input interface{}) (string, error) { inputStr := toString(input) md := goldmark.New( goldmark.WithExtensions( extension.GFM, extension.Footnote, highlighting.NewHighlighting( highlighting.WithFormatOptions( html.WithClasses(true), ), ), ), goldmark.WithParserOptions( parser.WithAutoHeadingID(), ), goldmark.WithRendererOptions( gmhtml.WithUnsafe(), // TODO: this is not awesome, maybe should be configurable? ), ) buf := bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) md.Convert([]byte(inputStr), buf) return buf.String(), nil } // splitFrontMatter parses front matter out from the beginning of input, // and returns the separated key-value pairs and the body/content. input // must be a "stringy" value. func (templateContext) funcSplitFrontMatter(input interface{}) (parsedMarkdownDoc, error) { meta, body, err := extractFrontMatter(toString(input)) if err != nil { return parsedMarkdownDoc{}, err } return parsedMarkdownDoc{Meta: meta, Body: body}, nil } // funcListFiles reads and returns a slice of names from the given // directory relative to the root of c. func (c templateContext) funcListFiles(name string) ([]string, error) { if c.Root == nil { return nil, fmt.Errorf("root file system not specified") } dir, err := c.Root.Open(path.Clean(name)) if err != nil { return nil, err } defer dir.Close() stat, err := dir.Stat() if err != nil { return nil, err } if !stat.IsDir() { return nil, fmt.Errorf("%v is not a directory", name) } dirInfo, err := dir.Readdir(0) if err != nil { return nil, err } names := make([]string, len(dirInfo)) for i, fileInfo := range dirInfo { names[i] = fileInfo.Name() } return names, nil } // tplWrappedHeader wraps niladic functions so that they // can be used in templates. (Template functions must // return a value.) type tplWrappedHeader struct{ http.Header } // Add adds a header field value, appending val to // existing values for that field. It returns an // empty string. func (h tplWrappedHeader) Add(field, val string) string { h.Header.Add(field, val) return "" } // Set sets a header field value, overwriting any // other values for that field. It returns an // empty string. func (h tplWrappedHeader) Set(field, val string) string { h.Header.Set(field, val) return "" } // Del deletes a header field. It returns an empty string. func (h tplWrappedHeader) Del(field string) string { h.Header.Del(field) return "" } func toString(input interface{}) string { switch v := input.(type) { case string: return v case fmt.Stringer: return v.String() case error: return v.Error() default: return fmt.Sprintf("%v", input) } } var bufPool = sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } // at time of writing, sprig.FuncMap() makes a copy, thus // involves iterating the whole map, so do it just once var sprigFuncMap = sprig.TxtFuncMap() const recursionPreventionHeader = "Caddy-Templates-Include"
1
15,758
It'll be interesting to see who or what this breaks...
caddyserver-caddy
go
@@ -96,7 +96,7 @@ public class InternalSelenseTestBase extends SeleneseTestBase { return; } - log.info("In dev mode. Copying required files in case we're using a WebDriver-backed Selenium"); + log.finest("In dev mode. Copying required files in case we're using a WebDriver-backed Selenium"); Path dir = InProject.locate("java/client/build/production/com/thoughtworks/selenium/webdriven");
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.thoughtworks.selenium; import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; import static org.openqa.selenium.UnexpectedAlertBehaviour.IGNORE; import static org.openqa.selenium.build.DevMode.isInDevMode; import static org.openqa.selenium.remote.CapabilityType.UNEXPECTED_ALERT_BEHAVIOUR; import com.google.common.base.Throwables; import com.google.common.collect.ImmutableSet; import com.google.common.io.Resources; import com.thoughtworks.selenium.testing.SeleniumTestEnvironment; import com.thoughtworks.selenium.webdriven.WebDriverBackedSelenium; import org.junit.After; import org.junit.AfterClass; import org.junit.Assume; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.rules.ExternalResource; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; import org.junit.rules.TestWatcher; import org.junit.runner.Description; import org.junit.runners.model.Statement; import org.openqa.selenium.build.BuckBuild; import org.openqa.selenium.Capabilities; import org.openqa.selenium.JavascriptExecutor; import org.openqa.selenium.MutableCapabilities; import org.openqa.selenium.WebDriver; import org.openqa.selenium.WrapsDriver; import org.openqa.selenium.chrome.ChromeOptions; import org.openqa.selenium.edge.EdgeOptions; import org.openqa.selenium.environment.GlobalTestEnvironment; import org.openqa.selenium.firefox.FirefoxOptions; import org.openqa.selenium.ie.InternetExplorerOptions; import org.openqa.selenium.opera.OperaOptions; import org.openqa.selenium.remote.DesiredCapabilities; import org.openqa.selenium.build.DevMode; import org.openqa.selenium.build.InProject; import org.openqa.selenium.safari.SafariOptions; import org.openqa.selenium.testing.drivers.Browser; import org.openqa.selenium.testing.drivers.WebDriverBuilder; import java.io.IOException; import java.net.URL; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Logger; public class InternalSelenseTestBase extends SeleneseTestBase { private static final Logger log = Logger.getLogger(InternalSelenseTestBase.class.getName()); private static final ImmutableSet<String> ATOM_TARGETS = ImmutableSet.of( "findElement", "findOption", "fireEvent", "fireEventAt", "getAttribute", "getText", "linkLocator", "isElementPresent", "isSomethingSelected", "isTextPresent", "isVisible", "setCursorPosition", "type"); private static Selenium INSTANCE; private static final AtomicBoolean MUST_BUILD = new AtomicBoolean(true); @BeforeClass public static void buildJavascriptLibraries() throws IOException { if (!DevMode.isInDevMode() || !MUST_BUILD.compareAndSet(true, false)) { return; } 
log.info("In dev mode. Copying required files in case we're using a WebDriver-backed Selenium"); Path dir = InProject.locate("java/client/build/production/com/thoughtworks/selenium/webdriven"); Files.createDirectories(dir); for (String target : ATOM_TARGETS) { Path atom = new BuckBuild().of("//javascript/selenium-atoms:" + target).go(isInDevMode()); Files.copy(atom, dir.resolve(atom.getFileName()), REPLACE_EXISTING); } Path sizzle = InProject.locate("third_party/js/sizzle/sizzle.js"); Files.copy(sizzle, dir.resolve("sizzle.js"), REPLACE_EXISTING); Path seDir = InProject.locate("java/client/test/com/thoughtworks/selenium"); Path destDir = InProject.locate("java/client/build/production/com/thoughtworks/selenium"); Files.list(seDir) .filter(path -> path.getFileName().toString().endsWith(".js")) .forEach(path -> { try { Files.copy(path, destDir.resolve(path.getFileName()), REPLACE_EXISTING); } catch (IOException e) { throw new RuntimeException(e); } }); } @BeforeClass public static void initializeServer() { GlobalTestEnvironment.get(SeleniumTestEnvironment.class); } public TestWatcher traceMethodName = new TestWatcher() { @Override protected void starting(Description description) { super.starting(description); log.info(">>> Starting " + description); } @Override protected void finished(Description description) { super.finished(description); log.info("<<< Finished " + description); } }; public ExternalResource initializeSelenium = new ExternalResource() { @Override protected void before() { selenium = INSTANCE; if (selenium != null) { return; } MutableCapabilities caps = new MutableCapabilities(createCapabilities()); caps.setCapability(UNEXPECTED_ALERT_BEHAVIOUR, IGNORE); String baseUrl = whereIs("selenium-server/"); WebDriver driver = new WebDriverBuilder().get(caps); selenium = new WebDriverBackedSelenium(driver, baseUrl); selenium.setBrowserLogLevel("debug"); INSTANCE = selenium; } }; private Capabilities createCapabilities() { String property = System.getProperty("selenium.browser", "ff"); Browser browser = Browser.valueOf(property); switch (browser) { case CHROME: return new ChromeOptions(); case EDGE: return new EdgeOptions(); case IE: return new InternetExplorerOptions(); case FIREFOX: case MARIONETTE: return new FirefoxOptions(); case OPERA: case OPERABLINK: return new OperaOptions(); case SAFARI: return new SafariOptions(); default: fail("Attempt to use an unsupported browser: " + property); // we never get here, but keep null checks happy anyway return new DesiredCapabilities(); } } public ExternalResource addNecessaryJavascriptCommands = new ExternalResource() { @Override protected void before() { if (!(selenium instanceof WebDriverBackedSelenium)) { return; } // We need to be a on page where we can execute JS WebDriver driver = ((WrapsDriver) selenium).getWrappedDriver(); driver.get(whereIs("/selenium-server")); try { URL scriptUrl = Resources.getResource(getClass(), "/com/thoughtworks/selenium/testHelpers.js"); String script = Resources.toString(scriptUrl, StandardCharsets.UTF_8); ((JavascriptExecutor) driver).executeScript(script); } catch (IOException e) { fail("Cannot read script: " + Throwables.getStackTraceAsString(e)); } } }; public ExternalResource returnFocusToMainWindow = new ExternalResource() { @Override protected void before() { if (selenium == null) { return; } selenium.selectWindow(""); selenium.windowFocus(); } }; public TestWatcher filter = new TestWatcher() { @Override public Statement apply(Statement base, Description description) { String onlyRun = 
System.getProperty("only_run"); Assume.assumeTrue(onlyRun == null || Arrays.asList(onlyRun.split(",")).contains(description.getTestClass().getSimpleName())); String mth = System.getProperty("method"); Assume.assumeTrue(mth == null || Arrays.asList(mth.split(",")).contains(description.getMethodName())); return super.apply(base, description); } }; @Rule public TestRule chain = RuleChain.outerRule(filter) .around(initializeSelenium) .around(returnFocusToMainWindow) .around(addNecessaryJavascriptCommands) .around(traceMethodName); @After public void checkVerifications() { checkForVerificationErrors(); } private String whereIs(String location) { return GlobalTestEnvironment.get().getAppServer().whereIs(location); } @AfterClass public static void destroyDriver() { if (Boolean.getBoolean("webdriver.singletestsuite.leaverunning")) { return; } Selenium selenium = INSTANCE; if (selenium != null) { selenium.stop(); INSTANCE = null; } } }
1
16,448
This change is incorrect: the current log level is correct.
SeleniumHQ-selenium
java
@@ -58,7 +58,7 @@ type clientFactory struct { // NewClientFactory creates a new ClientFactory func NewClientFactory() ClientFactory { - logger := log.NewDefaultLogger() + logger := log.NewTestLogger() return &clientFactory{ logger: logger,
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package cli import ( "crypto/tls" "crypto/x509" "errors" "io/ioutil" "net" "github.com/urfave/cli" "go.temporal.io/api/workflowservice/v1" sdkclient "go.temporal.io/sdk/client" "google.golang.org/grpc" "google.golang.org/grpc/credentials" healthpb "google.golang.org/grpc/health/grpc_health_v1" "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/common/auth" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" ) // ClientFactory is used to construct rpc clients type ClientFactory interface { FrontendClient(c *cli.Context) workflowservice.WorkflowServiceClient AdminClient(c *cli.Context) adminservice.AdminServiceClient SDKClient(c *cli.Context, namespace string) sdkclient.Client HealthClient(c *cli.Context) healthpb.HealthClient } type clientFactory struct { logger log.Logger } // NewClientFactory creates a new ClientFactory func NewClientFactory() ClientFactory { logger := log.NewDefaultLogger() return &clientFactory{ logger: logger, } } // FrontendClient builds a frontend client func (b *clientFactory) FrontendClient(c *cli.Context) workflowservice.WorkflowServiceClient { connection, _ := b.createGRPCConnection(c) return workflowservice.NewWorkflowServiceClient(connection) } // AdminClient builds an admin client. func (b *clientFactory) AdminClient(c *cli.Context) adminservice.AdminServiceClient { connection, _ := b.createGRPCConnection(c) return adminservice.NewAdminServiceClient(connection) } // SDKClient builds an SDK client. func (b *clientFactory) SDKClient(c *cli.Context, namespace string) sdkclient.Client { hostPort := c.GlobalString(FlagAddress) if hostPort == "" { hostPort = localHostPort } tlsConfig, err := b.createTLSConfig(c) if err != nil { b.logger.Fatal("Failed to configure TLS for SDK client", tag.Error(err)) } sdkClient, err := sdkclient.NewClient(sdkclient.Options{ HostPort: hostPort, Namespace: namespace, Logger: log.NewSdkLogger(b.logger), ConnectionOptions: sdkclient.ConnectionOptions{ DisableHealthCheck: true, TLS: tlsConfig, }, }) if err != nil { b.logger.Fatal("Failed to create SDK client", tag.Error(err)) } return sdkClient } // HealthClient builds a health client. 
func (b *clientFactory) HealthClient(c *cli.Context) healthpb.HealthClient { connection, _ := b.createGRPCConnection(c) return healthpb.NewHealthClient(connection) } func (b *clientFactory) createGRPCConnection(c *cli.Context) (*grpc.ClientConn, error) { hostPort := c.GlobalString(FlagAddress) if hostPort == "" { hostPort = localHostPort } tlsConfig, err := b.createTLSConfig(c) if err != nil { return nil, err } grpcSecurityOptions := grpc.WithInsecure() if tlsConfig != nil { grpcSecurityOptions = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)) } connection, err := grpc.Dial(hostPort, grpcSecurityOptions) if err != nil { b.logger.Fatal("Failed to create connection", tag.Error(err)) return nil, err } return connection, nil } func (b *clientFactory) createTLSConfig(c *cli.Context) (*tls.Config, error) { certPath := c.GlobalString(FlagTLSCertPath) keyPath := c.GlobalString(FlagTLSKeyPath) caPath := c.GlobalString(FlagTLSCaPath) disableHostNameVerification := c.GlobalBool(FlagTLSDisableHostVerification) serverName := c.GlobalString(FlagTLSServerName) var host string var cert *tls.Certificate var caPool *x509.CertPool if caPath != "" { caCertPool, err := fetchCACert(caPath) if err != nil { b.logger.Fatal("Failed to load server CA certificate", tag.Error(err)) return nil, err } caPool = caCertPool } if certPath != "" { myCert, err := tls.LoadX509KeyPair(certPath, keyPath) if err != nil { b.logger.Fatal("Failed to load client certificate", tag.Error(err)) return nil, err } cert = &myCert } // If we are given arguments to verify either server or client, configure TLS if caPool != nil || cert != nil { if serverName != "" { host = serverName } else { hostPort := c.GlobalString(FlagAddress) if hostPort == "" { hostPort = localHostPort } // Ignoring error as we'll fail to dial anyway, and that will produce a meaningful error host, _, _ = net.SplitHostPort(hostPort) } tlsConfig := auth.NewTLSConfigForServer(host, !disableHostNameVerification) if caPool != nil { tlsConfig.RootCAs = caPool } if cert != nil { tlsConfig.Certificates = []tls.Certificate{*cert} } return tlsConfig, nil } // If we are given a server name, set the TLS server name for DNS resolution if serverName != "" { host = serverName tlsConfig := auth.NewTLSConfigForServer(host, !disableHostNameVerification) return tlsConfig, nil } return nil, nil } func fetchCACert(path string) (*x509.CertPool, error) { caPool := x509.NewCertPool() caBytes, err := ioutil.ReadFile(path) if err != nil { return nil, err } if !caPool.AppendCertsFromPEM(caBytes) { return nil, errors.New("unknown failure constructing cert pool for ca") } return caPool, nil }
1
11,747
There are a few cases like this where the default logger is used from CLI/db tools. I would copy `NewTestLogger` to `NewCLILogger` and use it everywhere in the CLI. In the future these two might be different.
temporalio-temporal
go
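The review comment above suggests giving the CLI its own logger constructor rather than reusing the server default. A minimal, self-contained Go sketch of that idea; `NewCLILogger` is hypothetical, and the `Logger` interface below is a trimmed stand-in for the real one in `go.temporal.io/server/common/log`, not the actual API.

package main

import (
	"log"
	"os"
)

// Logger is a stand-in for the interface the client factory stores;
// only the methods needed for this sketch are declared.
type Logger interface {
	Info(msg string)
	Fatal(msg string)
}

// cliLogger is a hypothetical human-oriented logger for CLI/db tools:
// plain text to stderr, no JSON encoding, no sampling.
type cliLogger struct{ out *log.Logger }

// NewCLILogger mirrors what copying NewTestLogger to NewCLILogger could
// look like; the name and behaviour here are assumptions.
func NewCLILogger() Logger {
	return &cliLogger{out: log.New(os.Stderr, "", log.LstdFlags)}
}

func (c *cliLogger) Info(msg string)  { c.out.Println("INFO  " + msg) }
func (c *cliLogger) Fatal(msg string) { c.out.Fatalln("FATAL " + msg) }

func main() {
	// In the diff above, NewClientFactory would then read:
	//   logger := log.NewCLILogger()
	//   return &clientFactory{logger: logger}
	NewCLILogger().Info("CLI logger sketch")
}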
@@ -222,4 +222,16 @@ public abstract class AbstractAuthenticationToken implements Authentication, return sb.toString(); } + + protected static Integer extractKeyHash(String key) { + Object value = nullSafeValue(key); + return value.hashCode(); + } + + protected static Object nullSafeValue(Object value){ + if(value == null || "".equals(value)) { + throw new IllegalArgumentException("Cannot pass null or empty values to constructor"); + } + return value; + } }
1
/* * Copyright 2004, 2005, 2006 Acegi Technology Pty Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.security.authentication; import java.security.Principal; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import org.springframework.security.core.Authentication; import org.springframework.security.core.GrantedAuthority; import org.springframework.security.core.CredentialsContainer; import org.springframework.security.core.authority.AuthorityUtils; import org.springframework.security.core.userdetails.UserDetails; /** * Base class for <code>Authentication</code> objects. * <p> * Implementations which use this class should be immutable. * * @author Ben Alex * @author Luke Taylor */ public abstract class AbstractAuthenticationToken implements Authentication, CredentialsContainer { // ~ Instance fields // ================================================================================================ private Object details; private final Collection<GrantedAuthority> authorities; private boolean authenticated = false; // ~ Constructors // =================================================================================================== /** * Creates a token with the supplied array of authorities. * * @param authorities the collection of <tt>GrantedAuthority</tt>s for the principal * represented by this authentication object. */ public AbstractAuthenticationToken(Collection<? extends GrantedAuthority> authorities) { if (authorities == null) { this.authorities = AuthorityUtils.NO_AUTHORITIES; return; } for (GrantedAuthority a : authorities) { if (a == null) { throw new IllegalArgumentException( "Authorities collection cannot contain any null elements"); } } ArrayList<GrantedAuthority> temp = new ArrayList<GrantedAuthority>( authorities.size()); temp.addAll(authorities); this.authorities = Collections.unmodifiableList(temp); } // ~ Methods // ======================================================================================================== public Collection<GrantedAuthority> getAuthorities() { return authorities; } public String getName() { if (this.getPrincipal() instanceof UserDetails) { return ((UserDetails) this.getPrincipal()).getUsername(); } if (getPrincipal() instanceof Principal) { return ((Principal) getPrincipal()).getName(); } return (this.getPrincipal() == null) ? "" : this.getPrincipal().toString(); } public boolean isAuthenticated() { return authenticated; } public void setAuthenticated(boolean authenticated) { this.authenticated = authenticated; } public Object getDetails() { return details; } public void setDetails(Object details) { this.details = details; } /** * Checks the {@code credentials}, {@code principal} and {@code details} objects, * invoking the {@code eraseCredentials} method on any which implement * {@link CredentialsContainer}. 
*/ public void eraseCredentials() { eraseSecret(getCredentials()); eraseSecret(getPrincipal()); eraseSecret(details); } private void eraseSecret(Object secret) { if (secret instanceof CredentialsContainer) { ((CredentialsContainer) secret).eraseCredentials(); } } @Override public boolean equals(Object obj) { if (!(obj instanceof AbstractAuthenticationToken)) { return false; } AbstractAuthenticationToken test = (AbstractAuthenticationToken) obj; if (!authorities.equals(test.authorities)) { return false; } if ((this.details == null) && (test.getDetails() != null)) { return false; } if ((this.details != null) && (test.getDetails() == null)) { return false; } if ((this.details != null) && (!this.details.equals(test.getDetails()))) { return false; } if ((this.getCredentials() == null) && (test.getCredentials() != null)) { return false; } if ((this.getCredentials() != null) && !this.getCredentials().equals(test.getCredentials())) { return false; } if (this.getPrincipal() == null && test.getPrincipal() != null) { return false; } if (this.getPrincipal() != null && !this.getPrincipal().equals(test.getPrincipal())) { return false; } return this.isAuthenticated() == test.isAuthenticated(); } @Override public int hashCode() { int code = 31; for (GrantedAuthority authority : authorities) { code ^= authority.hashCode(); } if (this.getPrincipal() != null) { code ^= this.getPrincipal().hashCode(); } if (this.getCredentials() != null) { code ^= this.getCredentials().hashCode(); } if (this.getDetails() != null) { code ^= this.getDetails().hashCode(); } if (this.isAuthenticated()) { code ^= -37; } return code; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append(super.toString()).append(": "); sb.append("Principal: ").append(this.getPrincipal()).append("; "); sb.append("Credentials: [PROTECTED]; "); sb.append("Authenticated: ").append(this.isAuthenticated()).append("; "); sb.append("Details: ").append(this.getDetails()).append("; "); if (!authorities.isEmpty()) { sb.append("Granted Authorities: "); int i = 0; for (GrantedAuthority authority : authorities) { if (i++ > 0) { sb.append(", "); } sb.append(authority); } } else { sb.append("Not granted any authorities"); } return sb.toString(); } }
1
9,209
While it provides re-use, this method does not make sense in `AbstractAuthenticationToken` because it knows nothing of a hash key. Instead, we should move this to a private method within each subclass.
spring-projects-spring-security
java
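The comment above argues the key-hash helpers belong in the concrete token classes that actually carry a key, not in `AbstractAuthenticationToken`. A hedged Java sketch of what that could look like; the class name, fields, and messages are illustrative, not taken from the real codebase.

import java.util.Collections;
import org.springframework.security.authentication.AbstractAuthenticationToken;
import org.springframework.security.core.GrantedAuthority;

// Hypothetical subclass showing the helpers moved out of the base class
// and made private to the token that owns the key.
public class KeyBasedAuthenticationToken extends AbstractAuthenticationToken {

	private final int keyHash;
	private final Object principal;

	public KeyBasedAuthenticationToken(String key, Object principal) {
		super(Collections.<GrantedAuthority>emptyList());
		this.keyHash = extractKeyHash(key);
		this.principal = principal;
		setAuthenticated(true);
	}

	public int getKeyHash() {
		return this.keyHash;
	}

	@Override
	public Object getCredentials() {
		return "";
	}

	@Override
	public Object getPrincipal() {
		return this.principal;
	}

	// Private, subclass-local versions of the helpers from the diff above.
	private static int extractKeyHash(String key) {
		return nullSafeValue(key).hashCode();
	}

	private static Object nullSafeValue(Object value) {
		if (value == null || "".equals(value)) {
			throw new IllegalArgumentException("Cannot pass null or empty values to constructor");
		}
		return value;
	}
}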
@@ -322,7 +322,7 @@ def extract_record_set(records, filters, sorting, paginated = {} for rule in pagination_rules: values = apply_filters(filtered, rule) - paginated.update(((x[id_field], x) for x in values)) + paginated.update((id(x), x) for x in values) paginated = paginated.values() else: paginated = filtered
1
import re import operator from collections import defaultdict from collections import abc import numbers from kinto.core import utils from kinto.core.decorators import synchronized from kinto.core.storage import ( StorageBase, exceptions, DEFAULT_ID_FIELD, DEFAULT_MODIFIED_FIELD, DEFAULT_DELETED_FIELD, MISSING) from kinto.core.utils import (COMPARISON, find_nested_value) import json import ujson def tree(): return defaultdict(tree) class MemoryBasedStorage(StorageBase): """Abstract storage class, providing basic operations and methods for in-memory implementations of sorting and filtering. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def initialize_schema(self, dry_run=False): # Nothing to do. pass def strip_deleted_record(self, collection_id, parent_id, record, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD): """Strip the record of all its fields expect id and timestamp, and set the deletion field value (e.g deleted=True) """ deleted = {} deleted[id_field] = record[id_field] deleted[modified_field] = record[modified_field] deleted[deleted_field] = True return deleted def set_record_timestamp(self, collection_id, parent_id, record, modified_field=DEFAULT_MODIFIED_FIELD, last_modified=None): timestamp = self.bump_and_store_timestamp(collection_id, parent_id, record, modified_field, last_modified=last_modified) record[modified_field] = timestamp return record def extract_record_set(self, records, filters, sorting, id_field, deleted_field, pagination_rules=None, limit=None): """Take the list of records and handle filtering, sorting and pagination. """ return extract_record_set(records, filters=filters, sorting=sorting, id_field=id_field, deleted_field=deleted_field, pagination_rules=pagination_rules, limit=limit) def bump_timestamp(self, collection_timestamp, record, modified_field, last_modified): """Timestamp are base on current millisecond. .. note :: Here it is assumed that if requests from the same user burst in, the time will slide into the future. It is not problematic since the timestamp notion is opaque, and behaves like a revision number. """ is_specified = (record is not None and modified_field in record or last_modified is not None) if is_specified: # If there is a timestamp in the new record, try to use it. if last_modified is not None: current = last_modified else: current = record[modified_field] # If it is equal to current collection timestamp, bump it. if current == collection_timestamp: collection_timestamp += 1 current = collection_timestamp # If it is superior (future), use it as new collection timestamp. elif current > collection_timestamp: collection_timestamp = current # Else (past), do nothing. else: # Not specified, use a new one. current = utils.msec_time() # If two ops in the same msec, bump it. if current <= collection_timestamp: current = collection_timestamp + 1 collection_timestamp = current return current, collection_timestamp def bump_and_store_timestamp(self, collection_id, parent_id, record=None, modified_field=None, last_modified=None): """Use the bump_timestamp to get its next value and store the collection_timestamp. """ raise NotImplementedError class Storage(MemoryBasedStorage): """Storage backend implementation in memory. Useful for development or testing purposes, but records are lost after each server restart. 
Enable in configuration:: kinto.storage_backend = kinto.core.storage.memory Enable strict json validation before saving (instead of the more lenient ujson, see #1238):: kinto.storage_strict_json = true """ def __init__(self, *args, readonly=False, **kwargs): super().__init__(*args, **kwargs) self.readonly = readonly self.flush() def flush(self, auth=None): self._store = tree() self._cemetery = tree() self._timestamps = defaultdict(dict) @synchronized def collection_timestamp(self, collection_id, parent_id, auth=None): ts = self._timestamps[parent_id].get(collection_id) if ts is not None: return ts if self.readonly: error_msg = 'Cannot initialize empty collection timestamp when running in readonly.' raise exceptions.BackendError(message=error_msg) return self.bump_and_store_timestamp(collection_id, parent_id) def bump_and_store_timestamp(self, collection_id, parent_id, record=None, modified_field=None, last_modified=None): """Use the bump_timestamp to get its next value and store the collection_timestamp. """ current_collection_timestamp = self._timestamps[parent_id].get(collection_id, 0) current, collection_timestamp = self.bump_timestamp( current_collection_timestamp, record, modified_field, last_modified) self._timestamps[parent_id][collection_id] = collection_timestamp return current @synchronized def create(self, collection_id, parent_id, record, id_generator=None, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): id_generator = id_generator or self.id_generator record = {**record} if id_field in record: # Raise unicity error if record with same id already exists. try: existing = self.get(collection_id, parent_id, record[id_field]) raise exceptions.UnicityError(id_field, existing) except exceptions.RecordNotFoundError: pass else: record[id_field] = id_generator() self.set_record_timestamp(collection_id, parent_id, record, modified_field=modified_field) _id = record[id_field] record = ujson.loads(self.json.dumps(record)) self._store[parent_id][collection_id][_id] = record self._cemetery[parent_id][collection_id].pop(_id, None) return record @synchronized def get(self, collection_id, parent_id, object_id, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): collection = self._store[parent_id][collection_id] if object_id not in collection: raise exceptions.RecordNotFoundError(object_id) return {**collection[object_id]} @synchronized def update(self, collection_id, parent_id, object_id, record, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): record = {**record} record[id_field] = object_id record = ujson.loads(self.json.dumps(record)) self.set_record_timestamp(collection_id, parent_id, record, modified_field=modified_field) self._store[parent_id][collection_id][object_id] = record self._cemetery[parent_id][collection_id].pop(object_id, None) return record @synchronized def delete(self, collection_id, parent_id, object_id, id_field=DEFAULT_ID_FIELD, with_deleted=True, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None, last_modified=None): existing = self.get(collection_id, parent_id, object_id) # Need to delete the last_modified field of the record. del existing[modified_field] self.set_record_timestamp(collection_id, parent_id, existing, modified_field=modified_field, last_modified=last_modified) existing = self.strip_deleted_record(collection_id, parent_id, existing) # Add to deleted items, remove from store. 
if with_deleted: deleted = {**existing} self._cemetery[parent_id][collection_id][object_id] = deleted self._store[parent_id][collection_id].pop(object_id) return existing @synchronized def purge_deleted(self, collection_id, parent_id, before=None, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): parent_id_match = re.compile(parent_id.replace('*', '.*')) by_parent_id = {pid: collections for pid, collections in self._cemetery.items() if parent_id_match.match(pid)} num_deleted = 0 for pid, collections in by_parent_id.items(): if collection_id is not None: collections = {collection_id: collections[collection_id]} for collection, colrecords in collections.items(): if before is None: kept = {} else: kept = {key: value for key, value in colrecords.items() if value[modified_field] >= before} self._cemetery[pid][collection] = kept num_deleted += (len(colrecords) - len(kept)) return num_deleted @synchronized def get_all(self, collection_id, parent_id, filters=None, sorting=None, pagination_rules=None, limit=None, include_deleted=False, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None): records = _get_objects_by_parent_id(self._store, parent_id, collection_id) records, count = self.extract_record_set(records=records, filters=filters, sorting=None, id_field=id_field, deleted_field=deleted_field) deleted = [] if include_deleted: deleted = _get_objects_by_parent_id(self._cemetery, parent_id, collection_id) records, count = self.extract_record_set(records=records + deleted, filters=filters, sorting=sorting, id_field=id_field, deleted_field=deleted_field, pagination_rules=pagination_rules, limit=limit) return records, count @synchronized def delete_all(self, collection_id, parent_id, filters=None, sorting=None, pagination_rules=None, limit=None, id_field=DEFAULT_ID_FIELD, with_deleted=True, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None): records = _get_objects_by_parent_id(self._store, parent_id, collection_id, with_meta=True) records, count = self.extract_record_set(records=records, filters=filters, sorting=sorting, pagination_rules=pagination_rules, limit=limit, id_field=id_field, deleted_field=deleted_field) deleted = [self.delete(r.pop('__collection_id__'), r.pop('__parent_id__'), r[id_field], id_field=id_field, with_deleted=with_deleted, modified_field=modified_field, deleted_field=deleted_field) for r in records] return deleted def extract_record_set(records, filters, sorting, pagination_rules=None, limit=None, id_field=DEFAULT_ID_FIELD, deleted_field=DEFAULT_DELETED_FIELD): """Apply filters, sorting, limit, and pagination rules to the list of `records`. """ filtered = list(apply_filters(records, filters or [])) total_records = len(filtered) if pagination_rules: paginated = {} for rule in pagination_rules: values = apply_filters(filtered, rule) paginated.update(((x[id_field], x) for x in values)) paginated = paginated.values() else: paginated = filtered sorted_ = apply_sorting(paginated, sorting or []) filtered_deleted = len([r for r in sorted_ if r.get(deleted_field) is True]) if limit: sorted_ = list(sorted_)[:limit] return sorted_, total_records - filtered_deleted def canonical_json(record): return json.dumps(record, sort_keys=True, separators=(',', ':')) def apply_filters(records, filters): """Filter the specified records, using basic iteration. 
""" def contains_filtering(record_value, search_term): if record_value == MISSING: return False try: search_set = set([canonical_json(v) for v in search_term]) record_value_set = set([canonical_json(v) for v in record_value]) except TypeError: return False return record_value_set.intersection(search_set) == search_set def contains_any_filtering(record_value, search_term): if record_value == MISSING: return False try: search_set = set([canonical_json(v) for v in search_term]) record_value_set = set([canonical_json(v) for v in record_value]) except TypeError: return False return record_value_set.intersection(search_set) operators = { COMPARISON.LT: operator.lt, COMPARISON.MAX: operator.le, COMPARISON.EQ: operator.eq, COMPARISON.NOT: operator.ne, COMPARISON.MIN: operator.ge, COMPARISON.GT: operator.gt, COMPARISON.IN: operator.contains, COMPARISON.EXCLUDE: lambda x, y: not operator.contains(x, y), COMPARISON.LIKE: lambda x, y: re.search(y, x, re.IGNORECASE), COMPARISON.CONTAINS: contains_filtering, COMPARISON.CONTAINS_ANY: contains_any_filtering, } for record in records: matches = True for f in filters: right = f.value if f.field == DEFAULT_ID_FIELD: if isinstance(right, int): right = str(right) left = find_nested_value(record, f.field, MISSING) if f.operator in (COMPARISON.IN, COMPARISON.EXCLUDE): right, left = left, right elif f.operator == COMPARISON.LIKE: # Add implicit start/end wildchars if none is specified. if '*' not in right: right = '*{}*'.format(right) right = '^{}$'.format(right.replace('*', '.*')) elif f.operator in (COMPARISON.LT, COMPARISON.MAX, COMPARISON.EQ, COMPARISON.NOT, COMPARISON.MIN, COMPARISON.GT): left = schwartzian_transform(left) right = schwartzian_transform(right) if f.operator == COMPARISON.HAS: matches = left != MISSING if f.value else left == MISSING else: matches = matches and operators[f.operator](left, right) if matches: yield record def schwartzian_transform(value): """Decorate a value with a tag that enforces the Postgres sort order. The sort order, per https://www.postgresql.org/docs/9.6/static/datatype-json.html, is: Object > Array > Boolean > Number > String > Null Note that there are more interesting rules for comparing objects and arrays but we probably don't need to be that compatible. MISSING represents what would be a SQL NULL, which is "bigger" than everything else. """ if value is None: return (0, value) if isinstance(value, str): return (1, value) if isinstance(value, bool): # This has to be before Number, because bools are a subclass # of int :( return (3, value) if isinstance(value, numbers.Number): return (2, value) if isinstance(value, abc.Sequence): return (4, value) if isinstance(value, abc.Mapping): return (5, value) if value is MISSING: return (6, value) raise ValueError('Unknown value: {}'.format(value)) # pragma: no cover def apply_sorting(records, sorting): """Sort the specified records, using cumulative python sorting. 
""" result = list(records) if not result: return result def column(record, name): return schwartzian_transform(find_nested_value(record, name, default=MISSING)) for sort in reversed(sorting): result = sorted(result, key=lambda r: column(r, sort.field), reverse=(sort.direction < 0)) return result def _get_objects_by_parent_id(store, parent_id, collection_id, with_meta=False): if parent_id is not None: parent_id_match = re.compile('^{}$'.format(parent_id.replace('*', '.*'))) by_parent_id = {pid: collections for pid, collections in store.items() if parent_id_match.match(pid)} else: by_parent_id = store[parent_id] objects = [] for pid, collections in by_parent_id.items(): if collection_id is not None: collections = {collection_id: collections[collection_id]} for collection, colobjects in collections.items(): for r in colobjects.values(): if with_meta: objects.append(dict(__collection_id__=collection, __parent_id__=pid, **r)) else: objects.append(r) return objects def load_from_config(config): settings = {**config.get_settings()} strict = settings.get('storage_strict_json', False) return Storage(strict_json=strict)
1
11,714
index by memory address? I realize I don't understand why we don't just build a list :)
Kinto-kinto
py
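For context on the question above: the dict in `extract_record_set` is there to deduplicate records that match more than one pagination rule. The patch keys it by `id(x)` (object identity) instead of the record's id field; a plain list would keep the duplicates. A small self-contained sketch of the difference; the records, rules, and the simplified `apply_filters` below are made up for illustration and only stand in for Kinto's real helper.

def apply_filters(records, rule):
    # Simplified filter: keep records whose fields equal the rule's values.
    return [r for r in records if all(r.get(k) == v for k, v in rule.items())]

filtered = [
    {"id": "abc", "flavor": "vanilla"},
    {"id": "def", "flavor": "chocolate"},
]
pagination_rules = [{"flavor": "vanilla"}, {"id": "abc"}]

# Option 1 (the patch): dict keyed by object identity. The 'abc' record
# matches both rules but is the same dict object, so it collapses to one.
paginated = {}
for rule in pagination_rules:
    paginated.update((id(x), x) for x in apply_filters(filtered, rule))
print(list(paginated.values()))  # [{'id': 'abc', 'flavor': 'vanilla'}]

# Option 2 (the reviewer's question): just build a list. Without a
# dedup step the same record appears once per matching rule.
as_list = []
for rule in pagination_rules:
    as_list.extend(apply_filters(filtered, rule))
print(as_list)  # the 'abc' record appears twice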
@@ -7,6 +7,7 @@ import db.user user_bp = Blueprint("user", __name__) + @user_bp.route("/lastfmscraper/<user_id>.js") @crossdomain() def lastfmscraper(user_id):
1
from __future__ import absolute_import from flask import Blueprint, render_template, request, url_for, Response from flask_login import current_user from werkzeug.exceptions import NotFound from webserver.decorators import crossdomain import db.user user_bp = Blueprint("user", __name__) @user_bp.route("/lastfmscraper/<user_id>.js") @crossdomain() def lastfmscraper(user_id): user_token = request.args.get("user_token") lastfm_username = request.args.get("lastfm_username") if user_token is None or lastfm_username is None: raise NotFound params = {"base_url": url_for("listen.submit_listen", user_id=user_id, _external=True), "user_token": user_token, "lastfm_username": lastfm_username} scraper = render_template("user/scraper.js", **params) return Response(scraper, content_type="text/javascript") @user_bp.route("/<user_id>") def profile(user_id): if current_user.is_authenticated() and \ current_user.musicbrainz_id == user_id: user = current_user else: user = db.user.get_by_mb_id(user_id) if user is None: raise NotFound("Can't find this user.") return render_template("user/profile.html", user=user) @user_bp.route("/<user_id>/import") def import_data(user_id): lastfm_username = request.args.get("lastfm_username") if current_user.is_authenticated() and \ current_user.musicbrainz_id == user_id: user = current_user else: user = db.user.get_by_mb_id(user_id) if user is None: raise NotFound("Can't find this user.") if lastfm_username: params = {"base_url": url_for("user.lastfmscraper", user_id=user_id, _external=True), "user_token": user.auth_token, "lastfm_username": lastfm_username} loader = render_template("user/loader.js", **params) loader = "javascript:%s" % loader else: loader = None return render_template("user/import.html", user=user, loader=loader, lastfm_username=lastfm_username)
1
13,198
Not part of this commit, but we thought that this may not be a good place for this URL, as it is in the `/user/` namespace (effectively preventing us from having a user called `lastfmscraper`, however rare that may be).
metabrainz-listenbrainz-server
py
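One way to act on the concern above would be to move the scraper endpoint onto its own blueprint so it no longer shares a namespace with usernames. A hedged Flask sketch; the blueprint name and `/scraper` prefix are assumptions, and the handler body is lifted from the file shown above.

from flask import Blueprint, Response, render_template, request, url_for
from werkzeug.exceptions import NotFound

# Hypothetical dedicated blueprint, registered elsewhere with e.g.
#   app.register_blueprint(scraper_bp, url_prefix="/scraper")
scraper_bp = Blueprint("scraper", __name__)


@scraper_bp.route("/lastfm/<user_id>.js")
def lastfmscraper(user_id):
    user_token = request.args.get("user_token")
    lastfm_username = request.args.get("lastfm_username")
    if user_token is None or lastfm_username is None:
        raise NotFound
    params = {
        "base_url": url_for("listen.submit_listen", user_id=user_id, _external=True),
        "user_token": user_token,
        "lastfm_username": lastfm_username,
    }
    return Response(render_template("user/scraper.js", **params),
                    content_type="text/javascript")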
@@ -25,6 +25,7 @@ namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNet internal const string HttpContextKey = "__Datadog.Trace.ClrProfiler.Integrations.AspNetMvcIntegration"; private const string OperationName = "aspnet-mvc.request"; + private const string ChildActionOperationName = "aspnet-mvc.request.child-action"; private const string RouteCollectionRouteTypeName = "System.Web.Mvc.Routing.RouteCollectionRoute";
1
// <copyright file="AspNetMvcIntegration.cs" company="Datadog"> // Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. // </copyright> #if NETFRAMEWORK using System; using System.Collections.Generic; using System.Linq; using System.Web.Routing; using Datadog.Trace.AspNet; using Datadog.Trace.Configuration; using Datadog.Trace.ExtensionMethods; using Datadog.Trace.Logging; using Datadog.Trace.Tagging; using Datadog.Trace.Util; namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNet { /// <summary> /// The ASP.NET MVC integration. /// </summary> internal static class AspNetMvcIntegration { internal const string HttpContextKey = "__Datadog.Trace.ClrProfiler.Integrations.AspNetMvcIntegration"; private const string OperationName = "aspnet-mvc.request"; private const string RouteCollectionRouteTypeName = "System.Web.Mvc.Routing.RouteCollectionRoute"; private const IntegrationId IntegrationId = Configuration.IntegrationId.AspNetMvc; private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor(typeof(AspNetMvcIntegration)); /// <summary> /// Creates a scope used to instrument an MVC action and populates some common details. /// </summary> /// <param name="controllerContext">The System.Web.Mvc.ControllerContext that was passed as an argument to the instrumented method.</param> /// <returns>A new scope used to instrument an MVC action.</returns> internal static Scope CreateScope(ControllerContextStruct controllerContext) { Scope scope = null; try { var httpContext = controllerContext.HttpContext; if (httpContext == null) { return null; } Span span = null; // integration enabled, go create a scope! if (Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationId)) { var newResourceNamesEnabled = Tracer.Instance.Settings.RouteTemplateResourceNamesEnabled; string host = httpContext.Request.Headers.Get("Host"); string httpMethod = httpContext.Request.HttpMethod.ToUpperInvariant(); string url = httpContext.Request.RawUrl.ToLowerInvariant(); string resourceName = null; RouteData routeData = controllerContext.RouteData; Route route = routeData?.Route as Route; RouteValueDictionary routeValues = routeData?.Values; bool wasAttributeRouted = false; if (route == null && routeData?.Route.GetType().FullName == RouteCollectionRouteTypeName) { var routeMatches = routeValues?.GetValueOrDefault("MS_DirectRouteMatches") as List<RouteData>; if (routeMatches?.Count > 0) { // route was defined using attribute routing i.e. [Route("/path/{id}")] // get route and routeValues from the RouteData in routeMatches wasAttributeRouted = true; route = routeMatches[0].Route as Route; routeValues = routeMatches[0].Values; if (route != null) { var resourceUrl = route.Url?.ToLowerInvariant() ?? 
string.Empty; if (resourceUrl.FirstOrDefault() != '/') { resourceUrl = string.Concat("/", resourceUrl); } resourceName = $"{httpMethod} {resourceUrl}"; } } } string routeUrl = route?.Url; string areaName = (routeValues?.GetValueOrDefault("area") as string)?.ToLowerInvariant(); string controllerName = (routeValues?.GetValueOrDefault("controller") as string)?.ToLowerInvariant(); string actionName = (routeValues?.GetValueOrDefault("action") as string)?.ToLowerInvariant(); if (newResourceNamesEnabled && string.IsNullOrEmpty(resourceName) && !string.IsNullOrEmpty(routeUrl)) { resourceName = $"{httpMethod} /{routeUrl.ToLowerInvariant()}"; } if (string.IsNullOrEmpty(resourceName) && httpContext.Request.Url != null) { var cleanUri = UriHelpers.GetCleanUriPath(httpContext.Request.Url); resourceName = $"{httpMethod} {cleanUri.ToLowerInvariant()}"; } if (string.IsNullOrEmpty(resourceName)) { // Keep the legacy resource name, just to have something resourceName = $"{httpMethod} {controllerName}.{actionName}"; } // Replace well-known routing tokens resourceName = resourceName .Replace("{area}", areaName) .Replace("{controller}", controllerName) .Replace("{action}", actionName); if (newResourceNamesEnabled && !wasAttributeRouted && routeValues is not null && route is not null) { // Remove unused parameters from conventional route templates // Don't bother with routes defined using attribute routing foreach (var parameter in route.Defaults) { var parameterName = parameter.Key; if (parameterName != "area" && parameterName != "controller" && parameterName != "action" && !routeValues.ContainsKey(parameterName)) { resourceName = resourceName.Replace($"/{{{parameterName}}}", string.Empty); } } } SpanContext propagatedContext = null; var tracer = Tracer.Instance; var tagsFromHeaders = Enumerable.Empty<KeyValuePair<string, string>>(); if (tracer.InternalActiveScope == null) { try { // extract propagated http headers var headers = httpContext.Request.Headers.Wrap(); propagatedContext = SpanContextPropagator.Instance.Extract(headers); tagsFromHeaders = SpanContextPropagator.Instance.ExtractHeaderTags(headers, tracer.Settings.HeaderTags, SpanContextPropagator.HttpRequestHeadersTagPrefix); } catch (Exception ex) { Log.Error(ex, "Error extracting propagated HTTP headers."); } } var tags = new AspNetTags(); scope = Tracer.Instance.StartActiveInternal(OperationName, propagatedContext, tags: tags); span = scope.Span; span.DecorateWebServerSpan( resourceName: resourceName, method: httpMethod, host: host, httpUrl: url, tags, tagsFromHeaders); tags.AspNetRoute = routeUrl; tags.AspNetArea = areaName; tags.AspNetController = controllerName; tags.AspNetAction = actionName; tags.SetAnalyticsSampleRate(IntegrationId, tracer.Settings, enabledWithGlobalSetting: true); if (newResourceNamesEnabled) { // set the resource name in the HttpContext so TracingHttpModule can update root span httpContext.Items[SharedConstants.HttpContextPropagatedResourceNameKey] = resourceName; } } } catch (Exception ex) { Log.Error(ex, "Error creating or populating scope."); } return scope; } } } #endif
1
24,811
Does not appear to be used.
DataDog-dd-trace-dotnet
.cs
@@ -0,0 +1,15 @@ +test_name "C100546: bolt command run should execute command on remote hosts\ + via ssh" do + step "execute `bolt command run` via SSH" do + ssh_nodes = select_hosts(roles: ['ssh']) + nodes_csv = ssh_nodes.map(&:hostname).join(',') + command = 'hostname -f' + bolt_command = "bolt command run --nodes #{nodes_csv} '#{command}'" + case bolt['platform'] + when /windows/ + execute_powershell_script_on(bolt, bolt_command) + else + on(bolt, bolt_command) + end + end +end
1
1
6,668
I think this starts a new `powershell.exe` interpreter each time, which is pretty slow to run a single command. Can we just do `on(bolt, "cmd /c #{bolt_command}")`? /cc @Iristyle
puppetlabs-bolt
rb
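A sketch of the reviewer's suggestion: skip the PowerShell wrapper on Windows and run the bolt command through cmd.exe instead, avoiding a fresh powershell.exe per command. It assumes the Beaker helpers (`select_hosts`, `on`, `bolt`) behave as in the original test.

step "execute `bolt command run` via SSH" do
  ssh_nodes = select_hosts(roles: ['ssh'])
  nodes_csv = ssh_nodes.map(&:hostname).join(',')
  bolt_command = "bolt command run --nodes #{nodes_csv} 'hostname -f'"
  case bolt['platform']
  when /windows/
    # cmd /c is cheaper than starting a PowerShell interpreter for one command
    on(bolt, "cmd /c #{bolt_command}")
  else
    on(bolt, bolt_command)
  end
end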
@@ -1,14 +1,14 @@ -<% if @purchaseable.collection? %> +<% if @video_page.purchaseable.collection? %> <% content_for :additional_header_links do %> - <li class="all-videos"><%= link_to 'All Videos', @purchase %></li> + <li class="all-videos"><%= link_to 'All Videos', @video_page.purchase %></li> <% end %> <% end %> <% content_for :subject_block do %> - <h1><%= @purchaseable.name %></h1> + <h1><%= @video_page.purchaseable.name %></h1> <h2 class="tagline"> - <% if @purchaseable.collection? %> - <%= @video.title %> + <% if @video_page.purchaseable.collection? %> + <%= @video_page.video.title %> <% else %> Watch or download video <% end %>
1
<% if @purchaseable.collection? %> <% content_for :additional_header_links do %> <li class="all-videos"><%= link_to 'All Videos', @purchase %></li> <% end %> <% end %> <% content_for :subject_block do %> <h1><%= @purchaseable.name %></h1> <h2 class="tagline"> <% if @purchaseable.collection? %> <%= @video.title %> <% else %> Watch or download video <% end %> </h2> <% end %> <div class="text-box-wrapper"> <div class="text-box"> <%= render 'watch_video', video: @video, purchase: @purchase %> </div> </div> <%= render @purchaseable.to_aside_partial, purchaseable: @purchaseable, purchase: @purchase %>
1
9,183
Can we add a `collection?` method to the `VideoPage` so we don't violate Law of Demeter here?
thoughtbot-upcase
rb
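A sketch of the delegation the comment above asks for, so the view can call `@video_page.collection?` (and `name`) without reaching through `purchaseable`. The internals of `VideoPage` are not shown in this record, so the constructor below is an assumption.

class VideoPage
  attr_reader :purchaseable, :purchase, :video

  def initialize(purchaseable, purchase, video)
    @purchaseable = purchaseable
    @purchase = purchase
    @video = video
  end

  # Law-of-Demeter-friendly accessors used by the template in the diff.
  def collection?
    purchaseable.collection?
  end

  def name
    purchaseable.name
  end
end

In a Rails app the same methods could be generated with ActiveSupport's `delegate :collection?, :name, to: :purchaseable`.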
@@ -1250,14 +1250,6 @@ public class ProjectManagerServlet extends LoginAbstractAzkabanServlet {
       page.add("errorMsg", e.getMessage());
     }
 
-    final int numBytes = 1024;
-
-    // Really sucks if we do a lot of these because it'll eat up memory fast.
-    // But it's expected that this won't be a heavily used thing. If it is,
-    // then we'll revisit it to make it more stream friendly.
-    final StringBuffer buffer = new StringBuffer(numBytes);
-    page.add("log", buffer.toString());
-
     page.render();
   }
1
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.webapp.servlet; import azkaban.Constants; import azkaban.Constants.ConfigurationKeys; import azkaban.executor.ExecutableFlow; import azkaban.executor.ExecutableJobInfo; import azkaban.executor.ExecutorManagerAdapter; import azkaban.executor.ExecutorManagerException; import azkaban.executor.Status; import azkaban.flow.Edge; import azkaban.flow.Flow; import azkaban.flow.FlowProps; import azkaban.flow.Node; import azkaban.flowtrigger.quartz.FlowTriggerScheduler; import azkaban.project.Project; import azkaban.project.ProjectFileHandler; import azkaban.project.ProjectLogEvent; import azkaban.project.ProjectLogEvent.EventType; import azkaban.project.ProjectManager; import azkaban.project.ProjectManagerException; import azkaban.project.ProjectWhitelist; import azkaban.project.validator.ValidationReport; import azkaban.project.validator.ValidatorConfigs; import azkaban.scheduler.Schedule; import azkaban.scheduler.ScheduleManager; import azkaban.scheduler.ScheduleManagerException; import azkaban.server.session.Session; import azkaban.user.Permission; import azkaban.user.Permission.Type; import azkaban.user.Role; import azkaban.user.User; import azkaban.user.UserManager; import azkaban.user.UserUtils; import azkaban.utils.JSONUtils; import azkaban.utils.Pair; import azkaban.utils.Props; import azkaban.utils.PropsUtils; import azkaban.utils.Utils; import azkaban.webapp.AzkabanWebServer; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.io.Writer; import java.security.AccessControlException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.function.Consumer; import java.util.stream.Collectors; import javax.servlet.ServletConfig; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.fileupload.FileItem; import org.apache.commons.io.FileUtils; import org.apache.commons.io.FilenameUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.lang.StringUtils; import org.quartz.SchedulerException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class ProjectManagerServlet extends LoginAbstractAzkabanServlet { static final String FLOW_IS_LOCKED_PARAM = "isLocked"; static final String FLOW_NAME_PARAM = "flowName"; static final String FLOW_ID_PARAM = "flowId"; static final String ERROR_PARAM = "error"; static final String FLOW_LOCK_ERROR_MESSAGE_PARAM = "flowLockErrorMessage"; private static final String APPLICATION_ZIP_MIME_TYPE = "application/zip"; private static final long 
serialVersionUID = 1; private static final Logger logger = LoggerFactory.getLogger(ProjectManagerServlet.class); private static final NodeLevelComparator NODE_LEVEL_COMPARATOR = new NodeLevelComparator(); private static final String LOCKDOWN_CREATE_PROJECTS_KEY = "lockdown.create.projects"; private static final String LOCKDOWN_UPLOAD_PROJECTS_KEY = "lockdown.upload.projects"; private static final String PROJECT_DOWNLOAD_BUFFER_SIZE_IN_BYTES = "project.download.buffer.size"; private static final Comparator<Flow> FLOW_ID_COMPARATOR = new Comparator<Flow>() { @Override public int compare(final Flow f1, final Flow f2) { return f1.getId().compareTo(f2.getId()); } }; private ProjectManager projectManager; private ExecutorManagerAdapter executorManagerAdapter; private ScheduleManager scheduleManager; private UserManager userManager; private FlowTriggerScheduler scheduler; private int downloadBufferSize; private boolean lockdownCreateProjects = false; private boolean lockdownUploadProjects = false; private boolean enableQuartz = false; @Override public void init(final ServletConfig config) throws ServletException { super.init(config); final AzkabanWebServer server = (AzkabanWebServer) getApplication(); this.projectManager = server.getProjectManager(); this.executorManagerAdapter = server.getExecutorManager(); this.scheduleManager = server.getScheduleManager(); this.userManager = server.getUserManager(); this.scheduler = server.getScheduler(); this.lockdownCreateProjects = server.getServerProps().getBoolean(LOCKDOWN_CREATE_PROJECTS_KEY, false); this.enableQuartz = server.getServerProps().getBoolean(ConfigurationKeys.ENABLE_QUARTZ, false); if (this.lockdownCreateProjects) { logger.info("Creation of projects is locked down"); } this.lockdownUploadProjects = server.getServerProps().getBoolean(LOCKDOWN_UPLOAD_PROJECTS_KEY, false); if (this.lockdownUploadProjects) { logger.info("Uploading of projects is locked down"); } this.downloadBufferSize = server.getServerProps().getInt(PROJECT_DOWNLOAD_BUFFER_SIZE_IN_BYTES, 8192); logger.info("downloadBufferSize: " + this.downloadBufferSize); } @Override protected void handleGet(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException, IOException { if (hasParam(req, "project")) { if (hasParam(req, "ajax")) { handleAJAXAction(req, resp, session); } else if (hasParam(req, "logs")) { handleProjectLogsPage(req, resp, session); } else if (hasParam(req, "permissions")) { handlePermissionPage(req, resp, session); } else if (hasParam(req, "prop")) { handlePropertyPage(req, resp, session); } else if (hasParam(req, "history")) { handleJobHistoryPage(req, resp, session); } else if (hasParam(req, "job")) { handleJobPage(req, resp, session); } else if (hasParam(req, "flow")) { handleFlowPage(req, resp, session); } else if (hasParam(req, "delete")) { handleRemoveProject(req, resp, session); } else if (hasParam(req, "purge")) { handlePurgeProject(req, resp, session); } else if (hasParam(req, "download")) { handleDownloadProject(req, resp, session); } else { handleProjectPage(req, resp, session); } return; } else if (hasParam(req, "reloadProjectWhitelist")) { handleReloadProjectWhitelist(req, resp, session); } final Page page = newPage(req, resp, session, "azkaban/webapp/servlet/velocity/projectpage.vm"); page.add("errorMsg", "No project set."); page.render(); } @Override protected void handleMultiformPost(final HttpServletRequest req, final HttpServletResponse resp, final Map<String, Object> params, final Session session) 
throws ServletException, IOException { // Looks like a duplicate, but this is a move away from the regular // multiform post + redirect // to a more ajax like command. if (params.containsKey("ajax")) { final String action = (String) params.get("ajax"); final HashMap<String, String> ret = new HashMap<>(); if (action.equals("upload")) { ajaxHandleUpload(req, resp, ret, params, session); } this.writeJSON(resp, ret); } else if (params.containsKey("action")) { final String action = (String) params.get("action"); if (action.equals("upload")) { handleUpload(req, resp, params, session); } } } @Override protected void handlePost(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException, IOException { if (hasParam(req, "ajax")) { handleAJAXAction(req, resp, session); } else if (hasParam(req, "action")) { final String action = getParam(req, "action"); if (action.equals("create")) { handleCreate(req, resp, session); } } } private void handleAJAXAction(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException, IOException { final String projectName = getParam(req, "project"); final User user = session.getUser(); final HashMap<String, Object> ret = new HashMap<>(); ret.put("project", projectName); final Project project = this.projectManager.getProject(projectName); if (project == null) { ret.put(ERROR_PARAM, "Project " + projectName + " doesn't exist."); } else { ret.put("projectId", project.getId()); final String ajaxName = getParam(req, "ajax"); if (ajaxName.equals("getProjectId")) { // Do nothing, since projectId is added to all AJAX requests. } else if (ajaxName.equals("fetchProjectLogs")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxFetchProjectLogEvents(project, req, ret); } } else if (ajaxName.equals("fetchflowjobs")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxFetchFlow(project, ret, req); } } else if (ajaxName.equals("fetchflowdetails")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxFetchFlowDetails(project, ret, req); } } else if (ajaxName.equals("fetchflowgraph")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxFetchFlowGraph(project, ret, req); } } else if (ajaxName.equals("fetchflownodedata")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxFetchFlowNodeData(project, ret, req); } } else if (ajaxName.equals("fetchprojectflows")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxFetchProjectFlows(project, ret, req); } } else if (ajaxName.equals("changeDescription")) { if (handleAjaxPermission(project, user, Type.WRITE, ret)) { ajaxChangeDescription(project, ret, req, user); } } else if (ajaxName.equals("getPermissions")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxGetPermissions(project, ret); } } else if (ajaxName.equals("getGroupPermissions")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxGetGroupPermissions(project, ret); } } else if (ajaxName.equals("getProxyUsers")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxGetProxyUsers(project, ret); } } else if (ajaxName.equals("changePermission")) { if (handleAjaxPermission(project, user, Type.ADMIN, ret)) { ajaxChangePermissions(project, ret, req, user); } } else if (ajaxName.equals("addPermission")) { if (handleAjaxPermission(project, user, Type.ADMIN, ret)) { ajaxAddPermission(project, ret, req, user); } } else if (ajaxName.equals("addProxyUser")) { if 
(handleAjaxPermission(project, user, Type.ADMIN, ret)) { ajaxAddProxyUser(project, ret, req, user); } } else if (ajaxName.equals("removeProxyUser")) { if (handleAjaxPermission(project, user, Type.ADMIN, ret)) { ajaxRemoveProxyUser(project, ret, req, user); } } else if (ajaxName.equals("fetchFlowExecutions")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxFetchFlowExecutions(project, ret, req); } } else if (ajaxName.equals("fetchLastSuccessfulFlowExecution")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxFetchLastSuccessfulFlowExecution(project, ret, req); } } else if (ajaxName.equals("fetchJobInfo")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxFetchJobInfo(project, ret, req); } } else if (ajaxName.equals("setJobOverrideProperty")) { if (handleAjaxPermission(project, user, Type.WRITE, ret)) { ajaxSetJobOverrideProperty(project, ret, req, user); } } else if (ajaxName.equals("checkForWritePermission")) { ajaxCheckForWritePermission(project, user, ret); } else if (ajaxName.equals("setFlowLock")) { if (handleAjaxPermission(project, user, Type.ADMIN, ret)) { ajaxSetFlowLock(project, ret, req); } } else if (ajaxName.equals("isFlowLocked")) { if (handleAjaxPermission(project, user, Type.READ, ret)) { ajaxIsFlowLocked(project, ret, req); } } else { ret.put(ERROR_PARAM, "Cannot execute command " + ajaxName); } } this.writeJSON(resp, ret); } private boolean handleAjaxPermission(final Project project, final User user, final Type type, final Map<String, Object> ret) { if (hasPermission(project, user, type)) { return true; } ret.put(ERROR_PARAM, "Permission denied. Need " + type.toString() + " access."); return false; } private void ajaxFetchProjectLogEvents(final Project project, final HttpServletRequest req, final HashMap<String, Object> ret) throws ServletException { final int num = this.getIntParam(req, "size", 1000); final int skip = this.getIntParam(req, "skip", 0); final List<ProjectLogEvent> logEvents; try { logEvents = this.projectManager.getProjectEventLogs(project, num, skip); } catch (final ProjectManagerException e) { throw new ServletException(e); } final String[] columns = new String[]{"user", "time", "type", "message"}; ret.put("columns", columns); final List<Object[]> eventData = new ArrayList<>(); for (final ProjectLogEvent events : logEvents) { final Object[] entry = new Object[4]; entry[0] = events.getUser(); entry[1] = events.getTime(); entry[2] = events.getType(); entry[3] = events.getMessage(); eventData.add(entry); } ret.put("logData", eventData); } private List<String> getFlowJobTypes(final Flow flow) { final Set<String> jobTypeSet = new HashSet<>(); for (final Node node : flow.getNodes()) { jobTypeSet.add(node.getType()); } final List<String> jobTypes = new ArrayList<>(); jobTypes.addAll(jobTypeSet); return jobTypes; } private void ajaxFetchFlowDetails(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req) throws ServletException { final String flowName = getParam(req, "flow"); try { final Flow flow = project.getFlow(flowName); if (flow == null) { ret.put(ERROR_PARAM, "Flow " + flowName + " not found."); return; } ret.put("jobTypes", getFlowJobTypes(flow)); if (flow.getCondition() != null) { ret.put("condition", flow.getCondition()); } } catch (final AccessControlException e) { ret.put(ERROR_PARAM, e.getMessage()); } } private void ajaxFetchLastSuccessfulFlowExecution(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req) throws ServletException { final 
String flowId = getParam(req, "flow"); List<ExecutableFlow> exFlows = null; try { exFlows = this.executorManagerAdapter.getExecutableFlows(project.getId(), flowId, 0, 1, Status.SUCCEEDED); } catch (final ExecutorManagerException e) { ret.put(ERROR_PARAM, "Error retrieving executable flows"); return; } if (exFlows.size() == 0) { ret.put("success", "false"); ret.put("message", "This flow has no successful run."); return; } ret.put("success", "true"); ret.put("message", ""); ret.put("execId", exFlows.get(0).getExecutionId()); } private void ajaxFetchFlowExecutions(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req) throws ServletException { final String flowId = getParam(req, "flow"); final int from = Integer.valueOf(getParam(req, "start")); final int length = Integer.valueOf(getParam(req, "length")); final ArrayList<ExecutableFlow> exFlows = new ArrayList<>(); int total = 0; try { total = this.executorManagerAdapter.getExecutableFlows(project.getId(), flowId, from, length, exFlows); } catch (final ExecutorManagerException e) { ret.put(ERROR_PARAM, "Error retrieving executable flows"); } ret.put("flow", flowId); ret.put("total", total); ret.put("from", from); ret.put("length", length); final ArrayList<Object> history = new ArrayList<>(); for (final ExecutableFlow flow : exFlows) { final HashMap<String, Object> flowInfo = new HashMap<>(); flowInfo.put("execId", flow.getExecutionId()); flowInfo.put(FLOW_ID_PARAM, flow.getFlowId()); flowInfo.put("projectId", flow.getProjectId()); flowInfo.put("status", flow.getStatus().toString()); flowInfo.put("submitTime", flow.getSubmitTime()); flowInfo.put("startTime", flow.getStartTime()); flowInfo.put("endTime", flow.getEndTime()); flowInfo.put("submitUser", flow.getSubmitUser()); history.add(flowInfo); } ret.put("executions", history); } /** * Download project zip file from DB and send it back client. * * This method requires a project name and an optional project version. 
*/ private void handleDownloadProject(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException, IOException { final User user = session.getUser(); final String projectName = getParam(req, "project"); logger.info(user.getUserId() + " is downloading project: " + projectName); final Project project = this.projectManager.getProject(projectName); if (project == null) { this.setErrorMessageInCookie(resp, "Project " + projectName + " doesn't exist."); resp.sendRedirect(req.getContextPath()); return; } if (!hasPermission(project, user, Type.READ)) { this.setErrorMessageInCookie(resp, "No permission to download project " + projectName + "."); resp.sendRedirect(req.getContextPath()); return; } int version = -1; if (hasParam(req, "version")) { version = getIntParam(req, "version"); } ProjectFileHandler projectFileHandler = null; FileInputStream inStream = null; OutputStream outStream = null; try { projectFileHandler = this.projectManager.getProjectFileHandler(project, version); if (projectFileHandler == null) { this.setErrorMessageInCookie(resp, "Project " + projectName + " with version " + version + " doesn't exist"); resp.sendRedirect(req.getContextPath()); return; } final File projectZipFile = projectFileHandler.getLocalFile(); final String logStr = String.format( "downloading project zip file for project \"%s\" at \"%s\"" + " size: %d type: %s fileName: \"%s\"", projectFileHandler.getFileName(), projectZipFile.getAbsolutePath(), projectZipFile.length(), projectFileHandler.getFileType(), projectFileHandler.getFileName()); logger.info(logStr); // now set up HTTP response for downloading file inStream = new FileInputStream(projectZipFile); resp.setContentType(APPLICATION_ZIP_MIME_TYPE); final String headerKey = "Content-Disposition"; final String headerValue = String.format("attachment; filename=\"%s\"", projectFileHandler.getFileName()); resp.setHeader(headerKey, headerValue); resp.setHeader("version", Integer.toString(projectFileHandler.getVersion())); resp.setHeader("projectId", Integer.toString(projectFileHandler.getProjectId())); outStream = resp.getOutputStream(); final byte[] buffer = new byte[this.downloadBufferSize]; int bytesRead = -1; while ((bytesRead = inStream.read(buffer)) != -1) { outStream.write(buffer, 0, bytesRead); } } catch (final Throwable e) { logger.error( "Encountered error while downloading project zip file for project: " + projectName + " by user: " + user.getUserId(), e); throw new ServletException(e); } finally { IOUtils.closeQuietly(inStream); IOUtils.closeQuietly(outStream); if (projectFileHandler != null) { projectFileHandler.deleteLocalFile(); } } } /** * validate readiness of a project and user permission and use projectManager to purge the project * if things looks good **/ private void handlePurgeProject(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException, IOException { final User user = session.getUser(); final HashMap<String, Object> ret = new HashMap<>(); boolean isOperationSuccessful = true; try { Project project = null; final String projectParam = getParam(req, "project"); if (StringUtils.isNumeric(projectParam)) { project = this.projectManager.getProject(Integer.parseInt(projectParam)); // get // project // by // Id } else { project = this.projectManager.getProject(projectParam); // get project by // name (name cannot // start // from ints) } // invalid project if (project == null) { ret.put(ERROR_PARAM, "invalid project"); isOperationSuccessful = 
false; } // project is already deleted if (isOperationSuccessful && this.projectManager.isActiveProject(project.getId())) { ret.put(ERROR_PARAM, "Project " + project.getName() + " should be deleted before purging"); isOperationSuccessful = false; } // only eligible users can purge a project if (isOperationSuccessful && !hasPermission(project, user, Type.ADMIN)) { ret.put(ERROR_PARAM, "Cannot purge. User '" + user.getUserId() + "' is not an ADMIN."); isOperationSuccessful = false; } if (isOperationSuccessful) { this.projectManager.purgeProject(project, user); } } catch (final Exception e) { ret.put(ERROR_PARAM, e.getMessage()); isOperationSuccessful = false; } ret.put("success", isOperationSuccessful); this.writeJSON(resp, ret); } private void removeAssociatedSchedules(final Project project) throws ServletException { // remove regular schedules try { for (final Schedule schedule : this.scheduleManager.getSchedules()) { if (schedule.getProjectId() == project.getId()) { logger.info("removing schedule " + schedule.getScheduleId()); this.scheduleManager.removeSchedule(schedule); } } } catch (final ScheduleManagerException e) { throw new ServletException(e); } // remove flow trigger schedules try { if (this.enableQuartz) { this.scheduler.unschedule(project); } } catch (final SchedulerException e) { throw new ServletException(e); } } private void handleRemoveProject(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException, IOException { final User user = session.getUser(); final String projectName = getParam(req, "project"); final Project project = this.projectManager.getProject(projectName); if (project == null) { this.setErrorMessageInCookie(resp, "Project " + projectName + " doesn't exist."); resp.sendRedirect(req.getContextPath()); return; } if (!hasPermission(project, user, Type.ADMIN)) { this.setErrorMessageInCookie(resp, "Cannot delete. 
User '" + user.getUserId() + "' is not an ADMIN."); resp.sendRedirect(req.getRequestURI() + "?project=" + projectName); return; } removeAssociatedSchedules(project); try { this.projectManager.removeProject(project, user); } catch (final ProjectManagerException e) { this.setErrorMessageInCookie(resp, e.getMessage()); resp.sendRedirect(req.getRequestURI() + "?project=" + projectName); return; } this.setSuccessMessageInCookie(resp, "Project '" + projectName + "' was successfully deleted and associated schedules are removed."); resp.sendRedirect(req.getContextPath()); } private void ajaxChangeDescription(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req, final User user) throws ServletException { final String description = getParam(req, "description"); project.setDescription(description); try { this.projectManager.updateProjectDescription(project, description, user); } catch (final ProjectManagerException e) { ret.put(ERROR_PARAM, e.getMessage()); } } private void ajaxFetchJobInfo(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req) throws ServletException { final String flowName = getParam(req, "flowName"); final String jobName = getParam(req, "jobName"); final Flow flow = project.getFlow(flowName); if (flow == null) { ret.put(ERROR_PARAM, "Flow " + flowName + " not found in project " + project.getName()); return; } final Node node = flow.getNode(jobName); if (node == null) { ret.put(ERROR_PARAM, "Job " + jobName + " not found in flow " + flowName); return; } Props jobProp; try { jobProp = this.projectManager.getProperties(project, flow, jobName, node.getJobSource()); } catch (final ProjectManagerException e) { ret.put(ERROR_PARAM, "Failed to retrieve job properties!"); return; } if (jobProp == null) { jobProp = new Props(); } Props overrideProp; try { overrideProp = this.projectManager .getJobOverrideProperty(project, flow, jobName, node.getJobSource()); } catch (final ProjectManagerException e) { ret.put(ERROR_PARAM, "Failed to retrieve job override properties!"); return; } ret.put("jobName", node.getId()); ret.put("jobType", jobProp.get("type")); if (overrideProp == null) { overrideProp = new Props(jobProp); } final Map<String, String> generalParams = new HashMap<>(); final Map<String, String> overrideParams = new HashMap<>(); for (final String ps : jobProp.getKeySet()) { generalParams.put(ps, jobProp.getString(ps)); } for (final String ops : overrideProp.getKeySet()) { overrideParams.put(ops, overrideProp.getString(ops)); } ret.put("generalParams", generalParams); ret.put("overrideParams", overrideParams); } private void ajaxSetJobOverrideProperty(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req, final User user) throws ServletException { final String flowName = getParam(req, "flowName"); final String jobName = getParam(req, "jobName"); final Flow flow = project.getFlow(flowName); if (flow == null) { ret.put(ERROR_PARAM, "Flow " + flowName + " not found in project " + project.getName()); return; } final Node node = flow.getNode(jobName); if (node == null) { ret.put(ERROR_PARAM, "Job " + jobName + " not found in flow " + flowName); return; } final Map<String, String> jobParamGroup = this.getParamGroup(req, "jobOverride"); final Props overrideParams = new Props(null, jobParamGroup); try { this.projectManager .setJobOverrideProperty(project, flow, overrideParams, jobName, node.getJobSource(), user); } catch (final ProjectManagerException e) { ret.put(ERROR_PARAM, "Failed to upload 
job override property"); } } private void ajaxFetchProjectFlows(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req) throws ServletException { final ArrayList<Map<String, Object>> flowList = new ArrayList<>(); for (final Flow flow : project.getFlows()) { if (!flow.isEmbeddedFlow()) { final HashMap<String, Object> flowObj = new HashMap<>(); flowObj.put(FLOW_ID_PARAM, flow.getId()); flowList.add(flowObj); } } ret.put("flows", flowList); } private void ajaxFetchFlowGraph(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req) throws ServletException { final String flowId = getParam(req, "flow"); fillFlowInfo(project, flowId, ret); } private void fillFlowInfo(final Project project, final String flowId, final HashMap<String, Object> ret) { final Flow flow = project.getFlow(flowId); if (flow == null) { ret.put(ERROR_PARAM, "Flow " + flowId + " not found in project " + project.getName()); return; } final ArrayList<Map<String, Object>> nodeList = new ArrayList<>(); for (final Node node : flow.getNodes()) { final HashMap<String, Object> nodeObj = new HashMap<>(); nodeObj.put("id", node.getId()); nodeObj.put("type", node.getType()); if (node.getCondition() != null) { nodeObj.put("condition", node.getCondition()); } if (node.getEmbeddedFlowId() != null) { nodeObj.put(FLOW_ID_PARAM, node.getEmbeddedFlowId()); fillFlowInfo(project, node.getEmbeddedFlowId(), nodeObj); } nodeList.add(nodeObj); final Set<Edge> inEdges = flow.getInEdges(node.getId()); if (inEdges != null && !inEdges.isEmpty()) { final ArrayList<String> inEdgesList = new ArrayList<>(); for (final Edge edge : inEdges) { inEdgesList.add(edge.getSourceId()); } Collections.sort(inEdgesList); nodeObj.put("in", inEdgesList); } } Collections.sort(nodeList, new Comparator<Map<String, Object>>() { @Override public int compare(final Map<String, Object> o1, final Map<String, Object> o2) { final String id = (String) o1.get("id"); return id.compareTo((String) o2.get("id")); } }); ret.put("flow", flowId); ret.put("nodes", nodeList); } private void ajaxFetchFlowNodeData(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req) throws ServletException { final String flowId = getParam(req, "flow"); final Flow flow = project.getFlow(flowId); final String nodeId = getParam(req, "node"); final Node node = flow.getNode(nodeId); if (node == null) { ret.put(ERROR_PARAM, "Job " + nodeId + " doesn't exist."); return; } ret.put("id", nodeId); ret.put("flow", flowId); ret.put("type", node.getType()); final Props jobProps; try { jobProps = this.projectManager.getProperties(project, flow, nodeId, node.getJobSource()); } catch (final ProjectManagerException e) { ret.put(ERROR_PARAM, "Failed to upload job override property for " + nodeId); return; } if (jobProps == null) { ret.put(ERROR_PARAM, "Properties for " + nodeId + " isn't found."); return; } final Map<String, String> properties = PropsUtils.toStringMap(jobProps, true); ret.put("props", properties); if (node.getType().equals("flow")) { if (node.getEmbeddedFlowId() != null) { fillFlowInfo(project, node.getEmbeddedFlowId(), ret); } } } private void ajaxFetchFlow(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req) throws ServletException { final String flowId = getParam(req, "flow"); final Flow flow = project.getFlow(flowId); final ArrayList<Node> flowNodes = new ArrayList<>(flow.getNodes()); Collections.sort(flowNodes, NODE_LEVEL_COMPARATOR); final ArrayList<Object> nodeList = new 
ArrayList<>(); for (final Node node : flowNodes) { final HashMap<String, Object> nodeObj = new HashMap<>(); nodeObj.put("id", node.getId()); final ArrayList<String> dependencies = new ArrayList<>(); Collection<Edge> collection = flow.getInEdges(node.getId()); if (collection != null) { for (final Edge edge : collection) { dependencies.add(edge.getSourceId()); } } final ArrayList<String> dependents = new ArrayList<>(); collection = flow.getOutEdges(node.getId()); if (collection != null) { for (final Edge edge : collection) { dependents.add(edge.getTargetId()); } } nodeObj.put("dependencies", dependencies); nodeObj.put("dependents", dependents); nodeObj.put("level", node.getLevel()); nodeList.add(nodeObj); } ret.put(FLOW_ID_PARAM, flowId); ret.put("nodes", nodeList); ret.put(FLOW_IS_LOCKED_PARAM, flow.isLocked()); } private void ajaxAddProxyUser(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req, final User user) throws ServletException { final String name = getParam(req, "name"); logger.info("Adding proxy user " + name + " by " + user.getUserId()); if (this.userManager.validateProxyUser(name, user)) { try { this.projectManager.addProjectProxyUser(project, name, user); } catch (final ProjectManagerException e) { ret.put(ERROR_PARAM, e.getMessage()); } } else { ret.put(ERROR_PARAM, "User " + user.getUserId() + " has no permission to add " + name + " as proxy user."); return; } } private void ajaxRemoveProxyUser(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req, final User user) throws ServletException { final String name = getParam(req, "name"); logger.info("Removing proxy user " + name + " by " + user.getUserId()); try { this.projectManager.removeProjectProxyUser(project, name, user); } catch (final ProjectManagerException e) { ret.put(ERROR_PARAM, e.getMessage()); } } private void ajaxAddPermission(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req, final User user) throws ServletException { final String name = getParam(req, "name"); final boolean group = Boolean.parseBoolean(getParam(req, "group")); if (group) { if (project.getGroupPermission(name) != null) { ret.put(ERROR_PARAM, "Group permission already exists."); return; } if (!this.userManager.validateGroup(name)) { ret.put(ERROR_PARAM, "Group is invalid."); return; } } else { if (project.getUserPermission(name) != null) { ret.put(ERROR_PARAM, "User permission already exists."); return; } if (!this.userManager.validateUser(name)) { ret.put(ERROR_PARAM, "User is invalid."); return; } } final boolean admin = Boolean.parseBoolean(getParam(req, "permissions[admin]")); final boolean read = Boolean.parseBoolean(getParam(req, "permissions[read]")); final boolean write = Boolean.parseBoolean(getParam(req, "permissions[write]")); final boolean execute = Boolean.parseBoolean(getParam(req, "permissions[execute]")); final boolean schedule = Boolean.parseBoolean(getParam(req, "permissions[schedule]")); final Permission perm = new Permission(); if (admin) { perm.setPermission(Type.ADMIN, true); } else { perm.setPermission(Type.READ, read); perm.setPermission(Type.WRITE, write); perm.setPermission(Type.EXECUTE, execute); perm.setPermission(Type.SCHEDULE, schedule); } try { this.projectManager.updateProjectPermission(project, name, perm, group, user); } catch (final ProjectManagerException e) { ret.put(ERROR_PARAM, e.getMessage()); } } private void ajaxChangePermissions(final Project project, final HashMap<String, Object> ret, final 
HttpServletRequest req, final User user) throws ServletException { final boolean admin = Boolean.parseBoolean(getParam(req, "permissions[admin]")); final boolean read = Boolean.parseBoolean(getParam(req, "permissions[read]")); final boolean write = Boolean.parseBoolean(getParam(req, "permissions[write]")); final boolean execute = Boolean.parseBoolean(getParam(req, "permissions[execute]")); final boolean schedule = Boolean.parseBoolean(getParam(req, "permissions[schedule]")); final boolean group = Boolean.parseBoolean(getParam(req, "group")); final String name = getParam(req, "name"); final Permission perm; if (group) { perm = project.getGroupPermission(name); } else { perm = project.getUserPermission(name); } if (perm == null) { ret.put(ERROR_PARAM, "Permissions for " + name + " cannot be found."); return; } if (admin || read || write || execute || schedule) { if (admin) { perm.setPermission(Type.ADMIN, true); perm.setPermission(Type.READ, false); perm.setPermission(Type.WRITE, false); perm.setPermission(Type.EXECUTE, false); perm.setPermission(Type.SCHEDULE, false); } else { perm.setPermission(Type.ADMIN, false); perm.setPermission(Type.READ, read); perm.setPermission(Type.WRITE, write); perm.setPermission(Type.EXECUTE, execute); perm.setPermission(Type.SCHEDULE, schedule); } try { this.projectManager .updateProjectPermission(project, name, perm, group, user); } catch (final ProjectManagerException e) { ret.put(ERROR_PARAM, e.getMessage()); } } else { try { this.projectManager.removeProjectPermission(project, name, group, user); } catch (final ProjectManagerException e) { ret.put(ERROR_PARAM, e.getMessage()); } } } /** * this only returns user permissions, but not group permissions and proxy users */ private void ajaxGetPermissions(final Project project, final HashMap<String, Object> ret) { final ArrayList<HashMap<String, Object>> permissions = new ArrayList<>(); for (final Pair<String, Permission> perm : project.getUserPermissions()) { final HashMap<String, Object> permObj = new HashMap<>(); final String userId = perm.getFirst(); permObj.put("username", userId); permObj.put("permission", perm.getSecond().toStringArray()); permissions.add(permObj); } ret.put("permissions", permissions); } private void ajaxGetGroupPermissions(final Project project, final HashMap<String, Object> ret) { final ArrayList<HashMap<String, Object>> permissions = new ArrayList<>(); for (final Pair<String, Permission> perm : project.getGroupPermissions()) { final HashMap<String, Object> permObj = new HashMap<>(); final String userId = perm.getFirst(); permObj.put("username", userId); permObj.put("permission", perm.getSecond().toStringArray()); permissions.add(permObj); } ret.put("permissions", permissions); } private void ajaxGetProxyUsers(final Project project, final HashMap<String, Object> ret) { final String[] proxyUsers = project.getProxyUsers().toArray(new String[0]); ret.put("proxyUsers", proxyUsers); } private void ajaxCheckForWritePermission(final Project project, final User user, final HashMap<String, Object> ret) { ret.put("hasWritePermission", hasPermission(project, user, Type.WRITE)); } /** * Set if a flow is locked. * * @param project the project for the flow. * @param ret the return value. * @param req the http request. 
*/ private void ajaxSetFlowLock(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req) throws ServletException { final String flowName = getParam(req, FLOW_NAME_PARAM); final Flow flow = project.getFlow(flowName); if (flow == null) { ret.put(ERROR_PARAM, "Flow " + flowName + " not found in project " + project.getName()); return; } final boolean isLocked = Boolean.parseBoolean(getParam(req, FLOW_IS_LOCKED_PARAM)); String flowLockErrorMessage = null; try { flowLockErrorMessage = getParam(req, FLOW_LOCK_ERROR_MESSAGE_PARAM); } catch(final Exception e) { logger.info("Unable to get flow lock error message"); } // if there is a change in the locked value, then check to see if the project has a flow trigger // that needs to be paused/resumed. if (isLocked != flow.isLocked()) { try { if (this.projectManager.hasFlowTrigger(project, flow)) { if (isLocked) { if (this.scheduler.pauseFlowTriggerIfPresent(project.getId(), flow.getId())) { logger.info("Flow trigger for flow " + project.getName() + "." + flow.getId() + " is paused"); } else { logger.warn("Flow trigger for flow " + project.getName() + "." + flow.getId() + " doesn't exist"); } } else { if (this.scheduler.resumeFlowTriggerIfPresent(project.getId(), flow.getId())) { logger.info("Flow trigger for flow " + project.getName() + "." + flow.getId() + " is resumed"); } else { logger.warn("Flow trigger for flow " + project.getName() + "." + flow.getId() + " doesn't exist"); } } } } catch (final Exception e) { ret.put(ERROR_PARAM, e); } } flow.setLocked(isLocked); flow.setFlowLockErrorMessage(isLocked ? flowLockErrorMessage : null); ret.put(FLOW_IS_LOCKED_PARAM, flow.isLocked()); ret.put(FLOW_ID_PARAM, flow.getId()); ret.put(FLOW_LOCK_ERROR_MESSAGE_PARAM, flow.getFlowLockErrorMessage()); this.projectManager.updateFlow(project, flow); } /** * Returns true if the flow is locked, false if it is unlocked. * * @param project the project containing the flow. * @param ret the return value. * @param req the http request. 
*/ private void ajaxIsFlowLocked(final Project project, final HashMap<String, Object> ret, final HttpServletRequest req) throws ServletException { final String flowName = getParam(req, FLOW_NAME_PARAM); final Flow flow = project.getFlow(flowName); if (flow == null) { ret.put(ERROR_PARAM, "Flow " + flowName + " not found in project " + project.getName()); return; } ret.put(FLOW_ID_PARAM, flow.getId()); ret.put(FLOW_IS_LOCKED_PARAM, flow.isLocked()); } private void handleProjectLogsPage(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException, IOException { final Page page = newPage(req, resp, session, "azkaban/webapp/servlet/velocity/projectlogpage.vm"); final String projectName = getParam(req, "project"); final User user = session.getUser(); PageUtils .hideUploadButtonWhenNeeded(page, session, this.userManager, this.lockdownUploadProjects); Project project = null; try { project = this.projectManager.getProject(projectName); if (project == null) { page.add("errorMsg", "Project " + projectName + " doesn't exist."); } else { if (!hasPermission(project, user, Type.READ)) { throw new AccessControlException("No permission to view project " + projectName + "."); } page.add("project", project); page.add("admins", Utils.flattenToString( project.getUsersWithPermission(Type.ADMIN), ",")); final Permission perm = this.getPermissionObject(project, user, Type.ADMIN); page.add("userpermission", perm); final boolean adminPerm = perm.isPermissionSet(Type.ADMIN); if (adminPerm) { page.add("admin", true); } // Set this so we can display execute buttons only to those who have // access. if (perm.isPermissionSet(Type.EXECUTE) || adminPerm) { page.add("exec", true); } else { page.add("exec", false); } } } catch (final AccessControlException e) { page.add("errorMsg", e.getMessage()); } final int numBytes = 1024; // Really sucks if we do a lot of these because it'll eat up memory fast. // But it's expected that this won't be a heavily used thing. If it is, // then we'll revisit it to make it more stream friendly. 
final StringBuffer buffer = new StringBuffer(numBytes); page.add("log", buffer.toString()); page.render(); } private void handleJobHistoryPage(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException, IOException { final Page page = newPage(req, resp, session, "azkaban/webapp/servlet/velocity/jobhistorypage.vm"); final String jobId = getParam(req, "job"); page.add("jobId", jobId); int pageNum = Math.max(1, getIntParam(req, "page", 1)); page.add("page", pageNum); final int pageSize = Math.max(1, getIntParam(req, "size", 25)); page.add("pageSize", pageSize); page.add("recordCount", 0); page.add("projectId", ""); page.add("projectName", ""); page.add("dataSeries", "[]"); page.add("history", null); final String projectName = getParam(req, "project"); final User user = session.getUser(); final Project project = this.projectManager.getProject(projectName); if (project == null) { page.add("errorMsg", "Project " + projectName + " doesn't exist."); page.render(); return; } if (!hasPermission(project, user, Type.READ)) { page.add("errorMsg", "No permission to view project " + projectName + "."); page.render(); return; } page.add("projectId", project.getId()); page.add("projectName", project.getName()); try { final int numResults = this.executorManagerAdapter.getNumberOfJobExecutions(project, jobId); page.add("recordCount", numResults); final int totalPages = ((numResults - 1) / pageSize) + 1; if (pageNum > totalPages) { pageNum = totalPages; page.add("page", pageNum); } final int elementsToSkip = (pageNum - 1) * pageSize; final List<ExecutableJobInfo> jobInfo = this.executorManagerAdapter.getExecutableJobs(project, jobId, elementsToSkip, pageSize); if (CollectionUtils.isNotEmpty(jobInfo)) { page.add("history", jobInfo); final ArrayList<Object> dataSeries = new ArrayList<>(); for (final ExecutableJobInfo info : jobInfo) { final Map<String, Object> map = info.toObject(); dataSeries.add(map); } page.add("dataSeries", JSONUtils.toJSON(dataSeries)); } } catch (final ExecutorManagerException e) { page.add("errorMsg", e.getMessage()); } page.render(); } private void handlePermissionPage(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException { final Page page = newPage(req, resp, session, "azkaban/webapp/servlet/velocity/permissionspage.vm"); final String projectName = getParam(req, "project"); final User user = session.getUser(); PageUtils .hideUploadButtonWhenNeeded(page, session, this.userManager, this.lockdownUploadProjects); Project project = null; try { project = this.projectManager.getProject(projectName); if (project == null) { page.add("errorMsg", "Project " + projectName + " not found."); } else { if (!hasPermission(project, user, Type.READ)) { throw new AccessControlException("No permission to view project " + projectName + "."); } page.add("project", project); page.add("username", user.getUserId()); page.add("admins", Utils.flattenToString( project.getUsersWithPermission(Type.ADMIN), ",")); final Permission perm = this.getPermissionObject(project, user, Type.ADMIN); page.add("userpermission", perm); if (perm.isPermissionSet(Type.ADMIN)) { page.add("admin", true); } final List<Pair<String, Permission>> userPermission = project.getUserPermissions(); if (userPermission != null && !userPermission.isEmpty()) { page.add("permissions", userPermission); } final List<Pair<String, Permission>> groupPermission = project.getGroupPermissions(); if (groupPermission != null && !groupPermission.isEmpty()) { 
page.add("groupPermissions", groupPermission); } final Set<String> proxyUsers = project.getProxyUsers(); if (proxyUsers != null && !proxyUsers.isEmpty()) { page.add("proxyUsers", proxyUsers); } if (hasPermission(project, user, Type.ADMIN)) { page.add("isAdmin", true); } } } catch (final AccessControlException e) { page.add("errorMsg", e.getMessage()); } page.render(); } private void handleJobPage(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException { final Page page = newPage(req, resp, session, "azkaban/webapp/servlet/velocity/jobpage.vm"); final String projectName = getParam(req, "project"); final String flowName = getParam(req, "flow"); final String jobName = getParam(req, "job"); final User user = session.getUser(); Project project = null; Flow flow = null; try { project = this.projectManager.getProject(projectName); logger.info("JobPage: project " + projectName + " version is " + project.getVersion() + ", reference is " + System.identityHashCode(project)); if (project == null) { page.add("errorMsg", "Project " + projectName + " not found."); page.render(); return; } if (!hasPermission(project, user, Type.READ)) { throw new AccessControlException("No permission to view project " + projectName + "."); } page.add("project", project); flow = project.getFlow(flowName); if (flow == null) { page.add("errorMsg", "Flow " + flowName + " not found."); page.render(); return; } page.add("flowid", flow.getId()); final Node node = flow.getNode(jobName); if (node == null) { page.add("errorMsg", "Job " + jobName + " not found."); page.render(); return; } Props jobProp = this.projectManager .getJobOverrideProperty(project, flow, jobName, node.getJobSource()); if (jobProp == null) { jobProp = this.projectManager.getProperties(project, flow, jobName, node.getJobSource()); } page.add("jobid", node.getId()); page.add("jobtype", node.getType()); if (node.getCondition() != null) { page.add("condition", node.getCondition()); } final ArrayList<String> dependencies = new ArrayList<>(); final Set<Edge> inEdges = flow.getInEdges(node.getId()); if (inEdges != null) { for (final Edge dependency : inEdges) { dependencies.add(dependency.getSourceId()); } } if (!dependencies.isEmpty()) { page.add("dependencies", dependencies); } final ArrayList<String> dependents = new ArrayList<>(); final Set<Edge> outEdges = flow.getOutEdges(node.getId()); if (outEdges != null) { for (final Edge dependent : outEdges) { dependents.add(dependent.getTargetId()); } } if (!dependents.isEmpty()) { page.add("dependents", dependents); } // Resolve property dependencies final ArrayList<String> source = new ArrayList<>(); final String nodeSource = node.getPropsSource(); if (nodeSource != null) { source.add(nodeSource); FlowProps parent = flow.getFlowProps(nodeSource); while (parent.getInheritedSource() != null) { source.add(parent.getInheritedSource()); parent = flow.getFlowProps(parent.getInheritedSource()); } } if (!source.isEmpty()) { page.add("properties", source); } final ArrayList<Pair<String, String>> parameters = new ArrayList<>(); // Parameter for (final String key : jobProp.getKeySet()) { final String value = jobProp.get(key); parameters.add(new Pair<>(key, value)); } page.add("parameters", parameters); } catch (final AccessControlException e) { page.add("errorMsg", e.getMessage()); } catch (final ProjectManagerException e) { page.add("errorMsg", e.getMessage()); } page.render(); } private void handlePropertyPage(final HttpServletRequest req, final HttpServletResponse resp, 
final Session session) throws ServletException { final Page page = newPage(req, resp, session, "azkaban/webapp/servlet/velocity/propertypage.vm"); final String projectName = getParam(req, "project"); final String flowName = getParam(req, "flow"); final String jobName = getParam(req, "job"); final String propSource = getParam(req, "prop"); final User user = session.getUser(); Project project = null; Flow flow = null; try { project = this.projectManager.getProject(projectName); if (project == null) { page.add("errorMsg", "Project " + projectName + " not found."); logger.info("Display project property. Project " + projectName + " not found."); page.render(); return; } if (!hasPermission(project, user, Type.READ)) { throw new AccessControlException("No permission to view project " + projectName + "."); } page.add("project", project); flow = project.getFlow(flowName); if (flow == null) { page.add("errorMsg", "Flow " + flowName + " not found."); logger.info("Display project property. Project " + projectName + " Flow " + flowName + " not found."); page.render(); return; } page.add("flowid", flow.getId()); final Node node = flow.getNode(jobName); if (node == null) { page.add("errorMsg", "Job " + jobName + " not found."); logger.info("Display project property. Project " + projectName + " Flow " + flowName + " Job " + jobName + " not found."); page.render(); return; } final Props prop = this.projectManager.getProperties(project, flow, null, propSource); if (prop == null) { page.add("errorMsg", "Property " + propSource + " not found."); logger.info("Display project property. Project " + projectName + " Flow " + flowName + " Job " + jobName + " Property " + propSource + " not found."); page.render(); return; } page.add("property", propSource); page.add("jobid", node.getId()); // Resolve property dependencies final ArrayList<String> inheritProps = new ArrayList<>(); FlowProps parent = flow.getFlowProps(propSource); while (parent.getInheritedSource() != null) { inheritProps.add(parent.getInheritedSource()); parent = flow.getFlowProps(parent.getInheritedSource()); } if (!inheritProps.isEmpty()) { page.add("inheritedproperties", inheritProps); } final ArrayList<String> dependingProps = new ArrayList<>(); FlowProps child = flow.getFlowProps(flow.getNode(jobName).getPropsSource()); while (!child.getSource().equals(propSource)) { dependingProps.add(child.getSource()); child = flow.getFlowProps(child.getInheritedSource()); } if (!dependingProps.isEmpty()) { page.add("dependingproperties", dependingProps); } final ArrayList<Pair<String, String>> parameters = new ArrayList<>(); // Parameter for (final String key : prop.getKeySet()) { final String value = prop.get(key); parameters.add(new Pair<>(key, value)); } page.add("parameters", parameters); } catch (final AccessControlException e) { page.add("errorMsg", e.getMessage()); } catch (final ProjectManagerException e) { page.add("errorMsg", e.getMessage()); } page.render(); } private void handleFlowPage(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException { final Page page = newPage(req, resp, session, "azkaban/webapp/servlet/velocity/flowpage.vm"); final String projectName = getParam(req, "project"); final String flowName = getParam(req, "flow"); final User user = session.getUser(); Project project = null; Flow flow = null; try { project = this.projectManager.getProject(projectName); if (project == null) { page.add("errorMsg", "Project " + projectName + " not found."); page.render(); return; } if 
(!hasPermission(project, user, Type.READ)) { throw new AccessControlException("No permission Project " + projectName + "."); } page.add("project", project); flow = project.getFlow(flowName); if (flow == null) { page.add("errorMsg", "Flow " + flowName + " not found."); } else { page.add("flowid", flow.getId()); page.add("isLocked", flow.isLocked()); if (flow.isLocked()) { final Props props = this.projectManager.getProps(); final String flowLockErrorMessage = flow.getFlowLockErrorMessage(); final String lockedFlowMsg = flowLockErrorMessage != null ? flowLockErrorMessage : String.format(props.getString(ConfigurationKeys .AZKABAN_LOCKED_FLOW_ERROR_MESSAGE, Constants.DEFAULT_LOCKED_FLOW_ERROR_MESSAGE), flow.getId(), projectName); page.add("error_message", lockedFlowMsg); } } } catch (final AccessControlException e) { page.add("errorMsg", e.getMessage()); } page.render(); } private void handleProjectPage(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException { final Page page = newPage(req, resp, session, "azkaban/webapp/servlet/velocity/projectpage.vm"); final String projectName = getParam(req, "project"); final User user = session.getUser(); PageUtils .hideUploadButtonWhenNeeded(page, session, this.userManager, this.lockdownUploadProjects); Project project = null; try { project = this.projectManager.getProject(projectName); if (project == null) { page.add("errorMsg", "Project " + projectName + " not found."); } else { if (!hasPermission(project, user, Type.READ)) { throw new AccessControlException("No permission to view project " + projectName + "."); } page.add("project", project); page.add("admins", Utils.flattenToString( project.getUsersWithPermission(Type.ADMIN), ",")); final Permission perm = this.getPermissionObject(project, user, Type.ADMIN); page.add("userpermission", perm); page.add( "validatorFixPrompt", this.projectManager.getProps().getBoolean( ValidatorConfigs.VALIDATOR_AUTO_FIX_PROMPT_FLAG_PARAM, ValidatorConfigs.DEFAULT_VALIDATOR_AUTO_FIX_PROMPT_FLAG)); page.add( "validatorFixLabel", this.projectManager.getProps().get( ValidatorConfigs.VALIDATOR_AUTO_FIX_PROMPT_LABEL_PARAM)); page.add( "validatorFixLink", this.projectManager.getProps().get( ValidatorConfigs.VALIDATOR_AUTO_FIX_PROMPT_LINK_PARAM)); final boolean adminPerm = perm.isPermissionSet(Type.ADMIN); if (adminPerm) { page.add("admin", true); } // Set this so we can display execute buttons only to those who have // access. if (perm.isPermissionSet(Type.EXECUTE) || adminPerm) { page.add("exec", true); } else { page.add("exec", false); } final List<Flow> flows = project.getFlows().stream().filter(flow -> !flow.isEmbeddedFlow()) .collect(Collectors.toList()); if (!flows.isEmpty()) { Collections.sort(flows, FLOW_ID_COMPARATOR); page.add("flows", flows); } } } catch (final AccessControlException e) { page.add("errorMsg", e.getMessage()); } page.render(); } private void handleCreate(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws ServletException { final String projectName = hasParam(req, "name") ? getParam(req, "name") : null; final String projectDescription = hasParam(req, "description") ? 
getParam(req, "description") : null; logger.info("Create project " + projectName); final User user = session.getUser(); String status = null; String action = null; String message = null; HashMap<String, Object> params = null; if (this.lockdownCreateProjects && !UserUtils.hasPermissionforAction(this.userManager, user, Type.CREATEPROJECTS)) { message = "User " + user.getUserId() + " doesn't have permission to create projects."; logger.info(message); status = ERROR_PARAM; } else { try { this.projectManager.createProject(projectName, projectDescription, user); status = "success"; action = "redirect"; final String redirect = "manager?project=" + projectName; params = new HashMap<>(); params.put("path", redirect); } catch (final ProjectManagerException e) { message = e.getMessage(); status = ERROR_PARAM; } } final String response = AbstractAzkabanServlet .createJsonResponse(status, message, action, params); try { final Writer write = resp.getWriter(); write.append(response); write.flush(); } catch (final IOException e) { e.printStackTrace(); } } private void registerError(final Map<String, String> ret, final String error, final HttpServletResponse resp, final int returnCode) { ret.put(ERROR_PARAM, error); resp.setStatus(returnCode); } private void ajaxHandleUpload(final HttpServletRequest req, final HttpServletResponse resp, final Map<String, String> ret, final Map<String, Object> multipart, final Session session) throws ServletException, IOException { final User user = session.getUser(); final String projectName = (String) multipart.get("project"); final Project project = validateUploadAndGetProject(resp, ret, user, projectName); if (project == null) { return; } final FileItem item = (FileItem) multipart.get("file"); final String name = item.getName(); final String lowercaseExtension = FilenameUtils.getExtension(name).toLowerCase(); final Boolean hasZipExtension = lowercaseExtension.equals("zip"); final String contentType = item.getContentType(); if (contentType == null || !hasZipExtension || (!contentType.startsWith(APPLICATION_ZIP_MIME_TYPE) && !contentType.startsWith("application/x-zip-compressed") && !contentType.startsWith("application/octet-stream"))) { item.delete(); if (!hasZipExtension) { registerError(ret, "File extension '" + lowercaseExtension + "' unrecognized.", resp, HttpServletResponse.SC_BAD_REQUEST); } else { registerError(ret, "Content type '" + contentType + "' does not match extension '" + lowercaseExtension + "'", resp, HttpServletResponse.SC_BAD_REQUEST); } return; } final String autoFix = (String) multipart.get("fix"); final Props props = new Props(); if (autoFix != null && autoFix.equals("off")) { props.put(ValidatorConfigs.CUSTOM_AUTO_FIX_FLAG_PARAM, "false"); } else { props.put(ValidatorConfigs.CUSTOM_AUTO_FIX_FLAG_PARAM, "true"); } ret.put("projectId", String.valueOf(project.getId())); final File tempDir = Utils.createTempDir(); OutputStream out = null; try { logger.info("Uploading file to web server " + name); final File archiveFile = new File(tempDir, name); out = new BufferedOutputStream(new FileOutputStream(archiveFile)); IOUtils.copy(item.getInputStream(), out); out.close(); if (this.enableQuartz) { //todo chengren311: should maintain atomicity, // e.g, if uploadProject fails, associated schedule shouldn't be added. 
this.scheduler.unschedule(project); } // get the locked flows for the project, so that they can be locked again after upload final List<String> lockedFlows = getLockedFlows(project); final Map<String, ValidationReport> reports = this.projectManager .uploadProject(project, archiveFile, lowercaseExtension, user, props); if (this.enableQuartz) { this.scheduler.schedule(project, user.getUserId()); } // reset locks for flows as needed lockFlowsForProject(project, lockedFlows); // remove schedule of renamed/deleted flows removeScheduleOfDeletedFlows(project, this.scheduleManager, (schedule) -> { logger.info( "Removed schedule with id {} of renamed/deleted flow: {} from project: {}.", schedule.getScheduleId(), schedule.getFlowName(), schedule.getProjectName()); this.projectManager.postProjectEvent(project, EventType.SCHEDULE, "azkaban", "Schedule " + schedule.toString() + " has been removed."); }); registerErrorsAndWarningsFromValidationReport(resp, ret, reports); } catch (final Exception e) { logger.info("Installation Failed.", e); String error = e.getMessage(); if (error.length() > 512) { error = error.substring(0, 512) + "<br>Too many errors to display.<br>"; } registerError(ret, "Installation Failed.<br>" + error, resp, HttpServletResponse.SC_INTERNAL_SERVER_ERROR); } finally { if (out != null) { out.close(); } if (tempDir.exists()) { FileUtils.deleteDirectory(tempDir); } } logger.info("Upload: project " + projectName + " version is " + project.getVersion() + ", reference is " + System.identityHashCode(project)); ret.put("version", String.valueOf(project.getVersion())); } /** * @return project. Null if invalid upload params or not enough permissions to proceed. */ private Project validateUploadAndGetProject(final HttpServletResponse resp, final Map<String, String> ret, final User user, final String projectName) { if (projectName == null || projectName.isEmpty()) { registerError(ret, "No project name found.", resp, HttpServletResponse.SC_BAD_REQUEST); return null; } final Project project = this.projectManager.getProject(projectName); if (project == null || !project.isActive()) { final String failureCause = (project == null) ? "doesn't exist." : "was already removed."; registerError(ret, "Installation Failed. Project '" + projectName + " " + failureCause, resp, HttpServletResponse.SC_GONE); return null; } logger.info( "Upload: reference of project " + projectName + " is " + System.identityHashCode(project)); if (this.lockdownUploadProjects && !UserUtils .hasPermissionforAction(this.userManager, user, Type.UPLOADPROJECTS)) { final String message = "Project uploading is locked out. Only admin users and users with special permissions can upload projects. " + "User " + user.getUserId() + " doesn't have permission to upload project."; logger.info(message); registerError(ret, message, resp, HttpServletResponse.SC_FORBIDDEN); return null; } if (!hasPermission(project, user, Type.WRITE)) { registerError(ret, "Installation Failed. 
User '" + user.getUserId() + "' does not have write access.", resp, HttpServletResponse.SC_BAD_REQUEST); return null; } return project; } /** * Remove schedule of renamed/deleted flows * * @param project project from which old flows will be unscheduled * @param scheduleManager the schedule manager * @param onDeletedSchedule a callback function to execute with every deleted schedule */ static void removeScheduleOfDeletedFlows(final Project project, final ScheduleManager scheduleManager, final Consumer<Schedule> onDeletedSchedule) throws ScheduleManagerException { final Set<String> flowNameList = project.getFlows().stream().map(f -> f.getId()).collect( Collectors.toSet()); for (final Schedule schedule : scheduleManager.getSchedules()) { if (schedule.getProjectId() == project.getId() && !flowNameList.contains(schedule.getFlowName())) { scheduleManager.removeSchedule(schedule); onDeletedSchedule.accept(schedule); } } } private void registerErrorsAndWarningsFromValidationReport(final HttpServletResponse resp, final Map<String, String> ret, final Map<String, ValidationReport> reports) { final StringBuffer errorMsgs = new StringBuffer(); final StringBuffer warnMsgs = new StringBuffer(); for (final Entry<String, ValidationReport> reportEntry : reports.entrySet()) { final ValidationReport report = reportEntry.getValue(); for (final String msg : report.getInfoMsgs()) { switch (ValidationReport.getInfoMsgLevel(msg)) { case ERROR: errorMsgs.append(ValidationReport.getInfoMsg(msg) + "<br/>"); break; case WARN: warnMsgs.append(ValidationReport.getInfoMsg(msg) + "<br/>"); break; default: break; } } if (!report.getErrorMsgs().isEmpty()) { errorMsgs.append("Validator " + reportEntry.getKey() + " reports errors:<br><br>"); for (final String msg : report.getErrorMsgs()) { errorMsgs.append(msg + "<br>"); } } if (!report.getWarningMsgs().isEmpty()) { warnMsgs.append("Validator " + reportEntry.getKey() + " reports warnings:<br><br>"); for (final String msg : report.getWarningMsgs()) { warnMsgs.append(msg + "<br>"); } } } if (errorMsgs.length() > 0) { // If putting more than 4000 characters in the cookie, the entire message will somehow // get discarded. registerError(ret, errorMsgs.length() > 4000 ? errorMsgs.substring(0, 4000) : errorMsgs.toString(), resp, HttpServletResponse.SC_INTERNAL_SERVER_ERROR); } if (warnMsgs.length() > 0) { ret.put("warn", warnMsgs.length() > 4000 ? warnMsgs.substring(0, 4000) : warnMsgs.toString()); } } /** * @return the list of locked flows for the specified project. */ private List<String> getLockedFlows(final Project project) { final List<Flow> flows = project.getFlows(); return flows.stream().filter(flow -> flow.isLocked()).map(flow -> flow.getId()) .collect(Collectors.toList()); } /** * Lock the specified flows for the project. 
* * @param project the project * @param lockedFlows list of flow IDs of flows to lock */ private void lockFlowsForProject(final Project project, final List<String> lockedFlows) { for (final String flowId : lockedFlows) { final Flow flow = project.getFlow(flowId); if (flow != null) { flow.setLocked(true); } } } private void handleUpload(final HttpServletRequest req, final HttpServletResponse resp, final Map<String, Object> multipart, final Session session) throws ServletException, IOException { final HashMap<String, String> ret = new HashMap<>(); final String projectName = (String) multipart.get("project"); ajaxHandleUpload(req, resp, ret, multipart, session); if (ret.containsKey(ERROR_PARAM)) { setErrorMessageInCookie(resp, ret.get(ERROR_PARAM)); } if (ret.containsKey("warn")) { setWarnMessageInCookie(resp, ret.get("warn")); } resp.sendRedirect(req.getRequestURI() + "?project=" + projectName); } private Permission getPermissionObject(final Project project, final User user, final Permission.Type type) { final Permission perm = project.getCollectivePermission(user); for (final String roleName : user.getRoles()) { final Role role = this.userManager.getRole(roleName); perm.addPermissions(role.getPermission()); } return perm; } private void handleReloadProjectWhitelist(final HttpServletRequest req, final HttpServletResponse resp, final Session session) throws IOException { final HashMap<String, Object> ret = new HashMap<>(); if (hasPermission(session.getUser(), Permission.Type.ADMIN)) { try { if (this.projectManager.loadProjectWhiteList()) { ret.put("success", "Project whitelist re-loaded!"); } else { ret.put(ERROR_PARAM, "azkaban.properties doesn't contain property " + ProjectWhitelist.XML_FILE_PARAM); } } catch (final Exception e) { ret.put(ERROR_PARAM, "Exception occurred while trying to re-load project whitelist: " + e); } } else { ret.put(ERROR_PARAM, "Provided session doesn't have admin privilege."); } this.writeJSON(resp, ret); } protected boolean hasPermission(final User user, final Permission.Type type) { for (final String roleName : user.getRoles()) { final Role role = this.userManager.getRole(roleName); if (role.getPermission().isPermissionSet(type) || role.getPermission().isPermissionSet(Permission.Type.ADMIN)) { return true; } } return false; } private static class NodeLevelComparator implements Comparator<Node> { @Override public int compare(final Node node1, final Node node2) { return node1.getLevel() - node2.getLevel(); } } public static class PageSelection { private final String page; private final int size; private final boolean disabled; private final int nextPage; private boolean selected; public PageSelection(final String pageName, final int size, final boolean disabled, final boolean selected, final int nextPage) { this.page = pageName; this.size = size; this.disabled = disabled; this.setSelected(selected); this.nextPage = nextPage; } public String getPage() { return this.page; } public int getSize() { return this.size; } public boolean getDisabled() { return this.disabled; } public boolean isSelected() { return this.selected; } public void setSelected(final boolean selected) { this.selected = selected; } public int getNextPage() { return this.nextPage; } } }
1
18,589
Deleting unused code.
azkaban-azkaban
java
@@ -1,6 +1,7 @@ import torch -from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner +from ..bbox import assign_and_sample, bbox2delta, build_assigner +from ..bbox.samplers.pseudo_sampler import PseudoSampler from ..utils import multi_apply
1
import torch from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner from ..utils import multi_apply def anchor_target(anchor_list, valid_flag_list, gt_bboxes_list, img_metas, target_means, target_stds, cfg, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, background_label=80, sampling=True, unmap_outputs=True): """Compute regression and classification targets for anchors. Args: anchor_list (list[list]): Multi level anchors of each image. valid_flag_list (list[list]): Multi level valid flags of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. target_means (Iterable): Mean value of regression targets. target_stds (Iterable): Std value of regression targets. cfg (dict): RPN train configs. gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be ignored. gt_bboxes_list (list[Tensor]): Ground truth labels of each box. label_channels (int): Channel of label. background_label (int): Label ID of background. sampling (bool): Whether to do sampling. upmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( anchor_target_single, anchor_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, target_means=target_means, target_stds=target_stds, cfg=cfg, label_channels=label_channels, background_label=background_label, sampling=sampling, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def images_to_levels(target, num_level_anchors): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] 
""" target = torch.stack(target, 0) level_targets = [] start = 0 for n in num_level_anchors: end = start + n level_targets.append(target[:, start:end].squeeze(0)) start = end return level_targets def anchor_target_single(flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, target_means, target_stds, cfg, label_channels=1, background_label=80, sampling=True, unmap_outputs=True): inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], cfg.allowed_border) if not inside_flags.any(): return (None, ) * 6 # assign gt and sample anchors anchors = flat_anchors[inside_flags.type(torch.bool), :] if sampling: assign_result, sampling_result = assign_and_sample( anchors, gt_bboxes, gt_bboxes_ignore, None, cfg) else: bbox_assigner = build_assigner(cfg.assigner) assign_result = bbox_assigner.assign(anchors, gt_bboxes, gt_bboxes_ignore, gt_labels) bbox_sampler = PseudoSampler() sampling_result = bbox_sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), background_label, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, target_means, target_stds) bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: # only rpn gives gt_labels as None, this time FG is 1 labels[pos_inds] = 1 else: labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] if cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap(labels, num_total_anchors, inside_flags) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0): img_h, img_w = img_shape[:2] if allowed_border >= 0: inside_flags = valid_flags & \ (flat_anchors[:, 0] >= -allowed_border).type(torch.uint8) & \ (flat_anchors[:, 1] >= -allowed_border).type(torch.uint8) & \ (flat_anchors[:, 2] < img_w + allowed_border).type(torch.uint8) & \ (flat_anchors[:, 3] < img_h + allowed_border).type(torch.uint8) else: inside_flags = valid_flags return inside_flags def unmap(data, count, inds, fill=0): """ Unmap a subset of item (data) back to the original set of items (of size count) """ if data.dim() == 1: ret = data.new_full((count, ), fill) ret[inds.type(torch.bool)] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds.type(torch.bool), :] = data return ret
1
19,053
`PseudoSampler` can also be imported from `..bbox`
open-mmlab-mmdetection
py
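The review above hinges on `PseudoSampler` being re-exported by the `..bbox` package, so the shorter import in the pre-patch file and the deeper path introduced by the diff resolve to the same class. A minimal sketch of that equivalence, assuming the package installs as `mmdet` and that `mmdet.core.bbox` still re-exports the sampler; the aliases below are illustrative only:

from mmdet.core.bbox import PseudoSampler as sampler_from_package
from mmdet.core.bbox.samplers.pseudo_sampler import PseudoSampler as sampler_from_module

# If ..bbox re-exports the class, both names are bound to the same object,
# which is why the reviewer notes either import path works.
assert sampler_from_package is sampler_from_module
sampler = sampler_from_module()  # PseudoSampler takes no required arguments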
@@ -19,6 +19,7 @@ package agreementtest import ( "fmt" + "github.com/algorand/go-algorand/protocol" "strconv" "time"
1
// Copyright (C) 2019-2020 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. // Package agreementtest produces useful functions for testing code. package agreementtest import ( "fmt" "strconv" "time" "github.com/algorand/go-deadlock" "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/agreement/gossip" "github.com/algorand/go-algorand/components/mocks" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/util/db" "github.com/algorand/go-algorand/util/timers" ) type instant struct { Z0, Z1 chan struct{} timeoutAtCalled chan struct{} eventsQueues map[string]int mu deadlock.Mutex } func makeInstant() *instant { i := new(instant) i.Z0 = make(chan struct{}, 1) i.Z1 = make(chan struct{}) i.timeoutAtCalled = make(chan struct{}) i.eventsQueues = make(map[string]int) return i } func (i *instant) Decode([]byte) (timers.Clock, error) { return i, nil } func (i *instant) Encode() []byte { return nil } func (i *instant) TimeoutAt(d time.Duration) <-chan time.Time { ta := make(chan time.Time) select { case <-i.timeoutAtCalled: default: close(i.timeoutAtCalled) return ta } if d == agreement.FilterTimeout() && !i.HasPending("pseudonode") { close(ta) } return ta } func (i *instant) Zero() timers.Clock { i.Z0 <- struct{}{} // pause here until runRound is called i.Z1 <- struct{}{} return i } func (i *instant) runRound(r basics.Round) { <-i.Z1 // wait until Zero is called <-i.timeoutAtCalled <-i.Z0 } func (i *instant) shutdown() { <-i.Z1 } func (i *instant) UpdateEventsQueue(queueName string, queueLength int) { i.mu.Lock() defer i.mu.Unlock() i.eventsQueues[queueName] = queueLength } func (i *instant) HasPending(queueName string) bool { i.mu.Lock() defer i.mu.Unlock() v, has := i.eventsQueues[queueName] if !has { return false } if v == 0 { return false } return true } type blackhole struct { mocks.MockNetwork } func (b *blackhole) Address() (string, bool) { return "blackhole", true } // CryptoRandomSource is a random source that is based off our crypto library. type CryptoRandomSource struct{} // Uint64 implements the randomness by calling hte crypto library. func (c *CryptoRandomSource) Uint64() uint64 { return crypto.RandUint64() } // Simulate n rounds of agreement on the specified Ledger given the specified // KeyManager, BlockFactory, and BlockValidator. // // If a nonzero roundDeadline is given, this function will return an error if // any round does not conclude by the deadline. // // The KeyManager must have enough keys to form a cert-quorum. 
func Simulate(dbname string, n basics.Round, roundDeadline time.Duration, ledger agreement.Ledger, keyManager agreement.KeyManager, proposalFactory agreement.BlockFactory, proposalValidator agreement.BlockValidator, log logging.Logger) error { startRound := ledger.NextRound() stopRound := startRound + n // stop when ledger.NextRound() == stopRound accessor, err := db.MakeAccessor(dbname+"_simulate_"+strconv.Itoa(int(stopRound))+"_crash.db", false, true) if err != nil { return err } defer accessor.Close() stopwatch := makeInstant() parameters := agreement.Parameters{ Logger: log, Accessor: accessor, Clock: stopwatch, Network: gossip.WrapNetwork(new(blackhole), log), Ledger: ledger, BlockFactory: proposalFactory, BlockValidator: proposalValidator, KeyManager: keyManager, Local: config.Local{ CadaverSizeTarget: 200 * 1024, }, RandomSource: &CryptoRandomSource{}, EventsProcessingMonitor: stopwatch, } _ = accessor service := agreement.MakeService(parameters) service.Start() defer service.Shutdown() defer stopwatch.shutdown() for ledger.NextRound() < stopRound { r := ledger.NextRound() stopwatch.runRound(r) deadlineCh := time.After(roundDeadline) if roundDeadline == 0 { deadlineCh = nil } select { case <-ledger.Wait(r): case <-deadlineCh: return fmt.Errorf("agreementtest.Simulate: round %d failed to complete by the deadline (%v)", r, roundDeadline) } } return nil }
1
39,589
Could you move this import to where the other `github.com/algorand/go-algorand` imports are?
algorand-go-algorand
go
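The comment above is about Go import grouping: the patch drops the new import into the standard-library block, while the file (and goimports) keeps the `github.com/algorand/go-algorand/...` packages together in their own group. A hedged sketch of that layout; the `main` wrapper and blank-identifier imports exist only so the fragment compiles stand-alone and are not part of the original file:

package main

import (
	// Standard-library group.
	"fmt"
	"strconv"
	"time"

	// Third-party group.
	_ "github.com/algorand/go-deadlock"

	// Project group: every github.com/algorand/go-algorand package stays here,
	// which is where the reviewer wants the new protocol import moved.
	_ "github.com/algorand/go-algorand/protocol"
)

func main() {
	fmt.Println(strconv.Quote(time.Now().Format(time.RFC3339)))
}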
@@ -655,7 +655,7 @@ class Conf(ConfClass): # can, tls, http are not loaded by default load_layers = ['bluetooth', 'bluetooth4LE', 'dhcp', 'dhcp6', 'dns', 'dot11', 'dot15d4', 'eap', 'gprs', 'hsrp', 'inet', - 'inet6', 'ipsec', 'ir', 'isakmp', 'l2', 'l2tp', + 'inet6', 'ipsec', 'ir', 'isakmp', 'l2', 'l2tp','l2f', 'llmnr', 'lltd', 'mgcp', 'mobileip', 'netbios', 'netflow', 'ntp', 'ppi', 'ppp', 'pptp', 'radius', 'rip', 'rtp', 'sctp', 'sixlowpan', 'skinny', 'smb', 'smb2', 'snmp',
1
# This file is part of Scapy # See http://www.secdev.org/projects/scapy for more information # Copyright (C) Philippe Biondi <[email protected]> # This program is published under a GPLv2 license """ Implementation of the configuration object. """ from __future__ import absolute_import from __future__ import print_function import functools import os import re import time import socket import sys import atexit from scapy import VERSION, base_classes from scapy.consts import DARWIN, WINDOWS, LINUX, BSD, SOLARIS from scapy.error import log_scapy, warning, ScapyInvalidPlatformException from scapy.modules import six from scapy.themes import NoTheme, apply_ipython_style ############ # Config # ############ class ConfClass(object): def configure(self, cnf): self.__dict__ = cnf.__dict__.copy() def __repr__(self): return str(self) def __str__(self): s = "" keys = self.__class__.__dict__.copy() keys.update(self.__dict__) keys = sorted(keys) for i in keys: if i[0] != "_": r = repr(getattr(self, i)) r = " ".join(r.split()) wlen = 76 - max(len(i), 10) if len(r) > wlen: r = r[:wlen - 3] + "..." s += "%-10s = %s\n" % (i, r) return s[:-1] class Interceptor(object): def __init__(self, name=None, default=None, hook=None, args=None, kargs=None): self.name = name self.intname = "_intercepted_%s" % name self.default = default self.hook = hook self.args = args if args is not None else [] self.kargs = kargs if kargs is not None else {} def __get__(self, obj, typ=None): if not hasattr(obj, self.intname): setattr(obj, self.intname, self.default) return getattr(obj, self.intname) @staticmethod def set_from_hook(obj, name, val): int_name = "_intercepted_%s" % name setattr(obj, int_name, val) def __set__(self, obj, val): setattr(obj, self.intname, val) self.hook(self.name, val, *self.args, **self.kargs) def _readonly(name): default = Conf.__dict__[name].default Interceptor.set_from_hook(conf, name, default) raise ValueError("Read-only value !") ReadOnlyAttribute = functools.partial( Interceptor, hook=(lambda name, *args, **kwargs: _readonly(name)) ) ReadOnlyAttribute.__doc__ = "Read-only class attribute" class ProgPath(ConfClass): universal_open = "open" if DARWIN else "xdg-open" pdfreader = universal_open psreader = universal_open svgreader = universal_open dot = "dot" display = "display" tcpdump = "tcpdump" tcpreplay = "tcpreplay" hexedit = "hexer" tshark = "tshark" wireshark = "wireshark" ifconfig = "ifconfig" class ConfigFieldList: def __init__(self): self.fields = set() self.layers = set() @staticmethod def _is_field(f): return hasattr(f, "owners") def _recalc_layer_list(self): self.layers = {owner for f in self.fields for owner in f.owners} def add(self, *flds): self.fields |= {f for f in flds if self._is_field(f)} self._recalc_layer_list() def remove(self, *flds): self.fields -= set(flds) self._recalc_layer_list() def __contains__(self, elt): if isinstance(elt, base_classes.Packet_metaclass): return elt in self.layers return elt in self.fields def __repr__(self): return "<%s [%s]>" % (self.__class__.__name__, " ".join(str(x) for x in self.fields)) # noqa: E501 class Emphasize(ConfigFieldList): pass class Resolve(ConfigFieldList): pass class Num2Layer: def __init__(self): self.num2layer = {} self.layer2num = {} def register(self, num, layer): self.register_num2layer(num, layer) self.register_layer2num(num, layer) def register_num2layer(self, num, layer): self.num2layer[num] = layer def register_layer2num(self, num, layer): self.layer2num[layer] = num def __getitem__(self, item): if isinstance(item, 
base_classes.Packet_metaclass): return self.layer2num[item] return self.num2layer[item] def __contains__(self, item): if isinstance(item, base_classes.Packet_metaclass): return item in self.layer2num return item in self.num2layer def get(self, item, default=None): return self[item] if item in self else default def __repr__(self): lst = [] for num, layer in six.iteritems(self.num2layer): if layer in self.layer2num and self.layer2num[layer] == num: dir = "<->" else: dir = " ->" lst.append((num, "%#6x %s %-20s (%s)" % (num, dir, layer.__name__, layer._name))) for layer, num in six.iteritems(self.layer2num): if num not in self.num2layer or self.num2layer[num] != layer: lst.append((num, "%#6x <- %-20s (%s)" % (num, layer.__name__, layer._name))) lst.sort() return "\n".join(y for x, y in lst) class LayersList(list): def __init__(self): list.__init__(self) self.ldict = {} self.filtered = False self._backup_dict = {} def __repr__(self): return "\n".join("%-20s: %s" % (layer.__name__, layer.name) for layer in self) def register(self, layer): self.append(layer) if layer.__module__ not in self.ldict: self.ldict[layer.__module__] = [] self.ldict[layer.__module__].append(layer) def layers(self): result = [] # This import may feel useless, but it is required for the eval below import scapy # noqa: F401 for lay in self.ldict: doc = eval(lay).__doc__ result.append((lay, doc.strip().split("\n")[0] if doc else lay)) return result def filter(self, items): """Disable dissection of unused layers to speed up dissection""" if self.filtered: raise ValueError("Already filtered. Please disable it first") for lay in six.itervalues(self.ldict): for cls in lay: if cls not in self._backup_dict: self._backup_dict[cls] = cls.payload_guess[:] cls.payload_guess = [ y for y in cls.payload_guess if y[1] in items ] self.filtered = True def unfilter(self): """Re-enable dissection for all layers""" if not self.filtered: raise ValueError("Not filtered. 
Please filter first") for lay in six.itervalues(self.ldict): for cls in lay: cls.payload_guess = self._backup_dict[cls] self._backup_dict.clear() self.filtered = False class CommandsList(list): def __repr__(self): s = [] for li in sorted(self, key=lambda x: x.__name__): doc = li.__doc__.split("\n")[0] if li.__doc__ else "--" s.append("%-20s: %s" % (li.__name__, doc)) return "\n".join(s) def register(self, cmd): self.append(cmd) return cmd # return cmd so that method can be used as a decorator def lsc(): """Displays Scapy's default commands""" print(repr(conf.commands)) class CacheInstance(dict, object): __slots__ = ["timeout", "name", "_timetable", "__dict__"] def __init__(self, name="noname", timeout=None): self.timeout = timeout self.name = name self._timetable = {} def flush(self): self.__init__(name=self.name, timeout=self.timeout) def __getitem__(self, item): if item in self.__slots__: return object.__getattribute__(self, item) val = dict.__getitem__(self, item) if self.timeout is not None: t = self._timetable[item] if time.time() - t > self.timeout: raise KeyError(item) return val def get(self, item, default=None): # overloading this method is needed to force the dict to go through # the timetable check try: return self[item] except KeyError: return default def __setitem__(self, item, v): if item in self.__slots__: return object.__setattr__(self, item, v) self._timetable[item] = time.time() dict.__setitem__(self, item, v) def update(self, other): for key, value in six.iteritems(other): # We only update an element from `other` either if it does # not exist in `self` or if the entry in `self` is older. if key not in self or self._timetable[key] < other._timetable[key]: dict.__setitem__(self, key, value) self._timetable[key] = other._timetable[key] def iteritems(self): if self.timeout is None: return six.iteritems(self.__dict__) t0 = time.time() return ((k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501 def iterkeys(self): if self.timeout is None: return six.iterkeys(self.__dict__) t0 = time.time() return (k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501 def __iter__(self): return six.iterkeys(self.__dict__) def itervalues(self): if self.timeout is None: return six.itervalues(self.__dict__) t0 = time.time() return (v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501 def items(self): if self.timeout is None: return dict.items(self) t0 = time.time() return [(k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501 def keys(self): if self.timeout is None: return dict.keys(self) t0 = time.time() return [k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501 def values(self): if self.timeout is None: return list(six.itervalues(self)) t0 = time.time() return [v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501 def __len__(self): if self.timeout is None: return dict.__len__(self) return len(self.keys()) def summary(self): return "%s: %i valid items. 
Timeout=%rs" % (self.name, len(self), self.timeout) # noqa: E501 def __repr__(self): s = [] if self: mk = max(len(k) for k in six.iterkeys(self.__dict__)) fmt = "%%-%is %%s" % (mk + 1) for item in six.iteritems(self.__dict__): s.append(fmt % item) return "\n".join(s) class NetCache: def __init__(self): self._caches_list = [] def add_cache(self, cache): self._caches_list.append(cache) setattr(self, cache.name, cache) def new_cache(self, name, timeout=None): c = CacheInstance(name=name, timeout=timeout) self.add_cache(c) def __delattr__(self, attr): raise AttributeError("Cannot delete attributes") def update(self, other): for co in other._caches_list: if hasattr(self, co.name): getattr(self, co.name).update(co) else: self.add_cache(co.copy()) def flush(self): for c in self._caches_list: c.flush() def __repr__(self): return "\n".join(c.summary() for c in self._caches_list) def _version_checker(module, minver): """Checks that module has a higher version that minver. params: - module: a module to test - minver: a tuple of versions """ # We could use LooseVersion, but distutils imports imp which is deprecated version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?' version_tags = re.match(version_regexp, module.__version__) if not version_tags: return False version_tags = version_tags.group(1).split(".") version_tags = tuple(int(x) for x in version_tags) return version_tags >= minver def isCryptographyValid(): """ Check if the cryptography module >= 2.0.0 is present. This is the minimum version for most usages in Scapy. """ try: import cryptography except ImportError: return False return _version_checker(cryptography, (2, 0, 0)) def isCryptographyAdvanced(): """ Check if the cryptography module is present, and if it supports X25519, ChaCha20Poly1305 and such. Notes: - cryptography >= 2.0 is required - OpenSSL >= 1.1.0 is required """ try: from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey # noqa: E501 X25519PrivateKey.generate() except Exception: return False else: return True def isPyPy(): """Returns either scapy is running under PyPy or not""" try: import __pypy__ # noqa: F401 return True except ImportError: return False def _prompt_changer(attr, val): """Change the current prompt theme""" try: sys.ps1 = conf.color_theme.prompt(conf.prompt) except Exception: pass try: apply_ipython_style(get_ipython()) except NameError: pass def _set_conf_sockets(): """Populate the conf.L2Socket and conf.L3Socket according to the various use_* parameters """ from scapy.main import _load if conf.use_bpf and not BSD: Interceptor.set_from_hook(conf, "use_bpf", False) raise ScapyInvalidPlatformException("BSD-like (OSX, *BSD...) only !") if not conf.use_pcap and SOLARIS: Interceptor.set_from_hook(conf, "use_pcap", True) raise ScapyInvalidPlatformException( "Scapy only supports libpcap on Solaris !" ) # we are already in an Interceptor hook, use Interceptor.set_from_hook if conf.use_pcap: try: from scapy.arch.pcapdnet import L2pcapListenSocket, L2pcapSocket, \ L3pcapSocket except (OSError, ImportError): warning("No libpcap provider available ! 
pcap won't be used") Interceptor.set_from_hook(conf, "use_pcap", False) else: conf.L3socket = L3pcapSocket conf.L3socket6 = functools.partial(L3pcapSocket, filter="ip6") conf.L2socket = L2pcapSocket conf.L2listen = L2pcapListenSocket # Update globals _load("scapy.arch.pcapdnet") return if conf.use_bpf: from scapy.arch.bpf.supersocket import L2bpfListenSocket, \ L2bpfSocket, L3bpfSocket conf.L3socket = L3bpfSocket conf.L3socket6 = functools.partial(L3bpfSocket, filter="ip6") conf.L2socket = L2bpfSocket conf.L2listen = L2bpfListenSocket # Update globals _load("scapy.arch.bpf") return if LINUX: from scapy.arch.linux import L3PacketSocket, L2Socket, L2ListenSocket conf.L3socket = L3PacketSocket conf.L3socket6 = functools.partial(L3PacketSocket, filter="ip6") conf.L2socket = L2Socket conf.L2listen = L2ListenSocket # Update globals _load("scapy.arch.linux") return if WINDOWS: from scapy.arch.windows import _NotAvailableSocket from scapy.arch.windows.native import L3WinSocket, L3WinSocket6 conf.L3socket = L3WinSocket conf.L3socket6 = L3WinSocket6 conf.L2socket = _NotAvailableSocket conf.L2listen = _NotAvailableSocket # No need to update globals on Windows return from scapy.supersocket import L3RawSocket from scapy.layers.inet6 import L3RawSocket6 conf.L3socket = L3RawSocket conf.L3socket6 = L3RawSocket6 def _socket_changer(attr, val): if not isinstance(val, bool): raise TypeError("This argument should be a boolean") dependencies = { # Things that will be turned off "use_pcap": ["use_bpf"], "use_bpf": ["use_pcap"], } restore = {k: getattr(conf, k) for k in dependencies} del restore[attr] # This is handled directly by _set_conf_sockets if val: # Only if True for param in dependencies[attr]: Interceptor.set_from_hook(conf, param, False) try: _set_conf_sockets() except (ScapyInvalidPlatformException, ImportError) as e: for key, value in restore.items(): Interceptor.set_from_hook(conf, key, value) if isinstance(e, ScapyInvalidPlatformException): raise def _loglevel_changer(attr, val): """Handle a change of conf.logLevel""" log_scapy.setLevel(val) class Conf(ConfClass): """ This object contains the configuration of Scapy. """ version = ReadOnlyAttribute("version", VERSION) session = "" #: filename where the session will be saved interactive = False #: can be "ipython", "python" or "auto". Default: Auto interactive_shell = "" #: if 1, prevents any unwanted packet to go out (ARP, DNS, ...) stealth = "not implemented" #: selects the default output interface for srp() and sendp(). iface = None layers = LayersList() commands = CommandsList() ASN1_default_codec = None #: Codec used by default for ASN1 objects AS_resolver = None #: choose the AS resolver class to use dot15d4_protocol = None # Used in dot15d4.py logLevel = Interceptor("logLevel", log_scapy.level, _loglevel_changer) #: if 0, doesn't check that IPID matches between IP sent and #: ICMP IP citation received #: if 1, checks that they either are equal or byte swapped #: equals (bug in some IP stacks) #: if 2, strictly checks that they are equals checkIPID = False #: if 1, checks IP src in IP and ICMP IP citation match #: (bug in some NAT stacks) checkIPsrc = True checkIPaddr = True #: if True, checks that IP-in-IP layers match. 
If False, do #: not check IP layers that encapsulates another IP layer checkIPinIP = True #: if 1, also check that TCP seq and ack match the #: ones in ICMP citation check_TCPerror_seqack = False verb = 2 #: level of verbosity, from 0 (almost mute) to 3 (verbose) prompt = Interceptor("prompt", ">>> ", _prompt_changer) #: default mode for listening socket (to get answers if you #: spoof on a lan) promisc = True sniff_promisc = 1 #: default mode for sniff() raw_layer = None raw_summary = False default_l2 = None l2types = Num2Layer() l3types = Num2Layer() L3socket = None L3socket6 = None L2socket = None L2listen = None BTsocket = None USBsocket = None min_pkt_size = 60 mib = None #: holds MIB direct access dictionary bufsize = 2**16 #: history file histfile = os.getenv('SCAPY_HISTFILE', os.path.join(os.path.expanduser("~"), ".scapy_history")) #: includes padding in disassembled packets padding = 1 #: BPF filter for packets to ignore except_filter = "" #: bpf filter added to every sniffing socket to exclude traffic #: from analysis filter = "" #: when 1, store received packet that are not matched into `debug.recv` debug_match = False #: When 1, print some TLS session secrets when they are computed. debug_tls = False wepkey = "" cache_iflist = {} #: holds the Scapy IPv4 routing table and provides methods to #: manipulate it route = None # Filed by route.py #: holds the Scapy IPv6 routing table and provides methods to #: manipulate it route6 = None # Filed by route6.py auto_fragment = True #: raise exception when a packet dissector raises an exception debug_dissector = False color_theme = Interceptor("color_theme", NoTheme(), _prompt_changer) #: how much time between warnings from the same place warning_threshold = 5 prog = ProgPath() #: holds list of fields for which resolution should be done resolve = Resolve() #: holds list of enum fields for which conversion to string #: should NOT be done noenum = Resolve() emph = Emphasize() #: read only attribute to show if PyPy is in use use_pypy = ReadOnlyAttribute("use_pypy", isPyPy()) #: use libpcap integration or not. Changing this value will update #: the conf.L[2/3] sockets use_pcap = Interceptor( "use_pcap", os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y"), _socket_changer ) use_bpf = Interceptor("use_bpf", False, _socket_changer) use_npcap = False ipv6_enabled = socket.has_ipv6 #: path or list of paths where extensions are to be looked for extensions_paths = "." stats_classic_protocols = [] stats_dot11_protocols = [] temp_files = [] netcache = NetCache() geoip_city = None # can, tls, http are not loaded by default load_layers = ['bluetooth', 'bluetooth4LE', 'dhcp', 'dhcp6', 'dns', 'dot11', 'dot15d4', 'eap', 'gprs', 'hsrp', 'inet', 'inet6', 'ipsec', 'ir', 'isakmp', 'l2', 'l2tp', 'llmnr', 'lltd', 'mgcp', 'mobileip', 'netbios', 'netflow', 'ntp', 'ppi', 'ppp', 'pptp', 'radius', 'rip', 'rtp', 'sctp', 'sixlowpan', 'skinny', 'smb', 'smb2', 'snmp', 'tftp', 'vrrp', 'vxlan', 'x509', 'zigbee'] #: a dict which can be used by contrib layers to store local #: configuration contribs = dict() crypto_valid = isCryptographyValid() crypto_valid_advanced = isCryptographyAdvanced() fancy_prompt = True auto_crop_tables = True #: how often to check for new packets. #: Defaults to 0.05s. recv_poll_rate = 0.05 #: When True, raise exception if no dst MAC found otherwise broadcast. #: Default is False. 
raise_no_dst_mac = False loopback_name = "lo" if LINUX else "lo0" def __getattr__(self, attr): # Those are loaded on runtime to avoid import loops if attr == "manufdb": from scapy.data import MANUFDB return MANUFDB if attr == "ethertypes": from scapy.data import ETHER_TYPES return ETHER_TYPES if attr == "protocols": from scapy.data import IP_PROTOS return IP_PROTOS if attr == "services_udp": from scapy.data import UDP_SERVICES return UDP_SERVICES if attr == "services_tcp": from scapy.data import TCP_SERVICES return TCP_SERVICES if attr == "iface6": warning("conf.iface6 is deprecated in favor of conf.iface") attr = "iface" return object.__getattribute__(self, attr) if not Conf.ipv6_enabled: log_scapy.warning("IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.") # noqa: E501 for m in ["inet6", "dhcp6"]: if m in Conf.load_layers: Conf.load_layers.remove(m) conf = Conf() def crypto_validator(func): """ This a decorator to be used for any method relying on the cryptography library. # noqa: E501 Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'. """ def func_in(*args, **kwargs): if not conf.crypto_valid: raise ImportError("Cannot execute crypto-related method! " "Please install python-cryptography v1.7 or later.") # noqa: E501 return func(*args, **kwargs) return func_in def scapy_delete_temp_files(): # type: () -> None for f in conf.temp_files: try: os.unlink(f) except Exception: pass del conf.temp_files[:] atexit.register(scapy_delete_temp_files)
1
17,894
I'd rather not activate this protocol by default for now, unless there is a good reason for that.
secdev-scapy
py
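The comment above pushes back on adding the new dissector to `conf.load_layers`, i.e. on having `from scapy.all import *` pull it in for everyone. A hedged sketch of the opt-in alternative, assuming the patch ships the module as `scapy.layers.l2f` (the name comes from the diff; whether the module exists under that name depends on the final patch):

from scapy.all import conf, load_layer

# Not in the default list, so it is not dissected until explicitly requested.
print("l2f" in conf.load_layers)   # expected False while the default list is unchanged

# Opt in on demand: load_layer("x") imports scapy.layers.x and makes its
# classes and bindings available without touching the default configuration.
load_layer("l2f")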
@@ -19,11 +19,14 @@ import inspect # pylint: disable=line-too-long from google.cloud.forseti.common.util import logger from google.cloud.forseti.common.util import string_formats +from google.cloud.forseti.services.inventory.storage import DataAccess +from google.cloud.forseti.services.scanner import dao as scanner_dao from google.cloud.forseti.notifier.notifiers.base_notification import BaseNotification from google.cloud.forseti.notifier.notifiers import cscc_notifier +from google.cloud.forseti.notifier.notifiers import email_violations from google.cloud.forseti.notifier.notifiers.inventory_summary import InventorySummary -from google.cloud.forseti.services.inventory.storage import DataAccess -from google.cloud.forseti.services.scanner import dao as scanner_dao + + # pylint: enable=line-too-long
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Notifier runner.""" import importlib import inspect # pylint: disable=line-too-long from google.cloud.forseti.common.util import logger from google.cloud.forseti.common.util import string_formats from google.cloud.forseti.notifier.notifiers.base_notification import BaseNotification from google.cloud.forseti.notifier.notifiers import cscc_notifier from google.cloud.forseti.notifier.notifiers.inventory_summary import InventorySummary from google.cloud.forseti.services.inventory.storage import DataAccess from google.cloud.forseti.services.scanner import dao as scanner_dao # pylint: enable=line-too-long LOGGER = logger.get_logger(__name__) # pylint: disable=inconsistent-return-statements def find_notifiers(notifier_name): """Get the first class in the given sub module Args: notifier_name (str): Name of the notifier. Return: class: The class in the sub module """ try: module = importlib.import_module( 'google.cloud.forseti.notifier.notifiers.{0}'.format( notifier_name)) for filename in dir(module): obj = getattr(module, filename) if inspect.isclass(obj) \ and issubclass(obj, BaseNotification) \ and obj is not BaseNotification: return obj except ImportError: LOGGER.exception('Can\'t import notifier %s', notifier_name) # pylint: enable=inconsistent-return-statements def convert_to_timestamp(violations): """Convert violation created_at_datetime to timestamp string. Args: violations (dict): List of violations as dict with created_at_datetime. Returns: list: List of violations as dict with created_at_datetime converted to timestamp string. """ for violation in violations: violation['created_at_datetime'] = ( violation['created_at_datetime'].strftime( string_formats.TIMESTAMP_TIMEZONE)) return violations # pylint: disable=too-many-branches,too-many-statements def run(inventory_index_id, scanner_index_id, progress_queue, service_config=None): """Run the notifier. Entry point when the notifier is run as a library. Args: inventory_index_id (int64): Inventory index id. scanner_index_id (int64): Scanner index id. progress_queue (Queue): The progress queue. service_config (ServiceConfig): Forseti 2.0 service configs. Returns: int: Status code. 
""" # pylint: disable=too-many-locals global_configs = service_config.get_global_config() notifier_configs = service_config.get_notifier_config() with service_config.scoped_session() as session: if scanner_index_id: inventory_index_id = ( DataAccess.get_inventory_index_id_by_scanner_index_id( session, scanner_index_id)) else: if not inventory_index_id: inventory_index_id = ( DataAccess.get_latest_inventory_index_id(session)) scanner_index_id = scanner_dao.get_latest_scanner_index_id( session, inventory_index_id) if not scanner_index_id: LOGGER.error( 'No success or partial success scanner index found for ' 'inventory index: "%s".', str(inventory_index_id)) else: # get violations violation_access = scanner_dao.ViolationAccess(session) violations = violation_access.list( scanner_index_id=scanner_index_id) violations_as_dict = [] for violation in violations: violations_as_dict.append( scanner_dao.convert_sqlalchemy_object_to_dict(violation)) violations_as_dict = convert_to_timestamp(violations_as_dict) violation_map = scanner_dao.map_by_resource(violations_as_dict) for retrieved_v in violation_map: log_message = ( 'Retrieved {} violations for resource \'{}\''.format( len(violation_map[retrieved_v]), retrieved_v)) LOGGER.info(log_message) progress_queue.put(log_message) # build notification notifiers notifiers = [] for resource in notifier_configs['resources']: if violation_map.get(resource['resource']) is None: log_message = 'Resource \'{}\' has no violations'.format( resource['resource']) progress_queue.put(log_message) LOGGER.info(log_message) continue if not resource['should_notify']: LOGGER.debug('Not notifying for: %s', resource['resource']) continue for notifier in resource['notifiers']: log_message = ( 'Running \'{}\' notifier for resource \'{}\''.format( notifier['name'], resource['resource'])) progress_queue.put(log_message) LOGGER.info(log_message) chosen_pipeline = find_notifiers(notifier['name']) notifiers.append(chosen_pipeline( resource['resource'], inventory_index_id, violation_map[resource['resource']], global_configs, notifier_configs, notifier['configuration'])) # Run the notifiers. for notifier in notifiers: notifier.run() # Run the CSCC notifier. violation_configs = notifier_configs.get('violation') if violation_configs: if violation_configs.get('cscc').get('enabled'): source_id = violation_configs.get('cscc').get('source_id') if source_id: # beta mode LOGGER.debug( 'Running CSCC notifier with beta API. source_id: ' '%s', source_id) (cscc_notifier.CsccNotifier(inventory_index_id) .run(violations_as_dict, source_id=source_id)) else: # alpha mode LOGGER.debug('Running CSCC notifier with alpha API.') gcs_path = ( violation_configs.get('cscc').get('gcs_path')) mode = violation_configs.get('cscc').get('mode') organization_id = ( violation_configs.get('cscc').get( 'organization_id')) (cscc_notifier.CsccNotifier(inventory_index_id) .run(violations_as_dict, gcs_path, mode, organization_id)) InventorySummary(service_config, inventory_index_id).run() log_message = 'Notification completed!' progress_queue.put(log_message) progress_queue.put(None) LOGGER.info(log_message) return 0 # pylint: enable=too-many-branches,too-many-statements
1
33,146
Remove these 2 blank lines.
forseti-security-forseti-security
py
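The request above ("Remove these 2 blank lines.") targets the import block the patch rearranges. A sketch of that block with the stray blank lines dropped, keeping the patch's own ordering and its pylint guard; it only runs where forseti-security is installed:

# pylint: disable=line-too-long
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.services.inventory.storage import DataAccess
from google.cloud.forseti.services.scanner import dao as scanner_dao
from google.cloud.forseti.notifier.notifiers.base_notification import BaseNotification
from google.cloud.forseti.notifier.notifiers import cscc_notifier
from google.cloud.forseti.notifier.notifiers import email_violations
from google.cloud.forseti.notifier.notifiers.inventory_summary import InventorySummary
# pylint: enable=line-too-long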
@@ -606,6 +606,7 @@ func (pool *TxPool) validateTx(tx types.Transaction, local bool) error { func (pool *TxPool) add(tx types.Transaction, local bool) (replaced bool, err error) { // If the transaction is already known, discard it hash := tx.Hash() + fmt.Printf("aa: %x\n", hash) if pool.all.Get(hash) != nil { log.Trace("Discarding already known transaction", "hash", hash) knownTxMeter.Mark(1)
1
// Copyright 2014 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package core import ( "errors" "fmt" "sort" "sync" "time" "github.com/holiman/uint256" "github.com/ledgerwatch/turbo-geth/common" "github.com/ledgerwatch/turbo-geth/common/prque" "github.com/ledgerwatch/turbo-geth/core/state" "github.com/ledgerwatch/turbo-geth/core/types" "github.com/ledgerwatch/turbo-geth/ethdb" "github.com/ledgerwatch/turbo-geth/event" "github.com/ledgerwatch/turbo-geth/log" "github.com/ledgerwatch/turbo-geth/metrics" "github.com/ledgerwatch/turbo-geth/params" ) const ( // chainHeadChanSize is the size of channel listening to ChainHeadEvent. chainHeadChanSize = 10 // txSlotSize is used to calculate how many data slots a single transaction // takes up based on its size. The slots are used as DoS protection, ensuring // that validating a new transaction remains a constant operation (in reality // O(maxslots), where max slots are 4 currently). txSlotSize = 32 * 1024 // txMaxSize is the maximum size a single transaction can have. This field has // non-trivial consequences: larger transactions are significantly harder and // more expensive to propagate; larger transactions also take more resources // to validate whether they fit into the pool or not. txMaxSize = 4 * txSlotSize // 128KB ) var ( // ErrAlreadyKnown is returned if the transactions is already contained // within the pool. ErrAlreadyKnown = errors.New("already known") // ErrInvalidSender is returned if the transaction contains an invalid signature. ErrInvalidSender = errors.New("invalid sender") // ErrUnderpriced is returned if a transaction's gas price is below the minimum // configured for the transaction pool. ErrUnderpriced = errors.New("transaction underpriced") // ErrTxPoolOverflow is returned if the transaction pool is full and can't accpet // another remote transaction. ErrTxPoolOverflow = errors.New("txpool is full") // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced // with a different one without the required price bump. ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") // ErrGasLimit is returned if a transaction's requested gas limit exceeds the // maximum allowance of the current block. ErrGasLimit = errors.New("exceeds block gas limit") // ErrNegativeValue is a sanity error to ensure no one is able to specify a // transaction with a negative value. ErrNegativeValue = errors.New("negative value") // ErrOversizedData is returned if the input data of a transaction is greater // than some meaningful limit a user might use. This is not a consensus error // making the transaction invalid, rather a DOS protection. 
ErrOversizedData = errors.New("oversized data") ) var ( evictionInterval = time.Minute // Time interval to check for evictable transactions statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats ) var ( // Metrics for the pending pool pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil) pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil) pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds // Metrics for the queued pool queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil) queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil) queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime // General tx metrics knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil) validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil) invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil) pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) localGauge = metrics.NewRegisteredGauge("txpool/local", nil) slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) ) // TxStatus is the current status of a transaction as seen by the pool. type TxStatus uint const ( TxStatusUnknown TxStatus = iota TxStatusQueued TxStatusPending TxStatusIncluded ) // TxPoolConfig are the configuration parameters of the transaction pool. type TxPoolConfig struct { Locals []common.Address // Addresses that should be treated by default as local NoLocals bool // Whether local transaction handling should be disabled Journal string // Journal of local transactions to survive node restarts Rejournal time.Duration // Time interval to regenerate the local transaction journal PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) AccountSlots uint64 // Number of executable transaction slots guaranteed per account GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts Lifetime time.Duration // Maximum amount of time non-executable transaction are queued StartOnInit bool } // DefaultTxPoolConfig contains the default configurations for the transaction // pool. var DefaultTxPoolConfig = TxPoolConfig{ Journal: "transactions.rlp", Rejournal: time.Hour, PriceLimit: 1, PriceBump: 10, AccountSlots: 16, GlobalSlots: 4096, AccountQueue: 64, GlobalQueue: 1024, Lifetime: 3 * time.Hour, } // sanitize checks the provided user configurations and changes anything that's // unreasonable or unworkable. 
func (config *TxPoolConfig) sanitize() TxPoolConfig { conf := *config if conf.Rejournal < time.Second { log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) conf.Rejournal = time.Second } if conf.PriceLimit < 1 { log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) conf.PriceLimit = DefaultTxPoolConfig.PriceLimit } if conf.PriceBump < 1 { log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) conf.PriceBump = DefaultTxPoolConfig.PriceBump } if conf.AccountSlots < 1 { log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) conf.AccountSlots = DefaultTxPoolConfig.AccountSlots } if conf.GlobalSlots < 1 { log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots } if conf.AccountQueue < 1 { log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) conf.AccountQueue = DefaultTxPoolConfig.AccountQueue } if conf.GlobalQueue < 1 { log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue } if conf.Lifetime < 1 { log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) conf.Lifetime = DefaultTxPoolConfig.Lifetime } return conf } // Backend contains all currently known transactions. Transactions // enter the pool when they are received from the network or submitted // locally. They exit the pool when they are included in the blockchain. // // The pool separates processable transactions (which can be applied to the // current state) and future transactions. Transactions move between those // two states over time as they are received and processed. type TxPool struct { config TxPoolConfig chainconfig *params.ChainConfig chaindb ethdb.Database gasPrice *uint256.Int txFeed event.Feed scope event.SubscriptionScope chainHeadCh chan ChainHeadEvent signer *types.Signer mu sync.RWMutex istanbul bool // Fork indicator whether we are in the istanbul stage. eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. 
pendingNonces *txNoncer // Pending state tracking virtual nonces currentState *state.IntraBlockState // Current state in the blockchain head currentMaxGas uint64 // Current gas limit for transaction caps locals *accountSet // Set of local transaction to exempt from eviction rules journal *txJournal // Journal of local transaction to back up to disk pending map[common.Address]*txList // All currently processable transactions queue map[common.Address]*txList // Queued but non-processable transactions beats map[common.Address]time.Time // Last heartbeat from each known account all *txLookup // All transactions to allow lookups priced *txPricedList // All transactions sorted by price reqResetCh chan *txpoolResetRequest reqPromoteCh chan *accountSet queueTxEventCh chan types.Transaction reorgDoneCh chan chan struct{} reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop wg sync.WaitGroup // tracks loop, scheduleReorgLoop isStarted bool initFns []func() error stopFns []func() error stopCh chan struct{} } type txpoolResetRequest struct { oldHead, newHead *types.Header } // NewTxPool creates a new transaction pool to gather, sort and filter inbound // transactions from the network. func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chaindb ethdb.Database, senderCacher *TxSenderCacher) *TxPool { // Sanitize the input to ensure no vulnerable gas prices are set config = (&config).sanitize() // Create the transaction pool with its initial settings pool := &TxPool{ config: config, chainconfig: chainconfig, signer: types.LatestSigner(chainconfig), pending: make(map[common.Address]*txList), queue: make(map[common.Address]*txList), beats: make(map[common.Address]time.Time), all: newTxLookup(), chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), reqResetCh: make(chan *txpoolResetRequest), reqPromoteCh: make(chan *accountSet), queueTxEventCh: make(chan types.Transaction), reorgDoneCh: make(chan chan struct{}), reorgShutdownCh: make(chan struct{}, 1), gasPrice: new(uint256.Int).SetUint64(config.PriceLimit), stopCh: make(chan struct{}), chaindb: chaindb, } pool.locals = newAccountSet(pool.signer) for _, addr := range pool.config.Locals { pool.locals.add(addr) } pool.priced = newTxPricedList(pool.all) return pool } func (pool *TxPool) Start(gasLimit uint64, headNumber uint64) error { pool.reorgShutdownCh = make(chan struct{}, 1) pool.locals = newAccountSet(pool.signer) for _, addr := range pool.config.Locals { pool.locals.add(addr) } pool.priced = newTxPricedList(pool.all) pool.resetHead(gasLimit, headNumber) // Start the reorg loop early so it can handle requests generated during journal loading. pool.wg.Add(1) go pool.scheduleReorgLoop() // If local transactions and journaling is enabled, load from disk if !pool.config.NoLocals && pool.config.Journal != "" { pool.journal = newTxJournal(pool.config.Journal) if err := pool.journal.load(pool.AddLocals); err != nil { log.Warn("Failed to load transaction journal", "err", err) } if err := pool.journal.rotate(pool.local()); err != nil { log.Warn("Failed to rotate transaction journal", "err", err) } } pool.wg.Add(1) go pool.loop() pool.isStarted = true log.Info("transaction pool started") return nil } // loop is the transaction pool's main event loop, waiting for and reacting to // outside blockchain events as well as for various reporting and transaction // eviction events. 
func (pool *TxPool) loop() { defer pool.wg.Done() var ( prevPending, prevQueued, prevStales int // Start the stats reporting and transaction eviction tickers report = time.NewTicker(statsReportInterval) evict = time.NewTicker(evictionInterval) journal = time.NewTicker(pool.config.Rejournal) ) defer report.Stop() defer evict.Stop() defer journal.Stop() for { select { // System shutdown. case <-pool.stopCh: common.SafeClose(pool.reorgShutdownCh) return // Handle stats reporting ticks case <-report.C: pool.mu.RLock() pending, queued := pool.stats() stales := pool.priced.stales pool.mu.RUnlock() if pending != prevPending || queued != prevQueued || stales != prevStales { log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) prevPending, prevQueued, prevStales = pending, queued, stales } // Handle inactive account transaction eviction case <-evict.C: pool.mu.Lock() for addr := range pool.queue { // Skip local transactions from the eviction mechanism if pool.locals.contains(addr) { continue } // Any non-locals old enough should be removed if time.Since(pool.beats[addr]) > pool.config.Lifetime { list := pool.queue[addr].Flatten() for _, tx := range list { pool.removeTxLocked(tx.Hash(), true) } queuedEvictionMeter.Mark(int64(len(list))) } } pool.mu.Unlock() // Handle local transaction journal rotation case <-journal.C: if pool.journal != nil { pool.mu.Lock() if err := pool.journal.rotate(pool.local()); err != nil { log.Warn("Failed to rotate local tx journal", "err", err) } pool.mu.Unlock() } } } } func (pool *TxPool) resetHead(blockGasLimit uint64, blockNumber uint64) { pool.mu.Lock() defer pool.mu.Unlock() pool.currentState = state.New(state.NewPlainStateReader(pool.chaindb)) pool.pendingNonces = newTxNoncer(pool.currentState) pool.currentMaxGas = blockGasLimit // Update all fork indicator by next pending block number. next := blockNumber + 1 pool.istanbul = pool.chainconfig.IsIstanbul(next) pool.eip2718 = pool.chainconfig.IsBerlin(next) } func (pool *TxPool) ResetHead(blockGasLimit uint64, blockNumber uint64) { pool.resetHead(blockGasLimit, blockNumber) <-pool.requestReset(nil, nil) } // Stop terminates the transaction pool. func (pool *TxPool) Stop() { // Unsubscribe all subscriptions registered from txpool if !pool.IsStarted() { return } close(pool.stopCh) // Unsubscribe subscriptions registered from blockchain pool.wg.Wait() if pool.journal != nil { pool.journal.close() } pool.isStarted = false log.Info("Transaction pool stopped") } // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and // starts sending event to the given channel. func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { return pool.scope.Track(pool.txFeed.Subscribe(ch)) } // GasPrice returns the current gas price enforced by the transaction pool. func (pool *TxPool) GasPrice() *uint256.Int { pool.mu.RLock() defer pool.mu.RUnlock() return new(uint256.Int).Set(pool.gasPrice) } // SetGasPrice updates the minimum price required by the transaction pool for a // new transaction, and drops all transactions below this threshold. func (pool *TxPool) SetGasPrice(price *uint256.Int) { pool.mu.Lock() defer pool.mu.Unlock() pool.gasPrice = price for _, tx := range pool.priced.Cap(price) { pool.RemoveTx(tx.Hash(), false) } log.Info("Transaction pool price threshold updated", "price", price) } // Nonce returns the next nonce of an account, with all transactions executable // by the pool already applied on top. 
func (pool *TxPool) Nonce(addr common.Address) uint64 { pool.mu.RLock() defer pool.mu.RUnlock() return pool.pendingNonces.get(addr) } // Stats retrieves the current pool stats, namely the number of pending and the // number of queued (non-executable) transactions. func (pool *TxPool) Stats() (int, int) { pool.mu.RLock() defer pool.mu.RUnlock() return pool.stats() } // stats retrieves the current pool stats, namely the number of pending and the // number of queued (non-executable) transactions. func (pool *TxPool) stats() (int, int) { pending := 0 for _, list := range pool.pending { pending += list.Len() } queued := 0 for _, list := range pool.queue { queued += list.Len() } return pending, queued } // Content retrieves the data content of the transaction pool, returning all the // pending as well as queued transactions, grouped by account and sorted by nonce. func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { pool.mu.Lock() defer pool.mu.Unlock() pending := make(map[common.Address]types.Transactions) for addr, list := range pool.pending { pending[addr] = list.Flatten() } queued := make(map[common.Address]types.Transactions) for addr, list := range pool.queue { queued[addr] = list.Flatten() } return pending, queued } // Pending retrieves all currently processable transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. func (pool *TxPool) Pending() (types.TransactionsGroupedBySender, error) { var pending types.TransactionsGroupedBySender if !pool.IsStarted() { return pending, nil } pool.mu.Lock() defer pool.mu.Unlock() pending = make(types.TransactionsGroupedBySender, len(pool.pending)) for _, list := range pool.pending { pending = append(pending, list.Flatten()) } return pending, nil } // Locals retrieves the accounts currently considered local by the pool. func (pool *TxPool) Locals() []common.Address { pool.mu.Lock() defer pool.mu.Unlock() return pool.locals.flatten() } // local retrieves all currently known local transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. func (pool *TxPool) local() map[common.Address]types.Transactions { txs := make(map[common.Address]types.Transactions) for addr := range pool.locals.accounts { if pending := pool.pending[addr]; pending != nil { txs[addr] = append(txs[addr], pending.Flatten()...) } if queued := pool.queue[addr]; queued != nil { txs[addr] = append(txs[addr], queued.Flatten()...) } } return txs } // validateTx checks whether a transaction is valid according to the consensus // rules and adheres to some heuristic limits of the local node (price and size). func (pool *TxPool) validateTx(tx types.Transaction, local bool) error { // Accept only legacy transactions until EIP-2718/2930 activates. if !pool.eip2718 && tx.Type() != types.LegacyTxType { return ErrTxTypeNotSupported } // Reject transactions over defined size to prevent DOS attacks if uint64(tx.Size()) > txMaxSize { return ErrOversizedData } // Transactions can't be negative. This may never happen using RLP decoded // transactions but may occur if you create a transaction using the RPC. if tx.GetValue().Sign() < 0 { return ErrNegativeValue } // Ensure the transaction doesn't exceed the current block limit gas. if pool.currentMaxGas < tx.GetGas() { return ErrGasLimit } // Make sure the transaction is signed properly. 
from, err := tx.Sender(*pool.signer) if err != nil { return ErrInvalidSender } // Drop non-local transactions under our own minimal accepted gas price if !local && pool.gasPrice.Gt(tx.GetPrice()) { return ErrUnderpriced } // Ensure the transaction adheres to nonce ordering if pool.currentState.GetNonce(from) > tx.GetNonce() { return ErrNonceTooLow } // Transactor should have enough funds to cover the costs // cost == V + GP * GL if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 { return ErrInsufficientFunds } // Ensure the transaction has more gas than the basic tx fee. intrGas, err := IntrinsicGas(tx.GetData(), tx.GetAccessList(), tx.GetTo() == nil, true, pool.istanbul) if err != nil { return err } if tx.GetGas() < intrGas { return ErrIntrinsicGas } return nil } // add validates a transaction and inserts it into the non-executable queue for later // pending promotion and execution. If the transaction is a replacement for an already // pending or queued one, it overwrites the previous transaction if its price is higher. // // If a newly added transaction is marked as local, its sending account will be // whitelisted, preventing any associated transaction from being dropped out of the pool // due to pricing constraints. func (pool *TxPool) add(tx types.Transaction, local bool) (replaced bool, err error) { // If the transaction is already known, discard it hash := tx.Hash() if pool.all.Get(hash) != nil { log.Trace("Discarding already known transaction", "hash", hash) knownTxMeter.Mark(1) return false, ErrAlreadyKnown } // Make the local flag. If it's from local source or it's from the network but // the sender is marked as local previously, treat it as the local transaction. isLocal := local || pool.locals.containsTx(tx) // If the transaction fails basic validation, discard it if pool.currentState != nil { if err = pool.validateTx(tx, isLocal); err != nil { log.Trace("Discarding invalid transaction", "hash", hash, "err", err) invalidTxMeter.Mark(1) return false, err } } // If the transaction pool is full, discard underpriced transactions if uint64(pool.all.Count()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { // If the new transaction is underpriced, don't accept it if !isLocal && pool.priced.Underpriced(tx) { log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GetPrice()) underpricedTxMeter.Mark(1) return false, ErrUnderpriced } // New transaction is better than our worse ones, make room for it. // If it's a local transaction, forcibly discard all available transactions. // Otherwise if we can't make enough room for new one, abort the operation. drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal) // Special case, we still can't make the room for the new remote one. if !isLocal && !success { log.Trace("Discarding overflown transaction", "hash", hash) overflowedTxMeter.Mark(1) return false, ErrTxPoolOverflow } // Kick out the underpriced remote transactions. 
for _, tx := range drop { log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GetPrice()) underpricedTxMeter.Mark(1) pool.removeTxLocked(tx.Hash(), false) } } // Try to replace an existing transaction in the pending pool from, _ := tx.Sender(*pool.signer) // already validated if list := pool.pending[from]; list != nil && list.Overlaps(tx) { // Nonce already pending, check if required price bump is met inserted, old := list.Add(tx, pool.config.PriceBump) if !inserted { pendingDiscardMeter.Mark(1) return false, ErrReplaceUnderpriced } // New transaction is better, replace old one if old != nil { pool.all.Remove(old.Hash()) pool.priced.Removed(1) pendingReplaceMeter.Mark(1) } pool.all.Add(tx, isLocal) pool.priced.Put(tx, isLocal) pool.journalTx(from, tx) pool.queueTxEvent(tx) log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.GetTo()) // Successful promotion, bump the heartbeat pool.beats[from] = time.Now() return old != nil, nil } // New transaction isn't replacing a pending one, push into queue replaced, err = pool.enqueueTx(hash, tx, isLocal, true) if err != nil { return false, err } // Mark local addresses and journal local transactions if local && !pool.locals.contains(from) { log.Info("Setting new local account", "address", from) pool.locals.add(from) pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time. } if isLocal { localGauge.Inc(1) } pool.journalTx(from, tx) log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.GetTo()) return replaced, nil } // enqueueTx inserts a new transaction into the non-executable transaction queue. // // Note, this method assumes the pool lock is held! func (pool *TxPool) enqueueTx(hash common.Hash, tx types.Transaction, local bool, addAll bool) (bool, error) { // Try to insert the transaction into the future queue from, _ := tx.Sender(*pool.signer) // already validated if pool.queue[from] == nil { pool.queue[from] = newTxList(false) } inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) if !inserted { // An older transaction was better, discard this queuedDiscardMeter.Mark(1) return false, ErrReplaceUnderpriced } // Discard any previous transaction and mark this if old != nil { pool.all.Remove(old.Hash()) pool.priced.Removed(1) queuedReplaceMeter.Mark(1) } else { // Nothing was replaced, bump the queued counter queuedGauge.Inc(1) } // If the transaction isn't in lookup set but it's expected to be there, // show the error log. if pool.all.Get(hash) == nil && !addAll { log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) } if addAll { pool.all.Add(tx, local) pool.priced.Put(tx, local) } // If we never record the heartbeat, do it right now. if _, exist := pool.beats[from]; !exist { pool.beats[from] = time.Now() } return old != nil, nil } // journalTx adds the specified transaction to the local disk journal if it is // deemed to have been sent from a local account. func (pool *TxPool) journalTx(from common.Address, tx types.Transaction) { // Only journal if it's enabled and the transaction is local if pool.journal == nil || !pool.locals.contains(from) { return } if err := pool.journal.insert(tx); err != nil { log.Warn("Failed to journal local transaction", "err", err) } } // promoteTx adds a transaction to the pending (processable) list of transactions // and returns whether it was inserted or an older was better. 
// // Note, this method assumes the pool lock is held! func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx types.Transaction) bool { // Try to insert the transaction into the pending queue if pool.pending[addr] == nil { pool.pending[addr] = newTxList(true) } list := pool.pending[addr] inserted, old := list.Add(tx, pool.config.PriceBump) if !inserted { // An older transaction was better, discard this pool.all.Remove(hash) pool.priced.Removed(1) pendingDiscardMeter.Mark(1) return false } // Otherwise discard any previous transaction and mark this if old != nil { pool.all.Remove(old.Hash()) pool.priced.Removed(1) pendingReplaceMeter.Mark(1) } else { // Nothing was replaced, bump the pending counter pendingGauge.Inc(1) } // Set the potentially new pending nonce and notify any subsystems of the new tx pool.pendingNonces.set(addr, tx.GetNonce()+1) // Successful promotion, bump the heartbeat pool.beats[addr] = time.Now() return true } // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the // senders as local ones, ensuring they go around the local pricing constraints. // // This method is used to add transactions from the RPC API and performs synchronous pool // reorganization and event propagation. func (pool *TxPool) AddLocals(txs []types.Transaction) []error { return pool.addTxs(txs, !pool.config.NoLocals, true) } // AddLocal enqueues a single local transaction into the pool if it is valid. This is // a convenience wrapper around AddLocals. func (pool *TxPool) AddLocal(tx types.Transaction) error { errs := pool.AddLocals([]types.Transaction{tx}) return errs[0] } // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the // senders are not among the locally tracked ones, full pricing constraints will apply. // // This method is used to add transactions from the p2p network and does not wait for pool // reorganization and internal event propagation. func (pool *TxPool) AddRemotes(txs []types.Transaction) []error { return pool.addTxs(txs, false, false) } // This is like AddRemotes, but waits for pool reorganization. Tests use this method. func (pool *TxPool) AddRemotesSync(txs []types.Transaction) []error { return pool.addTxs(txs, false, true) } // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. func (pool *TxPool) addRemoteSync(tx types.Transaction) error { errs := pool.AddRemotesSync([]types.Transaction{tx}) return errs[0] } // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience // wrapper around AddRemotes. // // Deprecated: use AddRemotes func (pool *TxPool) AddRemote(tx types.Transaction) error { errs := pool.AddRemotes([]types.Transaction{tx}) return errs[0] } // addTxs attempts to queue a batch of transactions if they are valid.
func (pool *TxPool) addTxs(txs []types.Transaction, local, sync bool) []error { // Filter out known ones without obtaining the pool lock or recovering signatures var ( errs = make([]error, len(txs)) news = make([]types.Transaction, 0, len(txs)) ) for i, tx := range txs { // If the transaction is known, pre-set the error slot if pool.all.Get(tx.Hash()) != nil { errs[i] = ErrAlreadyKnown knownTxMeter.Mark(1) continue } // Exclude transactions with invalid signatures as soon as // possible and cache senders in transactions before // obtaining lock _, err := tx.Sender(*pool.signer) if err != nil { errs[i] = ErrInvalidSender invalidTxMeter.Mark(1) continue } // Accumulate all unknown transactions for deeper processing news = append(news, tx) } if len(news) == 0 { return errs } // Process all the new transaction and merge any errors into the original slice pool.mu.Lock() newErrs, dirtyAddrs := pool.addTxsLocked(news, local) pool.mu.Unlock() var nilSlot = 0 for _, err := range newErrs { for errs[nilSlot] != nil { nilSlot++ } errs[nilSlot] = err nilSlot++ } // Reorg the pool internals if needed and return done := pool.requestPromoteExecutables(dirtyAddrs) if sync { <-done } return errs } // addTxsLocked attempts to queue a batch of transactions if they are valid. // The transaction pool lock must be held. func (pool *TxPool) addTxsLocked(txs []types.Transaction, local bool) ([]error, *accountSet) { dirty := newAccountSet(pool.signer) errs := make([]error, len(txs)) for i, tx := range txs { replaced, err := pool.add(tx, local) errs[i] = err if err == nil && !replaced { dirty.addTx(tx) } } validTxMeter.Mark(int64(len(dirty.accounts))) return errs, dirty } // Status returns the status (unknown/pending/queued) of a batch of transactions // identified by their hashes. func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { status := make([]TxStatus, len(hashes)) for i, hash := range hashes { tx := pool.Get(hash) if tx == nil { continue } from, _ := tx.Sender(*pool.signer) // already validated pool.mu.RLock() if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.GetNonce()] != nil { status[i] = TxStatusPending } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.GetNonce()] != nil { status[i] = TxStatusQueued } // implicit else: the tx may have been included into a block between // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct pool.mu.RUnlock() } return status } // Get returns a transaction if it is contained in the pool and nil otherwise. func (pool *TxPool) Get(hash common.Hash) types.Transaction { return pool.all.Get(hash) } // Has returns an indicator whether txpool has a transaction cached with the // given hash. func (pool *TxPool) Has(hash common.Hash) bool { return pool.all.Get(hash) != nil } // removeTx removes a single transaction from the queue, moving all subsequent // transactions back to the future queue. 
func (pool *TxPool) RemoveTx(hash common.Hash, outofbound bool) { pool.mu.Lock() defer pool.mu.Unlock() pool.removeTxLocked(hash, outofbound) } func (pool *TxPool) removeTxLocked(hash common.Hash, outofbound bool) { // Fetch the transaction we wish to delete tx := pool.all.Get(hash) if tx == nil { return } addr, _ := tx.Sender(*pool.signer) // already validated during insertion // Remove it from the list of known transactions pool.all.Remove(hash) if outofbound { pool.priced.Removed(1) } if pool.locals.contains(addr) { localGauge.Dec(1) } // Remove the transaction from the pending lists and reset the account nonce if pending := pool.pending[addr]; pending != nil { if removed, invalids := pending.Remove(tx); removed { // If no more pending transactions are left, remove the list if pending.Empty() { delete(pool.pending, addr) } // Postpone any invalidated transactions for _, tx := range invalids { // Internal shuffle shouldn't touch the lookup set. if _, err := pool.enqueueTx(tx.Hash(), tx, false, false); err != nil { log.Error("enqueueTx", "error", err) } } // Update the account nonce if needed pool.pendingNonces.setIfLower(addr, tx.GetNonce()) // Reduce the pending counter pendingGauge.Dec(int64(1 + len(invalids))) return } } // Transaction is in the future queue if future := pool.queue[addr]; future != nil { if removed, _ := future.Remove(tx); removed { // Reduce the queued counter queuedGauge.Dec(1) } if future.Empty() { delete(pool.queue, addr) delete(pool.beats, addr) } } } // requestPromoteExecutables requests a pool reset to the new head block. // The returned channel is closed when the reset has occurred. func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { select { case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}: return <-pool.reorgDoneCh case <-pool.reorgShutdownCh: return pool.reorgShutdownCh } } // requestPromoteExecutables requests transaction promotion checks for the given addresses. // The returned channel is closed when the promotion checks have occurred. func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} { select { case pool.reqPromoteCh <- set: return <-pool.reorgDoneCh case <-pool.reorgShutdownCh: return pool.reorgShutdownCh } } // queueTxEvent enqueues a transaction event to be sent in the next reorg run. func (pool *TxPool) queueTxEvent(tx types.Transaction) { select { case pool.queueTxEventCh <- tx: case <-pool.reorgShutdownCh: } } // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not // call those methods directly, but request them being run using requestReset and // requestPromoteExecutables instead. func (pool *TxPool) scheduleReorgLoop() { defer pool.wg.Done() var ( curDone chan struct{} // non-nil while runReorg is active nextDone = make(chan struct{}) launchNextRun bool dirtyAccounts *accountSet queuedEvents = make(map[common.Address]*txSortedMap) reset bool ) for { // Launch next background reorg if needed if curDone == nil && launchNextRun { // Run the background reorg and announcements go pool.runReorg(nextDone, dirtyAccounts, queuedEvents, reset) // Prepare everything for the next round of reorg curDone, nextDone = nextDone, make(chan struct{}) launchNextRun = false dirtyAccounts = nil reset = false queuedEvents = make(map[common.Address]*txSortedMap) } select { case <-pool.reqResetCh: // Reset request: update head if request is already pending. 
reset = true launchNextRun = true pool.reorgDoneCh <- nextDone case req := <-pool.reqPromoteCh: // Promote request: update address set if request is already pending. if dirtyAccounts == nil { dirtyAccounts = req } else { dirtyAccounts.merge(req) } launchNextRun = true pool.reorgDoneCh <- nextDone case tx := <-pool.queueTxEventCh: // Queue up the event, but don't schedule a reorg. It's up to the caller to // request one later if they want the events sent. addr, _ := tx.Sender(*pool.signer) if _, ok := queuedEvents[addr]; !ok { queuedEvents[addr] = newTxSortedMap() } queuedEvents[addr].Put(tx) case <-curDone: curDone = nil case <-pool.reorgShutdownCh: // Wait for current run to finish. if curDone != nil { <-curDone } close(nextDone) return } } } // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. func (pool *TxPool) runReorg(done chan struct{}, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap, reset bool) { defer close(done) var promoteAddrs []common.Address if dirtyAccounts != nil && !reset { // Only dirty accounts need to be promoted, unless we're resetting. // For resets, all addresses in the tx queue will be promoted and // the flatten operation can be avoided. promoteAddrs = dirtyAccounts.flatten() } pool.mu.Lock() if reset { // Nonces were reset, discard any events that became stale for addr := range events { events[addr].Forward(pool.pendingNonces.get(addr)) if events[addr].Len() == 0 { delete(events, addr) } } // Reset needs promote for all addresses promoteAddrs = make([]common.Address, 0, len(pool.queue)) for addr := range pool.queue { promoteAddrs = append(promoteAddrs, addr) } } // Check for pending transactions for every account that sent new ones promoted := pool.promoteExecutables(promoteAddrs) // If a new block appeared, validate the pool of pending transactions. This will // remove any transaction that has been included in the block or was invalidated // because of another transaction (e.g. higher gas price). if reset { pool.demoteUnexecutables() } // Ensure pool.queue and pool.pending sizes stay within the configured limits. pool.truncatePending() pool.truncateQueue() // Update all accounts to the latest known pending nonce for addr, list := range pool.pending { highestPending := list.LastElement() pool.pendingNonces.set(addr, highestPending.GetNonce()+1) } pool.mu.Unlock() // Notify subsystems for newly added transactions for _, tx := range promoted { addr, _ := tx.Sender(*pool.signer) if _, ok := events[addr]; !ok { events[addr] = newTxSortedMap() } events[addr].Put(tx) } if len(events) > 0 { var txs []types.Transaction for _, set := range events { txs = append(txs, set.Flatten()...) } pool.txFeed.Send(NewTxsEvent{txs}) } } // promoteExecutables moves transactions that have become processable from the // future queue to the set of pending transactions. During this process, all // invalidated transactions (low nonce, low balance) are deleted. 
func (pool *TxPool) promoteExecutables(accounts []common.Address) []types.Transaction { // Track the promoted transactions to broadcast them at once var promoted []types.Transaction // Iterate over all accounts and promote any executable transactions for _, addr := range accounts { list := pool.queue[addr] if list == nil { continue // Just in case someone calls with a non existing account } // Drop all transactions that are deemed too old (low nonce) forwards := list.Forward(pool.currentState.GetNonce(addr)) for _, tx := range forwards { hash := tx.Hash() pool.all.Remove(hash) } log.Trace("Removed old queued transactions", "count", len(forwards)) // Drop all transactions that are too costly (low balance or out of gas) drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) for _, tx := range drops { hash := tx.Hash() pool.all.Remove(hash) } log.Trace("Removed unpayable queued transactions", "count", len(drops)) queuedNofundsMeter.Mark(int64(len(drops))) // Gather all executable transactions and promote them readies := list.Ready(pool.pendingNonces.get(addr)) for _, tx := range readies { hash := tx.Hash() if pool.promoteTx(addr, hash, tx) { promoted = append(promoted, tx) } } log.Trace("Promoted queued transactions", "count", len(promoted)) queuedGauge.Dec(int64(len(readies))) // Drop all transactions over the allowed limit var caps types.Transactions if !pool.locals.contains(addr) { caps = list.Cap(int(pool.config.AccountQueue)) for _, tx := range caps { hash := tx.Hash() pool.all.Remove(hash) log.Trace("Removed cap-exceeding queued transaction", "hash", hash) } queuedRateLimitMeter.Mark(int64(len(caps))) } // Mark all the items dropped as removed pool.priced.Removed(len(forwards) + len(drops) + len(caps)) queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) if pool.locals.contains(addr) { localGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) } // Delete the entire queue entry if it became empty. if list.Empty() { delete(pool.queue, addr) delete(pool.beats, addr) } } return promoted } // truncatePending removes transactions from the pending queue if the pool is above the // pending limit. The algorithm tries to reduce transaction counts by an approximately // equal number for all accounts with many pending transactions.
func (pool *TxPool) truncatePending() { pending := uint64(0) for _, list := range pool.pending { pending += uint64(list.Len()) } if pending <= pool.config.GlobalSlots { return } pendingBeforeCap := pending // Assemble a spam order to penalize large transactors first spammers := prque.New(nil) for addr, list := range pool.pending { // Only evict transactions from high rollers if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { spammers.Push(addr, int64(list.Len())) } } // Gradually drop transactions from offenders offenders := []common.Address{} for pending > pool.config.GlobalSlots && !spammers.Empty() { // Retrieve the next offender if not local address offender, _ := spammers.Pop() offenders = append(offenders, offender.(common.Address)) // Equalize balances until all the same or below threshold if len(offenders) > 1 { // Calculate the equalization threshold for all current offenders threshold := pool.pending[offender.(common.Address)].Len() // Iteratively reduce all offenders until below limit or threshold reached for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { for i := 0; i < len(offenders)-1; i++ { list := pool.pending[offenders[i]] caps := list.Cap(list.Len() - 1) for _, tx := range caps { // Drop the transaction from the global pools too hash := tx.Hash() pool.all.Remove(hash) // Update the account nonce to the dropped transaction pool.pendingNonces.setIfLower(offenders[i], tx.GetNonce()) log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } pool.priced.Removed(len(caps)) pendingGauge.Dec(int64(len(caps))) if pool.locals.contains(offenders[i]) { localGauge.Dec(int64(len(caps))) } pending-- } } } } // If still above threshold, reduce to limit or min allowance if pending > pool.config.GlobalSlots && len(offenders) > 0 { for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots { for _, addr := range offenders { list := pool.pending[addr] caps := list.Cap(list.Len() - 1) for _, tx := range caps { // Drop the transaction from the global pools too hash := tx.Hash() pool.all.Remove(hash) // Update the account nonce to the dropped transaction pool.pendingNonces.setIfLower(addr, tx.GetNonce()) log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } pool.priced.Removed(len(caps)) pendingGauge.Dec(int64(len(caps))) if pool.locals.contains(addr) { localGauge.Dec(int64(len(caps))) } pending-- } } } pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending)) } // truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *TxPool) truncateQueue() { queued := uint64(0) for _, list := range pool.queue { queued += uint64(list.Len()) } if queued <= pool.config.GlobalQueue { return } // Sort all accounts with queued transactions by heartbeat addresses := make(addressesByHeartbeat, 0, len(pool.queue)) for addr := range pool.queue { if !pool.locals.contains(addr) { // don't drop locals addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) } } sort.Sort(addresses) // Drop transactions until the total is below the limit or only locals remain for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { addr := addresses[len(addresses)-1] list := pool.queue[addr.address] addresses = addresses[:len(addresses)-1] // Drop all transactions if they are less than the overflow if size := uint64(list.Len()); size <= drop { for _, tx := range list.Flatten() { pool.removeTxLocked(tx.Hash(), true) } drop -= size queuedRateLimitMeter.Mark(int64(size)) continue } // Otherwise drop only last few transactions txs := list.Flatten() for i := len(txs) - 1; i >= 0 && drop > 0; i-- { pool.removeTxLocked(txs[i].Hash(), true) drop-- queuedRateLimitMeter.Mark(1) } } } // demoteUnexecutables removes invalid and processed transactions from the pools // executable/pending queue and any subsequent transactions that become unexecutable // are moved back into the future queue. func (pool *TxPool) demoteUnexecutables() { // Iterate over all accounts and demote any non-executable transactions for addr, list := range pool.pending { nonce := pool.currentState.GetNonce(addr) // Drop all transactions that are deemed too old (low nonce) olds := list.Forward(nonce) for _, tx := range olds { hash := tx.Hash() pool.all.Remove(hash) log.Trace("Removed old pending transaction", "hash", hash) } // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) for _, tx := range drops { hash := tx.Hash() log.Trace("Removed unpayable pending transaction", "hash", hash) pool.all.Remove(hash) } pool.priced.Removed(len(olds) + len(drops)) pendingNofundsMeter.Mark(int64(len(drops))) for _, tx := range invalids { hash := tx.Hash() log.Trace("Demoting pending transaction", "hash", hash) // Internal shuffle shouldn't touch the lookup set. if _, err := pool.enqueueTx(hash, tx, false, false); err != nil { log.Error("enqueueTx", "error", err) } } pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) if pool.locals.contains(addr) { localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) } // If there's a gap in front, alert (should never happen) and postpone all transactions if list.Len() > 0 && list.txs.Get(nonce) == nil { gapped := list.Cap(0) for _, tx := range gapped { hash := tx.Hash() log.Error("Demoting invalidated transaction", "hash", hash) // Internal shuffle shouldn't touch the lookup set. if _, err := pool.enqueueTx(hash, tx, false, false); err != nil { log.Error("enqueueTx", "error", err) } } pendingGauge.Dec(int64(len(gapped))) // This might happen in a reorg, so log it to the metering blockReorgInvalidatedTx.Mark(int64(len(gapped))) } // Delete the entire pending entry if it became empty. if list.Empty() { delete(pool.pending, addr) } } } func (pool *TxPool) IsStarted() bool { if pool == nil { return false } return pool.isStarted } func (pool *TxPool) AddInit(fns ...func() error) { if pool == nil { return } pool.initFns = append(pool.initFns, fns...) 
} func (pool *TxPool) RunInit() error { if pool == nil { return errors.New("can't init a nil transaction pool") } if pool.IsStarted() { return errors.New("transaction pool is already started") } var err error for _, fn := range pool.initFns { if err = fn(); err != nil { return fmt.Errorf("can't init a transaction pool: %w", err) } } return nil } func (pool *TxPool) AddStop(fns ...func() error) { if pool == nil { return } pool.stopFns = append(pool.stopFns, fns...) } func (pool *TxPool) RunStop() error { if pool == nil { return errors.New("can't stop a nil transaction pool") } if !pool.IsStarted() { return errors.New("transaction pool is already stopped") } var err error for _, fn := range pool.stopFns { if err = fn(); err != nil { return fmt.Errorf("can't stop a transaction pool: %w", err) } } return nil } // addressByHeartbeat is an account address tagged with its last activity timestamp. type addressByHeartbeat struct { address common.Address heartbeat time.Time } type addressesByHeartbeat []addressByHeartbeat func (a addressesByHeartbeat) Len() int { return len(a) } func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // accountSet is simply a set of addresses to check for existence, and a signer // capable of deriving addresses from transactions. type accountSet struct { accounts map[common.Address]struct{} signer *types.Signer cache *[]common.Address } // newAccountSet creates a new address set with an associated signer for sender // derivations. func newAccountSet(signer *types.Signer, addrs ...common.Address) *accountSet { as := &accountSet{ accounts: make(map[common.Address]struct{}), signer: signer, } for _, addr := range addrs { as.add(addr) } return as } // contains checks if a given address is contained within the set. func (as *accountSet) contains(addr common.Address) bool { _, exist := as.accounts[addr] return exist } // containsTx checks if the sender of a given tx is within the set. If the sender // cannot be derived, this method returns false. func (as *accountSet) containsTx(tx types.Transaction) bool { if addr, err := tx.Sender(*as.signer); err == nil { return as.contains(addr) } return false } // add inserts a new address into the set to track. func (as *accountSet) add(addr common.Address) { as.accounts[addr] = struct{}{} as.cache = nil } // addTx adds the sender of tx into the set. func (as *accountSet) addTx(tx types.Transaction) { if addr, err := tx.Sender(*as.signer); err == nil { as.add(addr) } } // flatten returns the list of addresses within this set, also caching it for later // reuse. The returned slice should not be changed! func (as *accountSet) flatten() []common.Address { if as.cache == nil { accounts := make([]common.Address, 0, len(as.accounts)) for account := range as.accounts { accounts = append(accounts, account) } as.cache = &accounts } return *as.cache } // merge adds all addresses from the 'other' set into 'as'. func (as *accountSet) merge(other *accountSet) { for addr := range other.accounts { as.accounts[addr] = struct{}{} } as.cache = nil } // txLookup is used internally by Backend to track transactions while allowing lookup without // mutex contention. // // Note, although this type is properly protected against concurrent access, it // is **not** a type that should ever be mutated or even exposed outside of the // transaction pool, since its internal state is tightly coupled with the pools // internal mechanisms. 
The sole purpose of the type is to permit out-of-bound // peeking into the pool in Backend.Get without having to acquire the widely scoped // Backend.mu mutex. // // This lookup set combines the notion of "local transactions", which is useful // to build upper-level structure. type txLookup struct { slots int lock sync.RWMutex locals map[common.Hash]types.Transaction remotes map[common.Hash]types.Transaction } // newTxLookup returns a new txLookup structure. func newTxLookup() *txLookup { return &txLookup{ locals: make(map[common.Hash]types.Transaction), remotes: make(map[common.Hash]types.Transaction), } } // Range calls f on each key and value present in the map. The callback passed // should return the indicator whether the iteration needs to be continued. // Callers need to specify which set (or both) to be iterated. func (t *txLookup) Range(f func(hash common.Hash, tx types.Transaction, local bool) bool, local bool, remote bool) { t.lock.RLock() defer t.lock.RUnlock() if local { for key, value := range t.locals { if !f(key, value, true) { return } } } if remote { for key, value := range t.remotes { if !f(key, value, false) { return } } } } // Get returns a transaction if it exists in the lookup, or nil if not found. func (t *txLookup) Get(hash common.Hash) types.Transaction { t.lock.RLock() defer t.lock.RUnlock() if tx := t.locals[hash]; tx != nil { return tx } return t.remotes[hash] } // GetLocal returns a transaction if it exists in the lookup, or nil if not found. func (t *txLookup) GetLocal(hash common.Hash) types.Transaction { t.lock.RLock() defer t.lock.RUnlock() return t.locals[hash] } // GetRemote returns a transaction if it exists in the lookup, or nil if not found. func (t *txLookup) GetRemote(hash common.Hash) types.Transaction { t.lock.RLock() defer t.lock.RUnlock() return t.remotes[hash] } // Count returns the current number of transactions in the lookup. func (t *txLookup) Count() int { t.lock.RLock() defer t.lock.RUnlock() return len(t.locals) + len(t.remotes) } // LocalCount returns the current number of local transactions in the lookup. func (t *txLookup) LocalCount() int { t.lock.RLock() defer t.lock.RUnlock() return len(t.locals) } // RemoteCount returns the current number of remote transactions in the lookup. func (t *txLookup) RemoteCount() int { t.lock.RLock() defer t.lock.RUnlock() return len(t.remotes) } // Slots returns the current number of slots used in the lookup. func (t *txLookup) Slots() int { t.lock.RLock() defer t.lock.RUnlock() return t.slots } // Add adds a transaction to the lookup. func (t *txLookup) Add(tx types.Transaction, local bool) { t.lock.Lock() defer t.lock.Unlock() t.slots += numSlots(tx) slotsGauge.Update(int64(t.slots)) if local { t.locals[tx.Hash()] = tx } else { t.remotes[tx.Hash()] = tx } } // Remove removes a transaction from the lookup. func (t *txLookup) Remove(hash common.Hash) { t.lock.Lock() defer t.lock.Unlock() tx, ok := t.locals[hash] if !ok { tx, ok = t.remotes[hash] } if !ok { log.Error("No transaction found to be deleted", "hash", hash) return } t.slots -= numSlots(tx) slotsGauge.Update(int64(t.slots)) delete(t.locals, hash) delete(t.remotes, hash) } // RemoteToLocals migrates the transactions belongs to the given locals to locals // set. The assumption is held the locals set is thread-safe to be used. 
func (t *txLookup) RemoteToLocals(locals *accountSet) int { t.lock.Lock() defer t.lock.Unlock() var migrated int for hash, tx := range t.remotes { if locals.containsTx(tx) { t.locals[hash] = tx delete(t.remotes, hash) migrated++ } } return migrated } // numSlots calculates the number of slots needed for a single transaction. func numSlots(tx types.Transaction) int { return int((tx.Size() + txSlotSize - 1) / txSlotSize) }
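The numSlots helper above charges every transaction whole slots, rounding its encoded size up, and add() starts discarding underpriced entries once slot usage would exceed GlobalSlots + GlobalQueue. A minimal standalone sketch of that arithmetic, assuming the upstream slot size of 32 KiB (txSlotSize itself is defined outside this excerpt):

package main

import "fmt"

// Assumed constants: txSlotSize is 32 KiB upstream; the budgets mirror
// DefaultTxPoolConfig above (GlobalSlots=4096, GlobalQueue=1024).
const (
	txSlotSize  = 32 * 1024
	globalSlots = 4096
	globalQueue = 1024
)

// numSlots mirrors the pool's rounding: ceil(size / txSlotSize).
func numSlots(size int) int {
	return (size + txSlotSize - 1) / txSlotSize
}

func main() {
	fmt.Println(numSlots(200))        // a small transfer still costs 1 slot
	fmt.Println(numSlots(100 * 1024)) // 100 KiB of calldata costs 4 slots
	// Capacity before the pool starts evicting: 5120 slots in total.
	fmt.Println(globalSlots + globalQueue)
}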
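PriceBump, documented above as the minimum percentage increase required to replace a same-nonce transaction, is the value both list.Add calls receive via pool.config.PriceBump. A small sketch of the resulting threshold with the default bump of 10, using hypothetical prices; it mirrors the intent of the check, not necessarily the exact rounding inside txList.Add, which is outside this excerpt:

package main

import (
	"fmt"
	"math/big"
)

// minReplacementPrice returns the smallest gas price that clears a
// bumpPercent price bump over an existing same-nonce transaction.
func minReplacementPrice(oldPrice *big.Int, bumpPercent int64) *big.Int {
	bumped := new(big.Int).Mul(oldPrice, big.NewInt(100+bumpPercent))
	return bumped.Div(bumped, big.NewInt(100))
}

func main() {
	old := big.NewInt(20_000_000_000) // 20 gwei, hypothetical pending tx
	// With the default PriceBump of 10, a replacement must pay at least 22 gwei.
	fmt.Println(minReplacementPrice(old, 10)) // 22000000000
}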
1
22,173
Want to remove this?
ledgerwatch-erigon
go
@@ -2,6 +2,7 @@ package sql import ( "github.com/jinzhu/gorm" + // gorm postgres dialect init registration _ "github.com/jinzhu/gorm/dialects/postgres" )
1
package sql import ( "github.com/jinzhu/gorm" _ "github.com/jinzhu/gorm/dialects/postgres" ) type postgres struct{} func (p postgres) connect(connectionString string) (*gorm.DB, error) { db, err := gorm.Open("postgres", connectionString) if err != nil { return nil, sqlError.Wrap(err) } return db, nil }
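The patch above adds a justification comment to the blank (underscore) import; golint's blank-imports check asks that such imports outside a main or test package carry a comment explaining the side effect being relied on, which appears to be what prompted the change. A minimal sketch of the convention, with the dialect import taken from the file above and a hypothetical helper showing the registered dialect in use:

// Package sql wires gorm up to the Postgres dialect.
package sql

import (
	"github.com/jinzhu/gorm"

	// gorm postgres dialect init registration (imported for its side effects only)
	_ "github.com/jinzhu/gorm/dialects/postgres"
)

// openPostgres is a hypothetical helper; the dialect name matches the
// registration performed by the blank import above.
func openPostgres(connectionString string) (*gorm.DB, error) {
	return gorm.Open("postgres", connectionString)
}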
1
10,630
is this a golint requirement? huh. mixed feelings :)
spiffe-spire
go
@@ -33,6 +33,14 @@ public class BCryptPasswordEncoderTests { String result = encoder.encode("password"); assertThat(result.equals("password")).isFalse(); assertThat(encoder.matches("password", result)).isTrue(); + encoder = new BCryptPasswordEncoder(BCryptPasswordEncoder.BCRYPT_VERSION_2B); + result = encoder.encode("password"); + assertThat(result.equals("password")).isFalse(); + assertThat(encoder.matches("password", result)).isTrue(); + encoder = new BCryptPasswordEncoder(BCryptPasswordEncoder.BCRYPT_VERSION_2Y); + result = encoder.encode("password"); + assertThat(result.equals("password")).isFalse(); + assertThat(encoder.matches("password", result)).isTrue(); } @Test
1
/* * Copyright 2002-2011 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.security.crypto.bcrypt; import org.junit.Test; import java.security.SecureRandom; import static org.assertj.core.api.Assertions.assertThat; /** * @author Dave Syer * */ public class BCryptPasswordEncoderTests { @Test public void matches() { BCryptPasswordEncoder encoder = new BCryptPasswordEncoder(); String result = encoder.encode("password"); assertThat(result.equals("password")).isFalse(); assertThat(encoder.matches("password", result)).isTrue(); } @Test public void unicode() { BCryptPasswordEncoder encoder = new BCryptPasswordEncoder(); String result = encoder.encode("passw\u9292rd"); assertThat(encoder.matches("pass\u9292\u9292rd", result)).isFalse(); assertThat(encoder.matches("passw\u9292rd", result)).isTrue(); } @Test public void notMatches() { BCryptPasswordEncoder encoder = new BCryptPasswordEncoder(); String result = encoder.encode("password"); assertThat(encoder.matches("bogus", result)).isFalse(); } @Test public void customStrength() { BCryptPasswordEncoder encoder = new BCryptPasswordEncoder(8); String result = encoder.encode("password"); assertThat(encoder.matches("password", result)).isTrue(); } @Test(expected = IllegalArgumentException.class) public void badLowCustomStrength() { new BCryptPasswordEncoder(3); } @Test(expected = IllegalArgumentException.class) public void badHighCustomStrength() { new BCryptPasswordEncoder(32); } @Test public void customRandom() { BCryptPasswordEncoder encoder = new BCryptPasswordEncoder(8, new SecureRandom()); String result = encoder.encode("password"); assertThat(encoder.matches("password", result)).isTrue(); } @Test public void doesntMatchNullEncodedValue() { BCryptPasswordEncoder encoder = new BCryptPasswordEncoder(); assertThat(encoder.matches("password", null)).isFalse(); } @Test public void doesntMatchEmptyEncodedValue() { BCryptPasswordEncoder encoder = new BCryptPasswordEncoder(); assertThat(encoder.matches("password", "")).isFalse(); } @Test public void doesntMatchBogusEncodedValue() { BCryptPasswordEncoder encoder = new BCryptPasswordEncoder(); assertThat(encoder.matches("password", "012345678901234567890123456789")).isFalse(); } }
1
11,126
Generally with these tests please split up these tests so that there is a different test for each method
spring-projects-spring-security
java
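The reviewer's point above is about test granularity: one focused test per behaviour, rather than appending more encoder variants and assertions to a single method. A sketch of that structure, shown here with Go's testing package and golang.org/x/crypto/bcrypt purely as a stand-in for the Java encoder (names and scenarios are illustrative):

package bcrypt_test

import (
	"testing"

	"golang.org/x/crypto/bcrypt"
)

// Each scenario gets its own test, so a failure names the behaviour directly.

func TestEncodeDoesNotReturnPlaintext(t *testing.T) {
	hash, err := bcrypt.GenerateFromPassword([]byte("password"), bcrypt.DefaultCost)
	if err != nil {
		t.Fatal(err)
	}
	if string(hash) == "password" {
		t.Fatal("hash must differ from the plaintext")
	}
}

func TestMatchesCorrectPassword(t *testing.T) {
	hash, _ := bcrypt.GenerateFromPassword([]byte("password"), bcrypt.DefaultCost)
	if err := bcrypt.CompareHashAndPassword(hash, []byte("password")); err != nil {
		t.Fatalf("expected match, got %v", err)
	}
}

func TestDoesNotMatchWrongPassword(t *testing.T) {
	hash, _ := bcrypt.GenerateFromPassword([]byte("password"), bcrypt.DefaultCost)
	if err := bcrypt.CompareHashAndPassword(hash, []byte("bogus")); err == nil {
		t.Fatal("expected mismatch for a wrong password")
	}
}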
@@ -1394,16 +1394,16 @@ void nano::bootstrap_attempt::lazy_add (nano::block_hash const & hash_a) } } -void nano::bootstrap_attempt::lazy_pull_flush () +void nano::bootstrap_attempt::lazy_pull_flush (std::unique_lock<std::mutex> & lock_a) { - std::unique_lock<std::mutex> lock (lazy_mutex); + std::unique_lock<std::mutex> lazy_lock (lazy_mutex); auto transaction (node->store.tx_begin_read ()); for (auto & pull_start : lazy_pulls) { // Recheck if block was already processed if (lazy_blocks.find (pull_start) == lazy_blocks.end () && !node->store.block_exists (transaction, pull_start)) { - add_pull (nano::pull_info (pull_start, pull_start, nano::block_hash (0), lazy_max_pull_blocks)); + pulls.push_back (nano::pull_info (pull_start, pull_start, nano::block_hash (0), lazy_max_pull_blocks)); } } lazy_pulls.clear ();
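The patch above threads the caller's already-held lock into lazy_pull_flush and appends to pulls directly, rather than calling add_pull, which presumably takes the mutex itself; the Go pool earlier in this dump follows the same convention with RemoveTx versus removeTxLocked. A minimal sketch of that pattern (all names hypothetical):

package main

import (
	"fmt"
	"sync"
)

// pullQueue shows the "exported method locks, xxxLocked helper assumes the
// lock is held" convention. Go's sync.Mutex is not re-entrant, so calling the
// locking entry point from code that already holds mu would deadlock.
type pullQueue struct {
	mu    sync.Mutex
	pulls []string
}

// Add is the public entry point: it takes the lock itself.
func (q *pullQueue) Add(p string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.addLocked(p)
}

// addLocked must only be called with q.mu already held.
func (q *pullQueue) addLocked(p string) {
	q.pulls = append(q.pulls, p)
}

// Flush holds the lock once for the whole operation and appends through the
// locked helper instead of re-entering Add.
func (q *pullQueue) Flush(extra []string) []string {
	q.mu.Lock()
	defer q.mu.Unlock()
	for _, p := range extra {
		q.addLocked(p)
	}
	out := q.pulls
	q.pulls = nil
	return out
}

func main() {
	q := &pullQueue{}
	q.Add("pull-1")
	fmt.Println(q.Flush([]string{"pull-2"})) // [pull-1 pull-2]
}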
1
#include <nano/node/bootstrap.hpp> #include <nano/node/common.hpp> #include <nano/node/node.hpp> #include <algorithm> #include <boost/log/trivial.hpp> constexpr double bootstrap_connection_scale_target_blocks = 50000.0; constexpr double bootstrap_connection_warmup_time_sec = 5.0; constexpr double bootstrap_minimum_blocks_per_sec = 10.0; constexpr double bootstrap_minimum_elapsed_seconds_blockrate = 0.02; constexpr double bootstrap_minimum_frontier_blocks_per_sec = 1000.0; constexpr unsigned bootstrap_frontier_retry_limit = 16; constexpr double bootstrap_minimum_termination_time_sec = 30.0; constexpr unsigned bootstrap_max_new_connections = 10; constexpr unsigned bulk_push_cost_limit = 200; size_t constexpr nano::frontier_req_client::size_frontier; nano::socket::socket (std::shared_ptr<nano::node> node_a) : socket_m (node_a->io_ctx), cutoff (std::numeric_limits<uint64_t>::max ()), node (node_a) { } void nano::socket::async_connect (nano::tcp_endpoint const & endpoint_a, std::function<void(boost::system::error_code const &)> callback_a) { checkup (); auto this_l (shared_from_this ()); start (); socket_m.async_connect (endpoint_a, [this_l, callback_a](boost::system::error_code const & ec) { this_l->stop (); callback_a (ec); }); } void nano::socket::async_read (std::shared_ptr<std::vector<uint8_t>> buffer_a, size_t size_a, std::function<void(boost::system::error_code const &, size_t)> callback_a) { assert (size_a <= buffer_a->size ()); auto this_l (shared_from_this ()); if (socket_m.is_open ()) { start (); boost::asio::async_read (socket_m, boost::asio::buffer (buffer_a->data (), size_a), [this_l, callback_a](boost::system::error_code const & ec, size_t size_a) { this_l->node->stats.add (nano::stat::type::traffic_bootstrap, nano::stat::dir::in, size_a); this_l->stop (); callback_a (ec, size_a); }); } } void nano::socket::async_write (std::shared_ptr<std::vector<uint8_t>> buffer_a, std::function<void(boost::system::error_code const &, size_t)> callback_a) { auto this_l (shared_from_this ()); if (socket_m.is_open ()) { start (); boost::asio::async_write (socket_m, boost::asio::buffer (buffer_a->data (), buffer_a->size ()), [this_l, callback_a, buffer_a](boost::system::error_code const & ec, size_t size_a) { this_l->node->stats.add (nano::stat::type::traffic_bootstrap, nano::stat::dir::out, size_a); this_l->stop (); callback_a (ec, size_a); }); } } void nano::socket::start (std::chrono::steady_clock::time_point timeout_a) { cutoff = timeout_a.time_since_epoch ().count (); } void nano::socket::stop () { cutoff = std::numeric_limits<uint64_t>::max (); } void nano::socket::close () { if (socket_m.is_open ()) { try { socket_m.shutdown (boost::asio::ip::tcp::socket::shutdown_both); } catch (...) { /* Ignore spurious exceptions; shutdown is best effort. 
*/ } socket_m.close (); } } void nano::socket::checkup () { std::weak_ptr<nano::socket> this_w (shared_from_this ()); node->alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (10), [this_w]() { if (auto this_l = this_w.lock ()) { if (this_l->cutoff != std::numeric_limits<uint64_t>::max () && this_l->cutoff < static_cast<uint64_t> (std::chrono::steady_clock::now ().time_since_epoch ().count ())) { if (this_l->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Disconnecting from %1% due to timeout") % this_l->remote_endpoint ()); } this_l->close (); } else { this_l->checkup (); } } }); } nano::tcp_endpoint nano::socket::remote_endpoint () { nano::tcp_endpoint endpoint; if (socket_m.is_open ()) { boost::system::error_code remote_endpoint_error; endpoint = socket_m.remote_endpoint (remote_endpoint_error); } return endpoint; } nano::bootstrap_client::bootstrap_client (std::shared_ptr<nano::node> node_a, std::shared_ptr<nano::bootstrap_attempt> attempt_a, nano::tcp_endpoint const & endpoint_a) : node (node_a), attempt (attempt_a), socket (std::make_shared<nano::socket> (node_a)), receive_buffer (std::make_shared<std::vector<uint8_t>> ()), endpoint (endpoint_a), start_time (std::chrono::steady_clock::now ()), block_count (0), pending_stop (false), hard_stop (false) { ++attempt->connections; receive_buffer->resize (256); } nano::bootstrap_client::~bootstrap_client () { --attempt->connections; } double nano::bootstrap_client::block_rate () const { auto elapsed = std::max (elapsed_seconds (), bootstrap_minimum_elapsed_seconds_blockrate); return static_cast<double> (block_count.load () / elapsed); } double nano::bootstrap_client::elapsed_seconds () const { return std::chrono::duration_cast<std::chrono::duration<double>> (std::chrono::steady_clock::now () - start_time).count (); } void nano::bootstrap_client::stop (bool force) { pending_stop = true; if (force) { hard_stop = true; } } void nano::bootstrap_client::run () { auto this_l (shared_from_this ()); socket->async_connect (endpoint, [this_l](boost::system::error_code const & ec) { if (!ec) { if (this_l->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Connection established to %1%") % this_l->endpoint); } this_l->attempt->pool_connection (this_l->shared_from_this ()); } else { if (this_l->node->config.logging.network_logging ()) { switch (ec.value ()) { default: BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Error initiating bootstrap connection to %1%: %2%") % this_l->endpoint % ec.message ()); break; case boost::system::errc::connection_refused: case boost::system::errc::operation_canceled: case boost::system::errc::timed_out: case 995: //Windows The I/O operation has been aborted because of either a thread exit or an application request case 10061: //Windows No connection could be made because the target machine actively refused it break; } } } }); } void nano::frontier_req_client::run () { std::unique_ptr<nano::frontier_req> request (new nano::frontier_req); request->start.clear (); request->age = std::numeric_limits<decltype (request->age)>::max (); request->count = std::numeric_limits<decltype (request->count)>::max (); auto send_buffer (std::make_shared<std::vector<uint8_t>> ()); { nano::vectorstream stream (*send_buffer); request->serialize (stream); } auto this_l (shared_from_this ()); connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { if 
(!ec) { this_l->receive_frontier (); } else { if (this_l->connection->node->config.logging.network_logging ()) { BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error while sending bootstrap request %1%") % ec.message ()); } } }); } std::shared_ptr<nano::bootstrap_client> nano::bootstrap_client::shared () { return shared_from_this (); } nano::frontier_req_client::frontier_req_client (std::shared_ptr<nano::bootstrap_client> connection_a) : connection (connection_a), current (0), count (0), bulk_push_cost (0) { auto transaction (connection->node->store.tx_begin_read ()); next (transaction); } nano::frontier_req_client::~frontier_req_client () { } void nano::frontier_req_client::receive_frontier () { auto this_l (shared_from_this ()); connection->socket->async_read (connection->receive_buffer, nano::frontier_req_client::size_frontier, [this_l](boost::system::error_code const & ec, size_t size_a) { // An issue with asio is that sometimes, instead of reporting a bad file descriptor during disconnect, // we simply get a size of 0. if (size_a == nano::frontier_req_client::size_frontier) { this_l->received_frontier (ec, size_a); } else { if (this_l->connection->node->config.logging.network_message_logging ()) { BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Invalid size: expected %1%, got %2%") % nano::frontier_req_client::size_frontier % size_a); } } }); } void nano::frontier_req_client::unsynced (nano::block_hash const & head, nano::block_hash const & end) { if (bulk_push_cost < bulk_push_cost_limit) { connection->attempt->add_bulk_push_target (head, end); if (end.is_zero ()) { bulk_push_cost += 2; } else { bulk_push_cost += 1; } } } void nano::frontier_req_client::received_frontier (boost::system::error_code const & ec, size_t size_a) { if (!ec) { assert (size_a == nano::frontier_req_client::size_frontier); nano::account account; nano::bufferstream account_stream (connection->receive_buffer->data (), sizeof (account)); auto error1 (nano::try_read (account_stream, account)); assert (!error1); nano::block_hash latest; nano::bufferstream latest_stream (connection->receive_buffer->data () + sizeof (account), sizeof (latest)); auto error2 (nano::try_read (latest_stream, latest)); assert (!error2); if (count == 0) { start_time = std::chrono::steady_clock::now (); } ++count; std::chrono::duration<double> time_span = std::chrono::duration_cast<std::chrono::duration<double>> (std::chrono::steady_clock::now () - start_time); double elapsed_sec = std::max (time_span.count (), bootstrap_minimum_elapsed_seconds_blockrate); double blocks_per_sec = static_cast<double> (count) / elapsed_sec; if (elapsed_sec > bootstrap_connection_warmup_time_sec && blocks_per_sec < bootstrap_minimum_frontier_blocks_per_sec) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Aborting frontier req because it was too slow")); promise.set_value (true); return; } if (connection->attempt->should_log ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Received %1% frontiers from %2%") % std::to_string (count) % connection->socket->remote_endpoint ()); } auto transaction (connection->node->store.tx_begin_read ()); if (!account.is_zero ()) { while (!current.is_zero () && current < account) { // We know about an account they don't. 
unsynced (frontier, 0); next (transaction); } if (!current.is_zero ()) { if (account == current) { if (latest == frontier) { // In sync } else { if (connection->node->store.block_exists (transaction, latest)) { // We know about a block they don't. unsynced (frontier, latest); } else { connection->attempt->add_pull (nano::pull_info (account, latest, frontier)); // Either we're behind or there's a fork we differ on // Either way, bulk pushing will probably not be effective bulk_push_cost += 5; } } next (transaction); } else { assert (account < current); connection->attempt->add_pull (nano::pull_info (account, latest, nano::block_hash (0))); } } else { connection->attempt->add_pull (nano::pull_info (account, latest, nano::block_hash (0))); } receive_frontier (); } else { while (!current.is_zero ()) { // We know about an account they don't. unsynced (frontier, 0); next (transaction); } if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << "Bulk push cost: " << bulk_push_cost; } { try { promise.set_value (false); } catch (std::future_error &) { } connection->attempt->pool_connection (connection); } } } else { if (connection->node->config.logging.network_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error while receiving frontier %1%") % ec.message ()); } } } void nano::frontier_req_client::next (nano::transaction const & transaction_a) { // Filling accounts deque to prevent often read transactions if (accounts.empty ()) { size_t max_size (128); for (auto i (connection->node->store.latest_begin (transaction_a, current.number () + 1)), n (connection->node->store.latest_end ()); i != n && accounts.size () != max_size; ++i) { nano::account_info info (i->second); accounts.push_back (std::make_pair (nano::account (i->first), info.head)); } /* If loop breaks before max_size, then latest_end () is reached Add empty record to finish frontier_req_server */ if (accounts.size () != max_size) { accounts.push_back (std::make_pair (nano::account (0), nano::block_hash (0))); } } // Retrieving accounts from deque auto account_pair (accounts.front ()); accounts.pop_front (); current = account_pair.first; frontier = account_pair.second; } nano::bulk_pull_client::bulk_pull_client (std::shared_ptr<nano::bootstrap_client> connection_a, nano::pull_info const & pull_a) : connection (connection_a), known_account (0), pull (pull_a), total_blocks (0), unexpected_count (0) { std::lock_guard<std::mutex> mutex (connection->attempt->mutex); connection->attempt->condition.notify_all (); } nano::bulk_pull_client::~bulk_pull_client () { // If received end block is not expected end block if (expected != pull.end) { pull.head = expected; if (connection->attempt->mode != nano::bootstrap_mode::legacy) { pull.account = expected; } connection->attempt->requeue_pull (pull); if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Bulk pull end block is not expected %1% for account %2%") % pull.end.to_string () % pull.account.to_account ()); } } { std::lock_guard<std::mutex> mutex (connection->attempt->mutex); --connection->attempt->pulling; } connection->attempt->condition.notify_all (); } void nano::bulk_pull_client::request () { expected = pull.head; nano::bulk_pull req; req.start = pull.account; req.end = pull.end; req.count = pull.count; req.set_count_present (pull.count != 0); auto buffer (std::make_shared<std::vector<uint8_t>> ()); { nano::vectorstream stream (*buffer); req.serialize 
(stream); } if (connection->node->config.logging.bulk_pull_logging ()) { std::unique_lock<std::mutex> lock (connection->attempt->mutex); BOOST_LOG (connection->node->log) << boost::str (boost::format ("Requesting account %1% from %2%. %3% accounts in queue") % req.start.to_account () % connection->endpoint % connection->attempt->pulls.size ()); } else if (connection->node->config.logging.network_logging () && connection->attempt->should_log ()) { std::unique_lock<std::mutex> lock (connection->attempt->mutex); BOOST_LOG (connection->node->log) << boost::str (boost::format ("%1% accounts in pull queue") % connection->attempt->pulls.size ()); } auto this_l (shared_from_this ()); connection->socket->async_write (buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { if (!ec) { this_l->receive_block (); } else { if (this_l->connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error sending bulk pull request to %1%: to %2%") % ec.message () % this_l->connection->endpoint); } } }); } void nano::bulk_pull_client::receive_block () { auto this_l (shared_from_this ()); connection->socket->async_read (connection->receive_buffer, 1, [this_l](boost::system::error_code const & ec, size_t size_a) { if (!ec) { this_l->received_type (); } else { if (this_l->connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error receiving block type: %1%") % ec.message ()); } } }); } void nano::bulk_pull_client::received_type () { auto this_l (shared_from_this ()); nano::block_type type (static_cast<nano::block_type> (connection->receive_buffer->data ()[0])); switch (type) { case nano::block_type::send: { connection->socket->async_read (connection->receive_buffer, nano::send_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::receive: { connection->socket->async_read (connection->receive_buffer, nano::receive_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::open: { connection->socket->async_read (connection->receive_buffer, nano::open_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::change: { connection->socket->async_read (connection->receive_buffer, nano::change_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::state: { connection->socket->async_read (connection->receive_buffer, nano::state_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::not_a_block: { // Avoid re-using slow peers, or peers that sent the wrong blocks. 
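/* not_a_block terminates the pull stream; the connection is recycled into the idle pool only when no stop is pending and the chain ended exactly on the expected block, otherwise it is not re-used. */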
if (!connection->pending_stop && expected == pull.end) { connection->attempt->pool_connection (connection); } break; } default: { if (connection->node->config.logging.network_packet_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Unknown type received as block type: %1%") % static_cast<int> (type)); } break; } } } void nano::bulk_pull_client::received_block (boost::system::error_code const & ec, size_t size_a, nano::block_type type_a) { if (!ec) { nano::bufferstream stream (connection->receive_buffer->data (), size_a); std::shared_ptr<nano::block> block (nano::deserialize_block (stream, type_a)); if (block != nullptr && !nano::work_validate (*block)) { auto hash (block->hash ()); if (connection->node->config.logging.bulk_pull_logging ()) { std::string block_l; block->serialize_json (block_l); BOOST_LOG (connection->node->log) << boost::str (boost::format ("Pulled block %1% %2%") % hash.to_string () % block_l); } // Is block expected? bool block_expected (false); if (hash == expected) { expected = block->previous (); block_expected = true; } else { unexpected_count++; } if (total_blocks == 0 && block_expected) { known_account = block->account (); } if (connection->block_count++ == 0) { connection->start_time = std::chrono::steady_clock::now (); } connection->attempt->total_blocks++; total_blocks++; bool stop_pull (connection->attempt->process_block (block, known_account, total_blocks, block_expected)); if (!stop_pull && !connection->hard_stop.load ()) { /* Process block in lazy pull if not stopped Stop usual pull request with unexpected block & more than 16k blocks processed to prevent spam */ if (connection->attempt->mode != nano::bootstrap_mode::legacy || unexpected_count < 16384) { receive_block (); } } else if (stop_pull && block_expected) { expected = pull.end; connection->attempt->pool_connection (connection); } if (stop_pull) { connection->attempt->lazy_stopped++; } } else { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << "Error deserializing block received from pull request"; } } } else { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error bulk receiving block: %1%") % ec.message ()); } } } nano::bulk_push_client::bulk_push_client (std::shared_ptr<nano::bootstrap_client> const & connection_a) : connection (connection_a) { } nano::bulk_push_client::~bulk_push_client () { } void nano::bulk_push_client::start () { nano::bulk_push message; auto buffer (std::make_shared<std::vector<uint8_t>> ()); { nano::vectorstream stream (*buffer); message.serialize (stream); } auto this_l (shared_from_this ()); connection->socket->async_write (buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { auto transaction (this_l->connection->node->store.tx_begin_read ()); if (!ec) { this_l->push (transaction); } else { if (this_l->connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Unable to send bulk_push request: %1%") % ec.message ()); } } }); } void nano::bulk_push_client::push (nano::transaction const & transaction_a) { std::shared_ptr<nano::block> block; bool finished (false); while (block == nullptr && !finished) { if (current_target.first.is_zero () || current_target.first == current_target.second) { std::lock_guard<std::mutex> guard (connection->attempt->mutex); if (!connection->attempt->bulk_push_targets.empty ()) { current_target = 
connection->attempt->bulk_push_targets.back (); connection->attempt->bulk_push_targets.pop_back (); } else { finished = true; } } if (!finished) { block = connection->node->store.block_get (transaction_a, current_target.first); if (block == nullptr) { current_target.first = nano::block_hash (0); } else { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << "Bulk pushing range " << current_target.first.to_string () << " down to " << current_target.second.to_string (); } } } } if (finished) { send_finished (); } else { current_target.first = block->previous (); push_block (*block); } } void nano::bulk_push_client::send_finished () { auto buffer (std::make_shared<std::vector<uint8_t>> ()); buffer->push_back (static_cast<uint8_t> (nano::block_type::not_a_block)); connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_push, nano::stat::dir::out); if (connection->node->config.logging.network_logging ()) { BOOST_LOG (connection->node->log) << "Bulk push finished"; } auto this_l (shared_from_this ()); connection->socket->async_write (buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { try { this_l->promise.set_value (false); } catch (std::future_error &) { } }); } void nano::bulk_push_client::push_block (nano::block const & block_a) { auto buffer (std::make_shared<std::vector<uint8_t>> ()); { nano::vectorstream stream (*buffer); nano::serialize_block (stream, block_a); } auto this_l (shared_from_this ()); connection->socket->async_write (buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { if (!ec) { auto transaction (this_l->connection->node->store.tx_begin_read ()); this_l->push (transaction); } else { if (this_l->connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error sending block during bulk push: %1%") % ec.message ()); } } }); } nano::bulk_pull_account_client::bulk_pull_account_client (std::shared_ptr<nano::bootstrap_client> connection_a, nano::account const & account_a) : connection (connection_a), account (account_a), total_blocks (0) { connection->attempt->condition.notify_all (); } nano::bulk_pull_account_client::~bulk_pull_account_client () { { std::lock_guard<std::mutex> mutex (connection->attempt->mutex); --connection->attempt->pulling; } connection->attempt->condition.notify_all (); } void nano::bulk_pull_account_client::request () { nano::bulk_pull_account req; req.account = account; req.minimum_amount = connection->node->config.receive_minimum; req.flags = nano::bulk_pull_account_flags::pending_hash_and_amount; auto buffer (std::make_shared<std::vector<uint8_t>> ()); { nano::vectorstream stream (*buffer); req.serialize (stream); } if (connection->node->config.logging.bulk_pull_logging ()) { std::unique_lock<std::mutex> lock (connection->attempt->mutex); BOOST_LOG (connection->node->log) << boost::str (boost::format ("Requesting pending for account %1% from %2%. 
%3% accounts in queue") % req.account.to_account () % connection->endpoint % connection->attempt->wallet_accounts.size ()); } else if (connection->node->config.logging.network_logging () && connection->attempt->should_log ()) { std::unique_lock<std::mutex> lock (connection->attempt->mutex); BOOST_LOG (connection->node->log) << boost::str (boost::format ("%1% accounts in pull queue") % connection->attempt->wallet_accounts.size ()); } auto this_l (shared_from_this ()); connection->socket->async_write (buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { if (!ec) { this_l->receive_pending (); } else { this_l->connection->attempt->requeue_pending (this_l->account); if (this_l->connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error starting bulk pull request to %1%: to %2%") % ec.message () % this_l->connection->endpoint); } } }); } void nano::bulk_pull_account_client::receive_pending () { auto this_l (shared_from_this ()); size_t size_l (sizeof (nano::uint256_union) + sizeof (nano::uint128_union)); connection->socket->async_read (connection->receive_buffer, size_l, [this_l, size_l](boost::system::error_code const & ec, size_t size_a) { // An issue with asio is that sometimes, instead of reporting a bad file descriptor during disconnect, // we simply get a size of 0. if (size_a == size_l) { if (!ec) { nano::block_hash pending; nano::bufferstream frontier_stream (this_l->connection->receive_buffer->data (), sizeof (nano::uint256_union)); auto error1 (nano::try_read (frontier_stream, pending)); assert (!error1); nano::amount balance; nano::bufferstream balance_stream (this_l->connection->receive_buffer->data () + sizeof (nano::uint256_union), sizeof (nano::uint128_union)); auto error2 (nano::try_read (balance_stream, balance)); assert (!error2); if (this_l->total_blocks == 0 || !pending.is_zero ()) { if (this_l->total_blocks == 0 || balance.number () >= this_l->connection->node->config.receive_minimum.number ()) { this_l->total_blocks++; { if (!pending.is_zero ()) { auto transaction (this_l->connection->node->store.tx_begin_read ()); if (!this_l->connection->node->store.block_exists (transaction, pending)) { this_l->connection->attempt->lazy_start (pending); } } } this_l->receive_pending (); } else { this_l->connection->attempt->requeue_pending (this_l->account); } } else { this_l->connection->attempt->pool_connection (this_l->connection); } } else { this_l->connection->attempt->requeue_pending (this_l->account); if (this_l->connection->node->config.logging.network_logging ()) { BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error while receiving bulk pull account frontier %1%") % ec.message ()); } } } else { this_l->connection->attempt->requeue_pending (this_l->account); if (this_l->connection->node->config.logging.network_message_logging ()) { BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Invalid size: expected %1%, got %2%") % size_l % size_a); } } }); } nano::pull_info::pull_info () : account (0), end (0), count (0), attempts (0) { } nano::pull_info::pull_info (nano::account const & account_a, nano::block_hash const & head_a, nano::block_hash const & end_a, count_t count_a) : account (account_a), head (head_a), end (end_a), count (count_a), attempts (0) { } nano::bootstrap_attempt::bootstrap_attempt (std::shared_ptr<nano::node> node_a) : next_log (std::chrono::steady_clock::now ()), connections (0), pulling (0), node (node_a), account_count 
(0), total_blocks (0), runs_count (0), stopped (false), mode (nano::bootstrap_mode::legacy), lazy_stopped (0) { BOOST_LOG (node->log) << "Starting bootstrap attempt"; node->bootstrap_initiator.notify_listeners (true); } nano::bootstrap_attempt::~bootstrap_attempt () { BOOST_LOG (node->log) << "Exiting bootstrap attempt"; node->bootstrap_initiator.notify_listeners (false); } bool nano::bootstrap_attempt::should_log () { std::lock_guard<std::mutex> lock (mutex); auto result (false); auto now (std::chrono::steady_clock::now ()); if (next_log < now) { result = true; next_log = now + std::chrono::seconds (15); } return result; } bool nano::bootstrap_attempt::request_frontier (std::unique_lock<std::mutex> & lock_a) { auto result (true); auto connection_l (connection (lock_a)); connection_frontier_request = connection_l; if (connection_l) { std::future<bool> future; { auto client (std::make_shared<nano::frontier_req_client> (connection_l)); client->run (); frontiers = client; future = client->promise.get_future (); } lock_a.unlock (); result = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception. lock_a.lock (); if (result) { pulls.clear (); } if (node->config.logging.network_logging ()) { if (!result) { BOOST_LOG (node->log) << boost::str (boost::format ("Completed frontier request, %1% out of sync accounts according to %2%") % pulls.size () % connection_l->endpoint); } else { BOOST_LOG (node->log) << "frontier_req failed, reattempting"; } } } return result; } void nano::bootstrap_attempt::request_pull (std::unique_lock<std::mutex> & lock_a) { auto connection_l (connection (lock_a)); if (connection_l) { auto pull (pulls.front ()); pulls.pop_front (); if (mode != nano::bootstrap_mode::legacy) { // Check if pull is obsolete (head was processed) std::unique_lock<std::mutex> lock (lazy_mutex); auto transaction (node->store.tx_begin_read ()); while (!pulls.empty () && !pull.head.is_zero () && (lazy_blocks.find (pull.head) != lazy_blocks.end () || node->store.block_exists (transaction, pull.head))) { pull = pulls.front (); pulls.pop_front (); } } ++pulling; // The bulk_pull_client destructor attempt to requeue_pull which can cause a deadlock if this is the last reference // Dispatch request in an external thread in case it needs to be destroyed node->background ([connection_l, pull]() { auto client (std::make_shared<nano::bulk_pull_client> (connection_l, pull)); client->request (); }); } } void nano::bootstrap_attempt::request_push (std::unique_lock<std::mutex> & lock_a) { bool error (false); if (auto connection_shared = connection_frontier_request.lock ()) { std::future<bool> future; { auto client (std::make_shared<nano::bulk_push_client> (connection_shared)); client->start (); push = client; future = client->promise.get_future (); } lock_a.unlock (); error = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception. 
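/* consume_future () turns that future_error into a plain error flag, so the push outcome can still be logged below without the exception escaping. */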
lock_a.lock (); } if (node->config.logging.network_logging ()) { BOOST_LOG (node->log) << "Exiting bulk push client"; if (error) { BOOST_LOG (node->log) << "Bulk push client failed"; } } } bool nano::bootstrap_attempt::still_pulling () { assert (!mutex.try_lock ()); auto running (!stopped); auto more_pulls (!pulls.empty ()); auto still_pulling (pulling > 0); return running && (more_pulls || still_pulling); } void nano::bootstrap_attempt::run () { populate_connections (); std::unique_lock<std::mutex> lock (mutex); auto frontier_failure (true); while (!stopped && frontier_failure) { frontier_failure = request_frontier (lock); } // Shuffle pulls. release_assert (std::numeric_limits<CryptoPP::word32>::max () > pulls.size ()); if (!pulls.empty ()) { for (auto i = static_cast<CryptoPP::word32> (pulls.size () - 1); i > 0; --i) { auto k = nano::random_pool.GenerateWord32 (0, i); std::swap (pulls[i], pulls[k]); } } while (still_pulling ()) { while (still_pulling ()) { if (!pulls.empty ()) { if (!node->block_processor.full ()) { request_pull (lock); } else { condition.wait_for (lock, std::chrono::seconds (15)); } } else { condition.wait (lock); } } // Flushing may resolve forks which can add more pulls BOOST_LOG (node->log) << "Flushing unchecked blocks"; lock.unlock (); node->block_processor.flush (); lock.lock (); BOOST_LOG (node->log) << "Finished flushing unchecked blocks"; } if (!stopped) { BOOST_LOG (node->log) << "Completed pulls"; request_push (lock); runs_count++; // Start wallet lazy bootstrap if required if (!wallet_accounts.empty () && !node->flags.disable_wallet_bootstrap) { lock.unlock (); mode = nano::bootstrap_mode::wallet_lazy; wallet_run (); lock.lock (); } // Start lazy bootstrap if some lazy keys were inserted else if (runs_count < 3 && !lazy_finished () && !node->flags.disable_lazy_bootstrap) { lock.unlock (); mode = nano::bootstrap_mode::lazy; lazy_run (); lock.lock (); } } stopped = true; condition.notify_all (); idle.clear (); } std::shared_ptr<nano::bootstrap_client> nano::bootstrap_attempt::connection (std::unique_lock<std::mutex> & lock_a) { while (!stopped && idle.empty ()) { condition.wait (lock_a); } std::shared_ptr<nano::bootstrap_client> result; if (!idle.empty ()) { result = idle.back (); idle.pop_back (); } return result; } bool nano::bootstrap_attempt::consume_future (std::future<bool> & future_a) { bool result; try { result = future_a.get (); } catch (std::future_error &) { result = true; } return result; } struct block_rate_cmp { bool operator() (const std::shared_ptr<nano::bootstrap_client> & lhs, const std::shared_ptr<nano::bootstrap_client> & rhs) const { return lhs->block_rate () > rhs->block_rate (); } }; unsigned nano::bootstrap_attempt::target_connections (size_t pulls_remaining) { if (node->config.bootstrap_connections >= node->config.bootstrap_connections_max) { return std::max (1U, node->config.bootstrap_connections_max); } // Only scale up to bootstrap_connections_max for large pulls. 
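/* Linear ramp between the configured minimum and maximum: step = clamp (pulls_remaining / bootstrap_connection_scale_target_blocks, 0, 1) and target = bootstrap_connections + (bootstrap_connections_max - bootstrap_connections) * step, rounded to the nearest connection. Illustrative numbers only: with 4 configured connections, a cap of 64 and a remaining ratio of 0.2, the target works out to 16. */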
double step = std::min (1.0, std::max (0.0, (double)pulls_remaining / bootstrap_connection_scale_target_blocks)); double target = (double)node->config.bootstrap_connections + (double)(node->config.bootstrap_connections_max - node->config.bootstrap_connections) * step; return std::max (1U, (unsigned)(target + 0.5f)); } void nano::bootstrap_attempt::populate_connections () { double rate_sum = 0.0; size_t num_pulls = 0; std::priority_queue<std::shared_ptr<nano::bootstrap_client>, std::vector<std::shared_ptr<nano::bootstrap_client>>, block_rate_cmp> sorted_connections; std::unordered_set<nano::tcp_endpoint> endpoints; { std::unique_lock<std::mutex> lock (mutex); num_pulls = pulls.size (); std::deque<std::weak_ptr<nano::bootstrap_client>> new_clients; for (auto & c : clients) { if (auto client = c.lock ()) { new_clients.push_back (client); endpoints.insert (client->endpoint); double elapsed_sec = client->elapsed_seconds (); auto blocks_per_sec = client->block_rate (); rate_sum += blocks_per_sec; if (client->elapsed_seconds () > bootstrap_connection_warmup_time_sec && client->block_count > 0) { sorted_connections.push (client); } // Force-stop the slowest peers, since they can take the whole bootstrap hostage by dribbling out blocks on the last remaining pull. // This is ~1.5kilobits/sec. if (elapsed_sec > bootstrap_minimum_termination_time_sec && blocks_per_sec < bootstrap_minimum_blocks_per_sec) { if (node->config.logging.bulk_pull_logging ()) { BOOST_LOG (node->log) << boost::str (boost::format ("Stopping slow peer %1% (elapsed sec %2%s > %3%s and %4% blocks per second < %5%)") % client->endpoint.address ().to_string () % elapsed_sec % bootstrap_minimum_termination_time_sec % blocks_per_sec % bootstrap_minimum_blocks_per_sec); } client->stop (true); } } } // Cleanup expired clients clients.swap (new_clients); } auto target = target_connections (num_pulls); // We only want to drop slow peers when more than 2/3 are active. 2/3 because 1/2 is too aggressive, and 100% rarely happens. // Probably needs more tuning. if (sorted_connections.size () >= (target * 2) / 3 && target >= 4) { // 4 -> 1, 8 -> 2, 16 -> 4, arbitrary, but seems to work well. auto drop = (int)roundf (sqrtf ((float)target - 2.0f)); if (node->config.logging.bulk_pull_logging ()) { BOOST_LOG (node->log) << boost::str (boost::format ("Dropping %1% bulk pull peers, target connections %2%") % drop % target); } for (int i = 0; i < drop; i++) { auto client = sorted_connections.top (); if (node->config.logging.bulk_pull_logging ()) { BOOST_LOG (node->log) << boost::str (boost::format ("Dropping peer with block rate %1%, block count %2% (%3%) ") % client->block_rate () % client->block_count % client->endpoint.address ().to_string ()); } client->stop (false); sorted_connections.pop (); } } if (node->config.logging.bulk_pull_logging ()) { std::unique_lock<std::mutex> lock (mutex); BOOST_LOG (node->log) << boost::str (boost::format ("Bulk pull connections: %1%, rate: %2% blocks/sec, remaining account pulls: %3%, total blocks: %4%") % connections.load () % (int)rate_sum % pulls.size () % (int)total_blocks.load ()); } if (connections < target) { auto delta = std::min ((target - connections) * 2, bootstrap_max_new_connections); // TODO - tune this better // Not many peers respond, need to try to make more connections than we need. 
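/* delta over-provisions connection attempts (twice the shortfall, capped at bootstrap_max_new_connections); endpoints already tried in this round are skipped via the endpoints set, and the attempt aborts if no peers are available while nothing is connected. */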
for (auto i = 0u; i < delta; i++) { auto peer (node->peers.bootstrap_peer ()); auto endpoint (nano::tcp_endpoint (peer.address (), peer.port ())); if (peer != nano::endpoint (boost::asio::ip::address_v6::any (), 0) && endpoints.find (endpoint) == endpoints.end ()) { auto client (std::make_shared<nano::bootstrap_client> (node, shared_from_this (), endpoint)); client->run (); std::lock_guard<std::mutex> lock (mutex); clients.push_back (client); endpoints.insert (endpoint); } else if (connections == 0) { BOOST_LOG (node->log) << boost::str (boost::format ("Bootstrap stopped because there are no peers")); stopped = true; condition.notify_all (); } } } if (!stopped) { std::weak_ptr<nano::bootstrap_attempt> this_w (shared_from_this ()); node->alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (1), [this_w]() { if (auto this_l = this_w.lock ()) { this_l->populate_connections (); } }); } } void nano::bootstrap_attempt::add_connection (nano::endpoint const & endpoint_a) { auto client (std::make_shared<nano::bootstrap_client> (node, shared_from_this (), nano::tcp_endpoint (endpoint_a.address (), endpoint_a.port ()))); client->run (); } void nano::bootstrap_attempt::pool_connection (std::shared_ptr<nano::bootstrap_client> client_a) { { std::lock_guard<std::mutex> lock (mutex); if (!stopped && !client_a->pending_stop) { idle.push_front (client_a); } } condition.notify_all (); } void nano::bootstrap_attempt::stop () { std::lock_guard<std::mutex> lock (mutex); stopped = true; condition.notify_all (); for (auto i : clients) { if (auto client = i.lock ()) { client->socket->close (); } } if (auto i = frontiers.lock ()) { try { i->promise.set_value (true); } catch (std::future_error &) { } } if (auto i = push.lock ()) { try { i->promise.set_value (true); } catch (std::future_error &) { } } } void nano::bootstrap_attempt::add_pull (nano::pull_info const & pull) { { std::lock_guard<std::mutex> lock (mutex); pulls.push_back (pull); } condition.notify_all (); } void nano::bootstrap_attempt::requeue_pull (nano::pull_info const & pull_a) { auto pull (pull_a); if (++pull.attempts < bootstrap_frontier_retry_limit) { std::lock_guard<std::mutex> lock (mutex); pulls.push_front (pull); condition.notify_all (); } else if (mode == nano::bootstrap_mode::lazy) { { // Retry for lazy pulls (not weak state block link assumptions) std::lock_guard<std::mutex> lock (mutex); pull.attempts++; pulls.push_back (pull); } condition.notify_all (); } else { if (node->config.logging.bulk_pull_logging ()) { BOOST_LOG (node->log) << boost::str (boost::format ("Failed to pull account %1% down to %2% after %3% attempts") % pull.account.to_account () % pull.end.to_string () % pull.attempts); } } } void nano::bootstrap_attempt::add_bulk_push_target (nano::block_hash const & head, nano::block_hash const & end) { std::lock_guard<std::mutex> lock (mutex); bulk_push_targets.push_back (std::make_pair (head, end)); } void nano::bootstrap_attempt::lazy_start (nano::block_hash const & hash_a) { std::unique_lock<std::mutex> lock (lazy_mutex); // Add start blocks, limit 1024 (32k with disabled legacy bootstrap) size_t max_keys (node->flags.disable_legacy_bootstrap ? 
32 * 1024 : 1024); if (lazy_keys.size () < max_keys && lazy_keys.find (hash_a) == lazy_keys.end () && lazy_blocks.find (hash_a) == lazy_blocks.end ()) { lazy_keys.insert (hash_a); lazy_pulls.push_back (hash_a); } } void nano::bootstrap_attempt::lazy_add (nano::block_hash const & hash_a) { // Add only unknown blocks assert (!lazy_mutex.try_lock ()); if (lazy_blocks.find (hash_a) == lazy_blocks.end ()) { lazy_pulls.push_back (hash_a); } } void nano::bootstrap_attempt::lazy_pull_flush () { std::unique_lock<std::mutex> lock (lazy_mutex); auto transaction (node->store.tx_begin_read ()); for (auto & pull_start : lazy_pulls) { // Recheck if block was already processed if (lazy_blocks.find (pull_start) == lazy_blocks.end () && !node->store.block_exists (transaction, pull_start)) { add_pull (nano::pull_info (pull_start, pull_start, nano::block_hash (0), lazy_max_pull_blocks)); } } lazy_pulls.clear (); } bool nano::bootstrap_attempt::lazy_finished () { bool result (true); auto transaction (node->store.tx_begin_read ()); std::unique_lock<std::mutex> lock (lazy_mutex); for (auto it (lazy_keys.begin ()), end (lazy_keys.end ()); it != end && !stopped;) { if (node->store.block_exists (transaction, *it)) { it = lazy_keys.erase (it); } else { result = false; break; // No need to increment `it` as we break above. } } // Finish lazy bootstrap without lazy pulls (in combination with still_pulling ()) if (!result && lazy_pulls.empty ()) { result = true; } return result; } void nano::bootstrap_attempt::lazy_clear () { assert (!lazy_mutex.try_lock ()); lazy_blocks.clear (); lazy_keys.clear (); lazy_pulls.clear (); lazy_state_unknown.clear (); lazy_balances.clear (); lazy_stopped = 0; } void nano::bootstrap_attempt::lazy_run () { populate_connections (); auto start_time (std::chrono::steady_clock::now ()); auto max_time (std::chrono::minutes (node->flags.disable_legacy_bootstrap ? 
48 * 60 : 30)); std::unique_lock<std::mutex> lock (mutex); while ((still_pulling () || !lazy_finished ()) && lazy_stopped < lazy_max_stopped && std::chrono::steady_clock::now () - start_time < max_time) { unsigned iterations (0); while (still_pulling () && lazy_stopped < lazy_max_stopped && std::chrono::steady_clock::now () - start_time < max_time) { if (!pulls.empty ()) { if (!node->block_processor.full ()) { request_pull (lock); } else { condition.wait_for (lock, std::chrono::seconds (15)); } } else { condition.wait (lock); } ++iterations; // Flushing lazy pulls if (iterations % 100 == 0) { lock.unlock (); lazy_pull_flush (); lock.lock (); } } // Flushing may resolve forks which can add more pulls // Flushing lazy pulls lock.unlock (); node->block_processor.flush (); lazy_pull_flush (); lock.lock (); } if (!stopped) { BOOST_LOG (node->log) << "Completed lazy pulls"; std::unique_lock<std::mutex> lazy_lock (lazy_mutex); runs_count++; // Start wallet lazy bootstrap if required if (!wallet_accounts.empty () && !node->flags.disable_wallet_bootstrap) { pulls.clear (); lazy_clear (); mode = nano::bootstrap_mode::wallet_lazy; lock.unlock (); lazy_lock.unlock (); wallet_run (); lock.lock (); } // Fallback to legacy bootstrap else if (runs_count < 3 && !lazy_keys.empty () && !node->flags.disable_legacy_bootstrap) { pulls.clear (); lazy_clear (); mode = nano::bootstrap_mode::legacy; lock.unlock (); lazy_lock.unlock (); run (); lock.lock (); } } stopped = true; condition.notify_all (); idle.clear (); } bool nano::bootstrap_attempt::process_block (std::shared_ptr<nano::block> block_a, nano::account const & known_account_a, uint64_t total_blocks, bool block_expected) { bool stop_pull (false); if (mode != nano::bootstrap_mode::legacy && block_expected) { auto hash (block_a->hash ()); std::unique_lock<std::mutex> lock (lazy_mutex); // Processing new blocks if (lazy_blocks.find (hash) == lazy_blocks.end ()) { // Search block in ledger (old) auto transaction (node->store.tx_begin_read ()); if (!node->store.block_exists (transaction, block_a->type (), hash)) { nano::uint128_t balance (std::numeric_limits<nano::uint128_t>::max ()); nano::unchecked_info info (block_a, known_account_a, 0, nano::signature_verification::unknown); node->block_processor.add (info); // Search for new dependencies if (!block_a->source ().is_zero () && !node->store.block_exists (transaction, block_a->source ())) { lazy_add (block_a->source ()); } else if (block_a->type () == nano::block_type::send) { // Calculate balance for legacy send blocks std::shared_ptr<nano::send_block> block_l (std::static_pointer_cast<nano::send_block> (block_a)); if (block_l != nullptr) { balance = block_l->hashables.balance.number (); } } else if (block_a->type () == nano::block_type::state) { std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a)); if (block_l != nullptr) { balance = block_l->hashables.balance.number (); nano::block_hash link (block_l->hashables.link); // If link is not epoch link or 0. 
And if block from link unknown if (!link.is_zero () && link != node->ledger.epoch_link && lazy_blocks.find (link) == lazy_blocks.end () && !node->store.block_exists (transaction, link)) { nano::block_hash previous (block_l->hashables.previous); // If state block previous is 0 then source block required if (previous.is_zero ()) { lazy_add (link); } // In other cases previous block balance required to find out subtype of state block else if (node->store.block_exists (transaction, previous)) { nano::amount prev_balance (node->ledger.balance (transaction, previous)); if (prev_balance.number () <= balance) { lazy_add (link); } } // Search balance of already processed previous blocks else if (lazy_blocks.find (previous) != lazy_blocks.end ()) { auto previous_balance (lazy_balances.find (previous)); if (previous_balance != lazy_balances.end ()) { if (previous_balance->second <= balance) { lazy_add (link); } lazy_balances.erase (previous_balance); } } // Insert in unknown state blocks if previous wasn't already processed else { lazy_state_unknown.insert (std::make_pair (previous, std::make_pair (link, balance))); } } } } lazy_blocks.insert (hash); // Adding lazy balances if (total_blocks == 0) { lazy_balances.insert (std::make_pair (hash, balance)); } // Removing lazy balances if (!block_a->previous ().is_zero () && lazy_balances.find (block_a->previous ()) != lazy_balances.end ()) { lazy_balances.erase (block_a->previous ()); } } // Drop bulk_pull if block is already known (ledger) else { // Disabled until server rewrite // stop_pull = true; // Force drop lazy bootstrap connection for long bulk_pull if (total_blocks > lazy_max_pull_blocks) { stop_pull = true; } } //Search unknown state blocks balances auto find_state (lazy_state_unknown.find (hash)); if (find_state != lazy_state_unknown.end ()) { auto next_block (find_state->second); lazy_state_unknown.erase (hash); // Retrieve balance for previous state blocks if (block_a->type () == nano::block_type::state) { std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a)); if (block_l->hashables.balance.number () <= next_block.second) { lazy_add (next_block.first); } } // Retrieve balance for previous legacy send blocks else if (block_a->type () == nano::block_type::send) { std::shared_ptr<nano::send_block> block_l (std::static_pointer_cast<nano::send_block> (block_a)); if (block_l->hashables.balance.number () <= next_block.second) { lazy_add (next_block.first); } } // Weak assumption for other legacy block types else { // Disabled } } } // Drop bulk_pull if block is already known (processed set) else { // Disabled until server rewrite // stop_pull = true; // Force drop lazy bootstrap connection for long bulk_pull if (total_blocks > lazy_max_pull_blocks) { stop_pull = true; } } } else if (mode != nano::bootstrap_mode::legacy) { // Drop connection with unexpected block for lazy bootstrap stop_pull = true; } else { nano::unchecked_info info (block_a, known_account_a, 0, nano::signature_verification::unknown); node->block_processor.add (info); } return stop_pull; } void nano::bootstrap_attempt::request_pending (std::unique_lock<std::mutex> & lock_a) { auto connection_l (connection (lock_a)); if (connection_l) { auto account (wallet_accounts.front ()); wallet_accounts.pop_front (); ++pulling; // The bulk_pull_account_client destructor attempt to requeue_pull which can cause a deadlock if this is the last reference // Dispatch request in an external thread in case it needs to be destroyed node->background 
([connection_l, account]() { auto client (std::make_shared<nano::bulk_pull_account_client> (connection_l, account)); client->request (); }); } } void nano::bootstrap_attempt::requeue_pending (nano::account const & account_a) { auto account (account_a); { std::lock_guard<std::mutex> lock (mutex); wallet_accounts.push_front (account); condition.notify_all (); } } void nano::bootstrap_attempt::wallet_start (std::deque<nano::account> & accounts_a) { std::lock_guard<std::mutex> lock (mutex); wallet_accounts.swap (accounts_a); } bool nano::bootstrap_attempt::wallet_finished () { assert (!mutex.try_lock ()); auto running (!stopped); auto more_accounts (!wallet_accounts.empty ()); auto still_pulling (pulling > 0); return running && (more_accounts || still_pulling); } void nano::bootstrap_attempt::wallet_run () { populate_connections (); auto start_time (std::chrono::steady_clock::now ()); auto max_time (std::chrono::minutes (10)); std::unique_lock<std::mutex> lock (mutex); while (wallet_finished () && std::chrono::steady_clock::now () - start_time < max_time) { if (!wallet_accounts.empty ()) { request_pending (lock); } else { condition.wait (lock); } } if (!stopped) { BOOST_LOG (node->log) << "Completed wallet lazy pulls"; runs_count++; // Start lazy bootstrap if some lazy keys were inserted if (!lazy_finished ()) { lock.unlock (); lazy_run (); lock.lock (); } } stopped = true; condition.notify_all (); idle.clear (); } nano::bootstrap_initiator::bootstrap_initiator (nano::node & node_a) : node (node_a), stopped (false), thread ([this]() { nano::thread_role::set (nano::thread_role::name::bootstrap_initiator); run_bootstrap (); }) { } nano::bootstrap_initiator::~bootstrap_initiator () { stop (); thread.join (); } void nano::bootstrap_initiator::bootstrap () { std::unique_lock<std::mutex> lock (mutex); if (!stopped && attempt == nullptr) { node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out); attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ()); condition.notify_all (); } } void nano::bootstrap_initiator::bootstrap (nano::endpoint const & endpoint_a, bool add_to_peers) { if (add_to_peers) { node.peers.insert (nano::map_endpoint_to_v6 (endpoint_a), nano::protocol_version); } std::unique_lock<std::mutex> lock (mutex); if (!stopped) { while (attempt != nullptr) { attempt->stop (); condition.wait (lock); } node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out); attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ()); attempt->add_connection (endpoint_a); condition.notify_all (); } } void nano::bootstrap_initiator::bootstrap_lazy (nano::block_hash const & hash_a, bool force) { { std::unique_lock<std::mutex> lock (mutex); if (force) { while (attempt != nullptr) { attempt->stop (); condition.wait (lock); } } node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_lazy, nano::stat::dir::out); if (attempt == nullptr) { attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ()); attempt->mode = nano::bootstrap_mode::lazy; } attempt->lazy_start (hash_a); } condition.notify_all (); } void nano::bootstrap_initiator::bootstrap_wallet (std::deque<nano::account> & accounts_a) { { std::unique_lock<std::mutex> lock (mutex); node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_wallet_lazy, nano::stat::dir::out); if (attempt == nullptr) { attempt = std::make_shared<nano::bootstrap_attempt> (node.shared ()); attempt->mode = nano::bootstrap_mode::wallet_lazy; } 
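/* wallet_start () swaps the requested accounts into the attempt; run_bootstrap () later picks the attempt up and dispatches on its mode (wallet_run () for wallet_lazy). */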
attempt->wallet_start (accounts_a); } condition.notify_all (); } void nano::bootstrap_initiator::run_bootstrap () { std::unique_lock<std::mutex> lock (mutex); while (!stopped) { if (attempt != nullptr) { lock.unlock (); if (attempt->mode == nano::bootstrap_mode::legacy) { attempt->run (); } else if (attempt->mode == nano::bootstrap_mode::lazy) { attempt->lazy_run (); } else { attempt->wallet_run (); } lock.lock (); attempt = nullptr; condition.notify_all (); } else { condition.wait (lock); } } } void nano::bootstrap_initiator::add_observer (std::function<void(bool)> const & observer_a) { std::lock_guard<std::mutex> lock (mutex); observers.push_back (observer_a); } bool nano::bootstrap_initiator::in_progress () { return current_attempt () != nullptr; } std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::current_attempt () { std::lock_guard<std::mutex> lock (mutex); return attempt; } void nano::bootstrap_initiator::stop () { { std::unique_lock<std::mutex> lock (mutex); stopped = true; if (attempt != nullptr) { attempt->stop (); } } condition.notify_all (); } void nano::bootstrap_initiator::notify_listeners (bool in_progress_a) { for (auto & i : observers) { i (in_progress_a); } } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (bootstrap_initiator & bootstrap_initiator, const std::string & name) { size_t count = 0; { std::lock_guard<std::mutex> guard (bootstrap_initiator.mutex); count = bootstrap_initiator.observers.size (); } auto sizeof_element = sizeof (decltype (bootstrap_initiator.observers)::value_type); auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "observers", count, sizeof_element })); return composite; } } nano::bootstrap_listener::bootstrap_listener (boost::asio::io_context & io_ctx_a, uint16_t port_a, nano::node & node_a) : acceptor (io_ctx_a), local (boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::any (), port_a)), io_ctx (io_ctx_a), node (node_a), defer_acceptor (io_ctx_a) { } void nano::bootstrap_listener::start () { acceptor.open (local.protocol ()); acceptor.set_option (boost::asio::ip::tcp::acceptor::reuse_address (true)); boost::system::error_code ec; acceptor.bind (local, ec); if (ec) { BOOST_LOG (node.log) << boost::str (boost::format ("Error while binding for bootstrap on port %1%: %2%") % local.port () % ec.message ()); throw std::runtime_error (ec.message ()); } acceptor.listen (); accept_connection (); } void nano::bootstrap_listener::stop () { decltype (connections) connections_l; { std::lock_guard<std::mutex> lock (mutex); on = false; connections_l.swap (connections); } acceptor.close (); for (auto & i : connections_l) { auto connection (i.second.lock ()); if (connection) { connection->socket->close (); } } } void nano::bootstrap_listener::accept_connection () { if (acceptor.is_open ()) { if (connections.size () < node.config.bootstrap_connections_max) { auto socket (std::make_shared<nano::socket> (node.shared ())); acceptor.async_accept (socket->socket_m, [this, socket](boost::system::error_code const & ec) { accept_action (ec, socket); }); } else { BOOST_LOG (node.log) << boost::str (boost::format ("Unable to accept new TCP network sockets (have %1% concurrent connections, limit of %2%), will try to accept again in 1s") % connections.size () % node.config.bootstrap_connections_max); defer_acceptor.expires_after (std::chrono::seconds (1)); defer_acceptor.async_wait ([this](const boost::system::error_code & ec) 
{ /* * There should be no other call points that can invoke * accept_connect() after starting the listener, so if we * get an error from the I/O context, something is probably * wrong. */ if (!ec) { accept_connection (); } }); } } } void nano::bootstrap_listener::accept_action (boost::system::error_code const & ec, std::shared_ptr<nano::socket> socket_a) { if (!ec) { auto connection (std::make_shared<nano::bootstrap_server> (socket_a, node.shared ())); { std::lock_guard<std::mutex> lock (mutex); if (acceptor.is_open ()) { connections[connection.get ()] = connection; connection->receive (); } } accept_connection (); } else { BOOST_LOG (node.log) << boost::str (boost::format ("Error while accepting bootstrap connections: %1%") % ec.message ()); } } boost::asio::ip::tcp::endpoint nano::bootstrap_listener::endpoint () { return boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::loopback (), local.port ()); } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (bootstrap_listener & bootstrap_listener, const std::string & name) { size_t count = 0; { std::lock_guard<std::mutex> guard (bootstrap_listener.mutex); count = bootstrap_listener.connections.size (); } auto sizeof_element = sizeof (decltype (bootstrap_listener.connections)::value_type); auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "connections", count, sizeof_element })); return composite; } } nano::bootstrap_server::~bootstrap_server () { if (node->config.logging.bulk_pull_logging ()) { BOOST_LOG (node->log) << "Exiting bootstrap server"; } std::lock_guard<std::mutex> lock (node->bootstrap.mutex); node->bootstrap.connections.erase (this); } nano::bootstrap_server::bootstrap_server (std::shared_ptr<nano::socket> socket_a, std::shared_ptr<nano::node> node_a) : receive_buffer (std::make_shared<std::vector<uint8_t>> ()), socket (socket_a), node (node_a) { receive_buffer->resize (128); } void nano::bootstrap_server::receive () { auto this_l (shared_from_this ()); socket->async_read (receive_buffer, 8, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->receive_header_action (ec, size_a); }); } void nano::bootstrap_server::receive_header_action (boost::system::error_code const & ec, size_t size_a) { if (!ec) { assert (size_a == 8); nano::bufferstream type_stream (receive_buffer->data (), size_a); auto error (false); nano::message_header header (error, type_stream); if (!error) { switch (header.type) { case nano::message_type::bulk_pull: { node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull, nano::stat::dir::in); auto this_l (shared_from_this ()); socket->async_read (receive_buffer, header.payload_length_bytes (), [this_l, header](boost::system::error_code const & ec, size_t size_a) { this_l->receive_bulk_pull_action (ec, size_a, header); }); break; } case nano::message_type::bulk_pull_account: { node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_pull_account, nano::stat::dir::in); auto this_l (shared_from_this ()); socket->async_read (receive_buffer, header.payload_length_bytes (), [this_l, header](boost::system::error_code const & ec, size_t size_a) { this_l->receive_bulk_pull_account_action (ec, size_a, header); }); break; } case nano::message_type::frontier_req: { node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::frontier_req, nano::stat::dir::in); auto this_l (shared_from_this ()); socket->async_read (receive_buffer, 
header.payload_length_bytes (), [this_l, header](boost::system::error_code const & ec, size_t size_a) { this_l->receive_frontier_req_action (ec, size_a, header); }); break; } case nano::message_type::bulk_push: { node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::bulk_push, nano::stat::dir::in); add_request (std::unique_ptr<nano::message> (new nano::bulk_push (header))); break; } default: { if (node->config.logging.network_logging ()) { BOOST_LOG (node->log) << boost::str (boost::format ("Received invalid type from bootstrap connection %1%") % static_cast<uint8_t> (header.type)); } break; } } } } else { if (node->config.logging.bulk_pull_logging ()) { BOOST_LOG (node->log) << boost::str (boost::format ("Error while receiving type: %1%") % ec.message ()); } } } void nano::bootstrap_server::receive_bulk_pull_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a) { if (!ec) { auto error (false); nano::bufferstream stream (receive_buffer->data (), size_a); std::unique_ptr<nano::bulk_pull> request (new nano::bulk_pull (error, stream, header_a)); if (!error) { if (node->config.logging.bulk_pull_logging ()) { BOOST_LOG (node->log) << boost::str (boost::format ("Received bulk pull for %1% down to %2%, maximum of %3%") % request->start.to_string () % request->end.to_string () % (request->count ? request->count : std::numeric_limits<double>::infinity ())); } add_request (std::unique_ptr<nano::message> (request.release ())); receive (); } } } void nano::bootstrap_server::receive_bulk_pull_account_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a) { if (!ec) { auto error (false); assert (size_a == header_a.payload_length_bytes ()); nano::bufferstream stream (receive_buffer->data (), size_a); std::unique_ptr<nano::bulk_pull_account> request (new nano::bulk_pull_account (error, stream, header_a)); if (!error) { if (node->config.logging.bulk_pull_logging ()) { BOOST_LOG (node->log) << boost::str (boost::format ("Received bulk pull account for %1% with a minimum amount of %2%") % request->account.to_account () % nano::amount (request->minimum_amount).format_balance (nano::Mxrb_ratio, 10, true)); } add_request (std::unique_ptr<nano::message> (request.release ())); receive (); } } } void nano::bootstrap_server::receive_frontier_req_action (boost::system::error_code const & ec, size_t size_a, nano::message_header const & header_a) { if (!ec) { auto error (false); nano::bufferstream stream (receive_buffer->data (), header_a.payload_length_bytes ()); std::unique_ptr<nano::frontier_req> request (new nano::frontier_req (error, stream, header_a)); if (!error) { if (node->config.logging.bulk_pull_logging ()) { BOOST_LOG (node->log) << boost::str (boost::format ("Received frontier request for %1% with age %2%") % request->start.to_string () % request->age); } add_request (std::unique_ptr<nano::message> (request.release ())); receive (); } } else { if (node->config.logging.network_logging ()) { BOOST_LOG (node->log) << boost::str (boost::format ("Error sending receiving frontier request: %1%") % ec.message ()); } } } void nano::bootstrap_server::add_request (std::unique_ptr<nano::message> message_a) { std::lock_guard<std::mutex> lock (mutex); auto start (requests.empty ()); requests.push (std::move (message_a)); if (start) { run_next (); } } void nano::bootstrap_server::finish_request () { std::lock_guard<std::mutex> lock (mutex); requests.pop (); if (!requests.empty ()) { run_next (); } } namespace { class 
request_response_visitor : public nano::message_visitor { public: request_response_visitor (std::shared_ptr<nano::bootstrap_server> connection_a) : connection (connection_a) { } virtual ~request_response_visitor () = default; void keepalive (nano::keepalive const &) override { assert (false); } void publish (nano::publish const &) override { assert (false); } void confirm_req (nano::confirm_req const &) override { assert (false); } void confirm_ack (nano::confirm_ack const &) override { assert (false); } void bulk_pull (nano::bulk_pull const &) override { auto response (std::make_shared<nano::bulk_pull_server> (connection, std::unique_ptr<nano::bulk_pull> (static_cast<nano::bulk_pull *> (connection->requests.front ().release ())))); response->send_next (); } void bulk_pull_account (nano::bulk_pull_account const &) override { auto response (std::make_shared<nano::bulk_pull_account_server> (connection, std::unique_ptr<nano::bulk_pull_account> (static_cast<nano::bulk_pull_account *> (connection->requests.front ().release ())))); response->send_frontier (); } void bulk_push (nano::bulk_push const &) override { auto response (std::make_shared<nano::bulk_push_server> (connection)); response->receive (); } void frontier_req (nano::frontier_req const &) override { auto response (std::make_shared<nano::frontier_req_server> (connection, std::unique_ptr<nano::frontier_req> (static_cast<nano::frontier_req *> (connection->requests.front ().release ())))); response->send_next (); } void node_id_handshake (nano::node_id_handshake const &) override { assert (false); } std::shared_ptr<nano::bootstrap_server> connection; }; } void nano::bootstrap_server::run_next () { assert (!requests.empty ()); request_response_visitor visitor (shared_from_this ()); requests.front ()->visit (visitor); } /** * Handle a request for the pull of all blocks associated with an account * The account is supplied as the "start" member, and the final block to * send is the "end" member. The "start" member may also be a block * hash, in which case the that hash is used as the start of a chain * to send. To determine if "start" is interpretted as an account or * hash, the ledger is checked to see if the block specified exists, * if not then it is interpretted as an account. 
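* If the optional "count" field is present, it additionally caps the number of blocks returned (max_count is taken from the request in set_current_end and enforced in get_next).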
* * Additionally, if "start" is specified as a block hash the range * is inclusive of that block hash, that is the range will be: * [start, end); In the case that a block hash is not specified the * range will be exclusive of the frontier for that account with * a range of (frontier, end) */ void nano::bulk_pull_server::set_current_end () { include_start = false; assert (request != nullptr); auto transaction (connection->node->store.tx_begin_read ()); if (!connection->node->store.block_exists (transaction, request->end)) { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Bulk pull end block doesn't exist: %1%, sending everything") % request->end.to_string ()); } request->end.clear (); } if (connection->node->store.block_exists (transaction, request->start)) { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Bulk pull request for block hash: %1%") % request->start.to_string ()); } current = request->start; include_start = true; } else { nano::account_info info; auto no_address (connection->node->store.account_get (transaction, request->start, info)); if (no_address) { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Request for unknown account: %1%") % request->start.to_account ()); } current = request->end; } else { current = info.head; if (!request->end.is_zero ()) { auto account (connection->node->ledger.account (transaction, request->end)); if (account != request->start) { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Request for block that is not on account chain: %1% not on %2%") % request->end.to_string () % request->start.to_account ()); } current = request->end; } } } } sent_count = 0; if (request->is_count_present ()) { max_count = request->count; } else { max_count = 0; } } void nano::bulk_pull_server::send_next () { auto block (get_next ()); if (block != nullptr) { { send_buffer->clear (); nano::vectorstream stream (*send_buffer); nano::serialize_block (stream, *block); } auto this_l (shared_from_this ()); if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Sending block: %1%") % block->hash ().to_string ()); } connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->sent_action (ec, size_a); }); } else { send_finished (); } } std::shared_ptr<nano::block> nano::bulk_pull_server::get_next () { std::shared_ptr<nano::block> result; bool send_current = false, set_current_to_end = false; /* * Determine if we should reply with a block * * If our cursor is on the final block, we should signal that we * are done by returning a null result. * * Unless we are including the "start" member and this is the * start member, then include it anyway. */ if (current != request->end) { send_current = true; } else if (current == request->end && include_start == true) { send_current = true; /* * We also need to ensure that the next time * are invoked that we return a null result */ set_current_to_end = true; } /* * Account for how many blocks we have provided. 
If this * exceeds the requested maximum, return an empty object * to signal the end of results */ if (max_count != 0 && sent_count >= max_count) { send_current = false; } if (send_current) { auto transaction (connection->node->store.tx_begin_read ()); result = connection->node->store.block_get (transaction, current); if (result != nullptr && set_current_to_end == false) { auto previous (result->previous ()); if (!previous.is_zero ()) { current = previous; } else { current = request->end; } } else { current = request->end; } sent_count++; } /* * Once we have processed "get_next()" once our cursor is no longer on * the "start" member, so this flag is not relevant is always false. */ include_start = false; return result; } void nano::bulk_pull_server::sent_action (boost::system::error_code const & ec, size_t size_a) { if (!ec) { send_next (); } else { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Unable to bulk send block: %1%") % ec.message ()); } } } void nano::bulk_pull_server::send_finished () { send_buffer->clear (); send_buffer->push_back (static_cast<uint8_t> (nano::block_type::not_a_block)); auto this_l (shared_from_this ()); if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << "Bulk sending finished"; } connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->no_block_sent (ec, size_a); }); } void nano::bulk_pull_server::no_block_sent (boost::system::error_code const & ec, size_t size_a) { if (!ec) { assert (size_a == 1); connection->finish_request (); } else { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << "Unable to send not-a-block"; } } } nano::bulk_pull_server::bulk_pull_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::bulk_pull> request_a) : connection (connection_a), request (std::move (request_a)), send_buffer (std::make_shared<std::vector<uint8_t>> ()) { set_current_end (); } /** * Bulk pull blocks related to an account */ void nano::bulk_pull_account_server::set_params () { assert (request != nullptr); /* * Parse the flags */ invalid_request = false; pending_include_address = false; pending_address_only = false; if (request->flags == nano::bulk_pull_account_flags::pending_address_only) { pending_address_only = true; } else if (request->flags == nano::bulk_pull_account_flags::pending_hash_amount_and_address) { /** ** This is the same as "pending_hash_and_amount" but with the ** sending address appended, for UI purposes mainly. 
**/ pending_include_address = true; } else if (request->flags == nano::bulk_pull_account_flags::pending_hash_and_amount) { /** The defaults are set above **/ } else { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Invalid bulk_pull_account flags supplied %1%") % static_cast<uint8_t> (request->flags)); } invalid_request = true; return; } /* * Initialize the current item from the requested account */ current_key.account = request->account; current_key.hash = 0; } void nano::bulk_pull_account_server::send_frontier () { /* * This function is really the entry point into this class, * so handle the invalid_request case by terminating the * request without any response */ if (invalid_request) { connection->finish_request (); return; } /* * Supply the account frontier */ /** ** Establish a database transaction **/ auto stream_transaction (connection->node->store.tx_begin_read ()); /** ** Get account balance and frontier block hash **/ auto account_frontier_hash (connection->node->ledger.latest (stream_transaction, request->account)); auto account_frontier_balance_int (connection->node->ledger.account_balance (stream_transaction, request->account)); nano::uint128_union account_frontier_balance (account_frontier_balance_int); /** ** Write the frontier block hash and balance into a buffer **/ send_buffer->clear (); { nano::vectorstream output_stream (*send_buffer); write (output_stream, account_frontier_hash.bytes); write (output_stream, account_frontier_balance.bytes); } /** ** Send the buffer to the requestor **/ auto this_l (shared_from_this ()); connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->sent_action (ec, size_a); }); } void nano::bulk_pull_account_server::send_next_block () { /* * Get the next item from the queue, it is a tuple with the key (which * contains the account and hash) and data (which contains the amount) */ auto block_data (get_next ()); auto block_info_key (block_data.first.get ()); auto block_info (block_data.second.get ()); if (block_info_key != nullptr) { /* * If we have a new item, emit it to the socket */ send_buffer->clear (); if (pending_address_only) { nano::vectorstream output_stream (*send_buffer); if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Sending address: %1%") % block_info->source.to_string ()); } write (output_stream, block_info->source.bytes); } else { nano::vectorstream output_stream (*send_buffer); if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Sending block: %1%") % block_info_key->hash.to_string ()); } write (output_stream, block_info_key->hash.bytes); write (output_stream, block_info->amount.bytes); if (pending_include_address) { /** ** Write the source address as well, if requested **/ write (output_stream, block_info->source.bytes); } } auto this_l (shared_from_this ()); connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->sent_action (ec, size_a); }); } else { /* * Otherwise, finalize the connection */ if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Done sending blocks")); } send_finished (); } } std::pair<std::unique_ptr<nano::pending_key>, std::unique_ptr<nano::pending_info>> 
nano::bulk_pull_account_server::get_next () { std::pair<std::unique_ptr<nano::pending_key>, std::unique_ptr<nano::pending_info>> result; while (true) { /* * For each iteration of this loop, establish and then * destroy a database transaction, to avoid locking the * database for a prolonged period. */ auto stream_transaction (connection->node->store.tx_begin_read ()); auto stream (connection->node->store.pending_begin (stream_transaction, current_key)); if (stream == nano::store_iterator<nano::pending_key, nano::pending_info> (nullptr)) { break; } nano::pending_key key (stream->first); nano::pending_info info (stream->second); /* * Get the key for the next value, to use in the next call or iteration */ current_key.account = key.account; current_key.hash = key.hash.number () + 1; /* * Finish up if the response is for a different account */ if (key.account != request->account) { break; } /* * Skip entries where the amount is less than the requested * minimum */ if (info.amount < request->minimum_amount) { continue; } /* * If the pending_address_only flag is set, de-duplicate the * responses. The responses are the address of the sender, * so they are are part of the pending table's information * and not key, so we have to de-duplicate them manually. */ if (pending_address_only) { if (!deduplication.insert (info.source).second) { /* * If the deduplication map gets too * large, clear it out. This may * result in some duplicates getting * sent to the client, but we do not * want to commit too much memory */ if (deduplication.size () > 4096) { deduplication.clear (); } continue; } } result.first = std::unique_ptr<nano::pending_key> (new nano::pending_key (key)); result.second = std::unique_ptr<nano::pending_info> (new nano::pending_info (info)); break; } return result; } void nano::bulk_pull_account_server::sent_action (boost::system::error_code const & ec, size_t size_a) { if (!ec) { send_next_block (); } else { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Unable to bulk send block: %1%") % ec.message ()); } } } void nano::bulk_pull_account_server::send_finished () { /* * The "bulk_pull_account" final sequence is a final block of all * zeros. If we are sending only account public keys (with the * "pending_address_only" flag) then it will be 256-bits of zeros, * otherwise it will be either 384-bits of zeros (if the * "pending_include_address" flag is not set) or 640-bits of zeros * (if that flag is set). 
*/ send_buffer->clear (); { nano::vectorstream output_stream (*send_buffer); nano::uint256_union account_zero (0); nano::uint128_union balance_zero (0); write (output_stream, account_zero.bytes); if (!pending_address_only) { write (output_stream, balance_zero.bytes); if (pending_include_address) { write (output_stream, account_zero.bytes); } } } auto this_l (shared_from_this ()); if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << "Bulk sending for an account finished"; } connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->complete (ec, size_a); }); } void nano::bulk_pull_account_server::complete (boost::system::error_code const & ec, size_t size_a) { if (!ec) { if (pending_address_only) { assert (size_a == 32); } else { if (pending_include_address) { assert (size_a == 80); } else { assert (size_a == 48); } } connection->finish_request (); } else { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << "Unable to pending-as-zero"; } } } nano::bulk_pull_account_server::bulk_pull_account_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::bulk_pull_account> request_a) : connection (connection_a), request (std::move (request_a)), send_buffer (std::make_shared<std::vector<uint8_t>> ()), current_key (0, 0) { /* * Setup the streaming response for the first call to "send_frontier" and "send_next_block" */ set_params (); } nano::bulk_push_server::bulk_push_server (std::shared_ptr<nano::bootstrap_server> const & connection_a) : receive_buffer (std::make_shared<std::vector<uint8_t>> ()), connection (connection_a) { receive_buffer->resize (256); } void nano::bulk_push_server::receive () { if (connection->node->bootstrap_initiator.in_progress ()) { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << "Aborting bulk_push because a bootstrap attempt is in progress"; } } else { auto this_l (shared_from_this ()); connection->socket->async_read (receive_buffer, 1, [this_l](boost::system::error_code const & ec, size_t size_a) { if (!ec) { this_l->received_type (); } else { if (this_l->connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (this_l->connection->node->log) << boost::str (boost::format ("Error receiving block type: %1%") % ec.message ()); } } }); } } void nano::bulk_push_server::received_type () { auto this_l (shared_from_this ()); nano::block_type type (static_cast<nano::block_type> (receive_buffer->data ()[0])); switch (type) { case nano::block_type::send: { connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::send, nano::stat::dir::in); connection->socket->async_read (receive_buffer, nano::send_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::receive: { connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::receive, nano::stat::dir::in); connection->socket->async_read (receive_buffer, nano::receive_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::open: { connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::open, nano::stat::dir::in); connection->socket->async_read (receive_buffer, nano::open_block::size, [this_l, type](boost::system::error_code const 
& ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::change: { connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::change, nano::stat::dir::in); connection->socket->async_read (receive_buffer, nano::change_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::state: { connection->node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::state_block, nano::stat::dir::in); connection->socket->async_read (receive_buffer, nano::state_block::size, [this_l, type](boost::system::error_code const & ec, size_t size_a) { this_l->received_block (ec, size_a, type); }); break; } case nano::block_type::not_a_block: { connection->finish_request (); break; } default: { if (connection->node->config.logging.network_packet_logging ()) { BOOST_LOG (connection->node->log) << "Unknown type received as block type"; } break; } } } void nano::bulk_push_server::received_block (boost::system::error_code const & ec, size_t size_a, nano::block_type type_a) { if (!ec) { nano::bufferstream stream (receive_buffer->data (), size_a); auto block (nano::deserialize_block (stream, type_a)); if (block != nullptr && !nano::work_validate (*block)) { connection->node->process_active (std::move (block)); receive (); } else { if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << "Error deserializing block received from pull request"; } } } } nano::frontier_req_server::frontier_req_server (std::shared_ptr<nano::bootstrap_server> const & connection_a, std::unique_ptr<nano::frontier_req> request_a) : connection (connection_a), current (request_a->start.number () - 1), frontier (0), request (std::move (request_a)), send_buffer (std::make_shared<std::vector<uint8_t>> ()), count (0) { next (); } void nano::frontier_req_server::send_next () { if (!current.is_zero () && count <= request->count) { { send_buffer->clear (); nano::vectorstream stream (*send_buffer); write (stream, current.bytes); write (stream, frontier.bytes); } auto this_l (shared_from_this ()); if (connection->node->config.logging.bulk_pull_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Sending frontier for %1% %2%") % current.to_account () % frontier.to_string ()); } next (); connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->sent_action (ec, size_a); }); } else { send_finished (); } } void nano::frontier_req_server::send_finished () { { send_buffer->clear (); nano::vectorstream stream (*send_buffer); nano::uint256_union zero (0); write (stream, zero.bytes); write (stream, zero.bytes); } auto this_l (shared_from_this ()); if (connection->node->config.logging.network_logging ()) { BOOST_LOG (connection->node->log) << "Frontier sending finished"; } connection->socket->async_write (send_buffer, [this_l](boost::system::error_code const & ec, size_t size_a) { this_l->no_block_sent (ec, size_a); }); } void nano::frontier_req_server::no_block_sent (boost::system::error_code const & ec, size_t size_a) { if (!ec) { connection->finish_request (); } else { if (connection->node->config.logging.network_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error sending frontier finish: %1%") % ec.message ()); } } } void nano::frontier_req_server::sent_action (boost::system::error_code const & ec, size_t size_a) { if (!ec) { count++; 
send_next (); } else { if (connection->node->config.logging.network_logging ()) { BOOST_LOG (connection->node->log) << boost::str (boost::format ("Error sending frontier pair: %1%") % ec.message ()); } } } void nano::frontier_req_server::next () { // Filling accounts deque to prevent often read transactions if (accounts.empty ()) { auto now (nano::seconds_since_epoch ()); bool skip_old (request->age != std::numeric_limits<decltype (request->age)>::max ()); size_t max_size (128); auto transaction (connection->node->store.tx_begin_read ()); for (auto i (connection->node->store.latest_begin (transaction, current.number () + 1)), n (connection->node->store.latest_end ()); i != n && accounts.size () != max_size; ++i) { nano::account_info info (i->second); if (!skip_old || (now - info.modified) <= request->age) { accounts.push_back (std::make_pair (nano::account (i->first), info.head)); } } /* If loop breaks before max_size, then latest_end () is reached Add empty record to finish frontier_req_server */ if (accounts.size () != max_size) { accounts.push_back (std::make_pair (nano::account (0), nano::block_hash (0))); } } // Retrieving accounts from deque auto account_pair (accounts.front ()); accounts.pop_front (); current = account_pair.first; frontier = account_pair.second; }
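The doc comments on bulk_pull_server above describe a [start, end) walk when "start" is given as a block hash: the start block is included, then "previous" links are followed until "end" (exclusive) or the open block is reached. A minimal sketch of that cursor logic in plain Python, with a hypothetical `previous` map standing in for the block store (an illustration of the described semantics, not nano code):

previous = {"h3": "h2", "h2": "h1", "h1": None}  # block hash -> previous block hash (None = open block)

def walk_chain(start, end=None):
    # Yield hashes from `start` back toward `end`; `start` is included,
    # `end` is excluded, and a None previous link marks the end of the chain.
    current = start
    while current is not None and current != end:
        yield current
        current = previous[current]

print(list(walk_chain("h3", "h1")))  # ['h3', 'h2']        -> the [start, end) range
print(list(walk_chain("h3")))        # ['h3', 'h2', 'h1']  -> the entire chain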
1
14,895
Should something use the lock passed in?
nanocurrency-nano-node
cpp
@@ -101,7 +101,7 @@ class Docker(base.Base): When attempting to utilize a container image with `systemd`_ as your init system inside the container to simulate a real machine, make sure to set - the ``privileged``, ``volume_mounts``, ``command``, and ``environment`` + the ``privileged``, ``volumes``, ``command``, and ``environment`` values. An example using the ``centos:7`` image is below: .. note:: Do note that running containers in privileged mode is considerably
1
# Copyright (c) 2015-2018 Cisco Systems, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. from __future__ import absolute_import import os from molecule import logger from molecule.driver import base from molecule.util import sysexit_with_message log = logger.get_logger(__name__) class Docker(base.Base): """ The class responsible for managing `Docker`_ containers. `Docker`_ is the default driver used in Molecule. Molecule leverages Ansible's `docker_container`_ module, by mapping variables from ``molecule.yml`` into ``create.yml`` and ``destroy.yml``. .. _`docker_container`: https://docs.ansible.com/ansible/latest/docker_container_module.html .. _`Docker Security Configuration`: https://docs.docker.com/engine/reference/run/#security-configuration .. code-block:: yaml driver: name: docker platforms: - name: instance hostname: instance image: image_name:tag dockerfile: Dockerfile.j2 pull: True|False pre_build_image: True|False registry: url: registry.example.com credentials: username: $USERNAME password: $PASSWORD email: [email protected] override_command: True|False command: sleep infinity pid_mode: host privileged: True|False security_opts: - seccomp=unconfined volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro tmpfs: - /tmp - /run capabilities: - SYS_ADMIN exposed_ports: - 53/udp - 53/tcp published_ports: - 0.0.0.0:8053:53/udp - 0.0.0.0:8053:53/tcp ulimits: - nofile:262144:262144 dns_servers: - 8.8.8.8 networks: - name: foo - name: bar network_mode: host purge_networks: true docker_host: tcp://localhost:12376 env: FOO: bar restart_policy: on-failure restart_retries: 1 buildargs: http_proxy: http://proxy.example.com:8080/ If specifying the `CMD`_ directive in your ``Dockerfile.j2`` or consuming a built image which declares a ``CMD`` directive, then you must set ``override_command: False``. Otherwise, Molecule takes care to honour the value of the ``command`` key or uses the default of ``bash -c "while true; do sleep 10000; done"`` to run the container until it is provisioned. When attempting to utilize a container image with `systemd`_ as your init system inside the container to simulate a real machine, make sure to set the ``privileged``, ``volume_mounts``, ``command``, and ``environment`` values. An example using the ``centos:7`` image is below: .. note:: Do note that running containers in privileged mode is considerably less secure. For details, please reference `Docker Security Configuration`_ .. 
code-block:: yaml platforms: - name: instance image: centos:7 privileged: true volume_mounts: - "/sys/fs/cgroup:/sys/fs/cgroup:rw" command: "/usr/sbin/init" environment: container: docker .. code-block:: bash $ pip install molecule[docker] When pulling from a private registry, the username and password must be exported as environment variables in the current shell. The only supported variables are $USERNAME and $PASSWORD. .. code-block:: bash $ export USERNAME=foo $ export PASSWORD=bar Provide a list of files Molecule will preserve, relative to the scenario ephemeral directory, after any ``destroy`` subcommand execution. .. code-block:: yaml driver: name: docker safe_files: - foo .. _`Docker`: https://www.docker.com .. _`systemd`: https://www.freedesktop.org/wiki/Software/systemd/ .. _`CMD`: https://docs.docker.com/engine/reference/builder/#cmd """ # noqa def __init__(self, config): super(Docker, self).__init__(config) self._name = 'docker' @property def name(self): return self._name @name.setter def name(self, value): self._name = value @property def login_cmd_template(self): return ('docker exec ' '-e COLUMNS={columns} ' '-e LINES={lines} ' '-e TERM=bash ' '-e TERM=xterm ' '-ti {instance} bash') @property def default_safe_files(self): return [ os.path.join(self._config.scenario.ephemeral_directory, 'Dockerfile') ] @property def default_ssh_connection_options(self): return [] def login_options(self, instance_name): return {'instance': instance_name} def ansible_connection_options(self, instance_name): return {'ansible_connection': 'docker'} def sanity_checks(self): """Implement Docker driver sanity checks.""" if self._config.state.sanity_checked: return log.info("Sanity checks: '{}'".format(self._name)) try: from ansible.module_utils.docker_common import HAS_DOCKER_PY if not HAS_DOCKER_PY: msg = ('Missing Docker driver dependency. Please ' "install via 'molecule[docker]' or refer to " 'your INSTALL.rst driver documentation file') sysexit_with_message(msg) except ImportError: msg = ('Unable to import Ansible. Please ensure ' 'that Ansible is installed') sysexit_with_message(msg) try: import docker import requests docker_client = docker.from_env() docker_client.ping() except requests.exceptions.ConnectionError: msg = ('Unable to contact the Docker daemon. ' 'Please refer to https://docs.docker.com/config/daemon/ ' 'for managing the daemon') sysexit_with_message(msg) self._config.state.change_state('sanity_checked', True)
1
9,308
Is this dependent on some version of Docker? Do we need to include both, or is `volumes` deprecated (or just wrong!?)?
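For reference, the diff above settles on `volumes` in the prose of the docstring. A minimal, hypothetical sketch of the systemd-style platform entry it documents, expressed as the Python data Molecule would load from molecule.yml (keys and values are taken from the docstring example; this is illustrative only, not a statement about Docker version support):

# Illustrative only: the systemd example from the docstring, using the
# `volumes` key that the diff above adopts in the prose.
platform = {
    "name": "instance",
    "image": "centos:7",
    "privileged": True,
    "volumes": ["/sys/fs/cgroup:/sys/fs/cgroup:rw"],
    "command": "/usr/sbin/init",
    "environment": {"container": "docker"},
}
print(platform["volumes"])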
ansible-community-molecule
py
@@ -1306,7 +1306,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission try (ZkShardTerms zkShardTerms = new ZkShardTerms(collectionName, slice.getName(), zkController.getZkClient())) { // if an active replica is the leader, then all is fine already Replica leader = slice.getLeader(); - if (leader != null && leader.getState() == State.ACTIVE) { + if (leader != null && leader.getState() == State.ACTIVE && zkShardTerms.getHighestTerm() == zkShardTerms.getTerm(leader.getName())) { throw new SolrException(ErrorCode.SERVER_ERROR, "The shard already has an active leader. Force leader is not applicable. State: " + slice); }
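The diff above narrows the guard in the force-leader path (see the FORCELEADER_OP handler in the file below): the request is now rejected only when the active leader also holds the highest registered term for the shard, so a leader with a lagging term no longer blocks a forced election. A toy Python sketch of that predicate, with made-up names standing in for the ZkShardTerms lookups (not Solr APIs):

def force_leader_not_applicable(leader_state, leader_name, terms):
    # Reject force-leader only if the leader is active *and* its term is
    # already the highest term recorded for the shard (illustrative only).
    return (
        leader_state == "active"
        and bool(terms)
        and terms[leader_name] == max(terms.values())
    )

print(force_leader_not_applicable("active", "replica1", {"replica1": 7, "replica2": 7}))  # True  -> error out
print(force_leader_not_applicable("active", "replica1", {"replica1": 3, "replica2": 7}))  # False -> proceed with force leader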
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.handler.admin; import java.io.IOException; import java.lang.invoke.MethodHandles; import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.solr.api.Api; import org.apache.solr.client.solrj.SolrResponse; import org.apache.solr.client.solrj.impl.HttpSolrClient; import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder; import org.apache.solr.client.solrj.request.CollectionAdminRequest; import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestSyncShard; import org.apache.solr.client.solrj.response.RequestStatusState; import org.apache.solr.client.solrj.util.SolrIdentifierValidator; import org.apache.solr.cloud.OverseerSolrResponse; import org.apache.solr.cloud.OverseerSolrResponseSerializer; import org.apache.solr.cloud.OverseerTaskQueue; import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent; import org.apache.solr.cloud.ZkController; import org.apache.solr.cloud.ZkController.NotInClusterStateException; import org.apache.solr.cloud.ZkShardTerms; import org.apache.solr.cloud.api.collections.ReindexCollectionCmd; import org.apache.solr.cloud.api.collections.RoutedAlias; import org.apache.solr.cloud.overseer.SliceMutator; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.cloud.Aliases; import org.apache.solr.common.cloud.ClusterProperties; import org.apache.solr.common.cloud.ClusterState; import org.apache.solr.common.cloud.CollectionProperties; import org.apache.solr.common.cloud.DocCollection; import org.apache.solr.common.cloud.ImplicitDocRouter; import org.apache.solr.common.cloud.Replica; import org.apache.solr.common.cloud.Replica.State; import org.apache.solr.common.cloud.Slice; import org.apache.solr.common.cloud.SolrZkClient; import org.apache.solr.common.cloud.ZkCmdExecutor; import org.apache.solr.common.cloud.ZkCoreNodeProps; import org.apache.solr.common.cloud.ZkNodeProps; import org.apache.solr.common.cloud.ZkStateReader; import org.apache.solr.common.params.CollectionAdminParams; import org.apache.solr.common.params.CollectionParams; import org.apache.solr.common.params.CollectionParams.CollectionAction; import 
org.apache.solr.common.params.CommonParams; import org.apache.solr.common.params.CoreAdminParams; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.common.util.Utils; import org.apache.solr.core.CloudConfig; import org.apache.solr.core.CoreContainer; import org.apache.solr.core.backup.repository.BackupRepository; import org.apache.solr.core.snapshots.CollectionSnapshotMetaData; import org.apache.solr.core.snapshots.SolrSnapshotManager; import org.apache.solr.handler.RequestHandlerBase; import org.apache.solr.logging.MDCLoggingContext; import org.apache.solr.request.LocalSolrQueryRequest; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; import org.apache.solr.security.AuthorizationContext; import org.apache.solr.security.PermissionNameProvider; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.solr.client.solrj.response.RequestStatusState.COMPLETED; import static org.apache.solr.client.solrj.response.RequestStatusState.FAILED; import static org.apache.solr.client.solrj.response.RequestStatusState.NOT_FOUND; import static org.apache.solr.client.solrj.response.RequestStatusState.RUNNING; import static org.apache.solr.client.solrj.response.RequestStatusState.SUBMITTED; import static org.apache.solr.cloud.Overseer.QUEUE_OPERATION; import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_PROP_PREFIX; import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET; import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY; import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE; import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.NUM_SLICES; import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES; import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_IF_DOWN; import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.REQUESTID; import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARDS_PROP; import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARD_UNIQUE; import static org.apache.solr.cloud.api.collections.RoutedAlias.CREATE_COLLECTION_PREFIX; import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST; import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER; import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP; import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS; import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP; import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP; import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS; import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR; import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP; import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE; import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP; import static 
org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS; import static org.apache.solr.common.params.CollectionAdminParams.ALIAS; import static org.apache.solr.common.params.CollectionAdminParams.COLLECTION; import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF; import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP; import static org.apache.solr.common.params.CollectionAdminParams.FOLLOW_ALIASES; import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_NAME; import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_VALUE; import static org.apache.solr.common.params.CollectionAdminParams.SKIP_NODE_ASSIGNMENT; import static org.apache.solr.common.params.CollectionParams.CollectionAction.*; import static org.apache.solr.common.params.CommonAdminParams.ASYNC; import static org.apache.solr.common.params.CommonAdminParams.IN_PLACE_MOVE; import static org.apache.solr.common.params.CommonAdminParams.NUM_SUB_SHARDS; import static org.apache.solr.common.params.CommonAdminParams.SPLIT_BY_PREFIX; import static org.apache.solr.common.params.CommonAdminParams.SPLIT_FUZZ; import static org.apache.solr.common.params.CommonAdminParams.SPLIT_METHOD; import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE; import static org.apache.solr.common.params.CommonParams.NAME; import static org.apache.solr.common.params.CommonParams.TIMING; import static org.apache.solr.common.params.CommonParams.VALUE_LONG; import static org.apache.solr.common.params.CoreAdminParams.DATA_DIR; import static org.apache.solr.common.params.CoreAdminParams.DELETE_DATA_DIR; import static org.apache.solr.common.params.CoreAdminParams.DELETE_INDEX; import static org.apache.solr.common.params.CoreAdminParams.DELETE_INSTANCE_DIR; import static org.apache.solr.common.params.CoreAdminParams.DELETE_METRICS_HISTORY; import static org.apache.solr.common.params.CoreAdminParams.INSTANCE_DIR; import static org.apache.solr.common.params.CoreAdminParams.ULOG_DIR; import static org.apache.solr.common.params.ShardParams._ROUTE_; import static org.apache.solr.common.util.StrUtils.formatString; public class CollectionsHandler extends RequestHandlerBase implements PermissionNameProvider { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); protected final CoreContainer coreContainer; private final CollectionHandlerApi v2Handler; public CollectionsHandler() { super(); // Unlike most request handlers, CoreContainer initialization // should happen in the constructor... this.coreContainer = null; v2Handler = new CollectionHandlerApi(this); } /** * Overloaded ctor to inject CoreContainer into the handler. * * @param coreContainer Core Container of the solr webapp installed. */ public CollectionsHandler(final CoreContainer coreContainer) { this.coreContainer = coreContainer; v2Handler = new CollectionHandlerApi(this); } @Override public PermissionNameProvider.Name getPermissionName(AuthorizationContext ctx) { String action = ctx.getParams().get("action"); if (action == null) return PermissionNameProvider.Name.COLL_READ_PERM; CollectionParams.CollectionAction collectionAction = CollectionParams.CollectionAction.get(action); if (collectionAction == null) return null; return collectionAction.isWrite ? 
PermissionNameProvider.Name.COLL_EDIT_PERM : PermissionNameProvider.Name.COLL_READ_PERM; } @Override final public void init(@SuppressWarnings({"rawtypes"})NamedList args) { } /** * The instance of CoreContainer this handler handles. This should be the CoreContainer instance that created this * handler. * * @return a CoreContainer instance */ public CoreContainer getCoreContainer() { return this.coreContainer; } protected void copyFromClusterProp(Map<String, Object> props, String prop) throws IOException { if (props.get(prop) != null) return;//if it's already specified , return Object defVal = new ClusterProperties(coreContainer.getZkController().getZkStateReader().getZkClient()) .getClusterProperty(ImmutableList.of(CollectionAdminParams.DEFAULTS, CollectionAdminParams.COLLECTION, prop), null); if (defVal != null) props.put(prop, String.valueOf(defVal)); } @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { // Make sure the cores is enabled CoreContainer cores = checkErrors(); // Pick the action SolrParams params = req.getParams(); String a = params.get(CoreAdminParams.ACTION); if (a != null) { CollectionAction action = CollectionAction.get(a); if (action == null) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown action: " + a); } CollectionOperation operation = CollectionOperation.get(action); if (log.isDebugEnabled()) { log.debug("Invoked Collection Action :{} with params {} and sendToOCPQueue={}" , action.toLower(), req.getParamString(), operation.sendToOCPQueue); } MDCLoggingContext.setCollection(req.getParams().get(COLLECTION)); invokeAction(req, rsp, cores, action, operation); } else { throw new SolrException(ErrorCode.BAD_REQUEST, "action is a required param"); } rsp.setHttpCaching(false); } protected CoreContainer checkErrors() { CoreContainer cores = getCoreContainer(); if (cores == null) { throw new SolrException(ErrorCode.BAD_REQUEST, "Core container instance missing"); } // Make sure that the core is ZKAware if (!cores.isZooKeeperAware()) { throw new SolrException(ErrorCode.BAD_REQUEST, "Solr instance is not running in SolrCloud mode."); } return cores; } @SuppressWarnings({"unchecked"}) void invokeAction(SolrQueryRequest req, SolrQueryResponse rsp, CoreContainer cores, CollectionAction action, CollectionOperation operation) throws Exception { if (!coreContainer.isZooKeeperAware()) { throw new SolrException(BAD_REQUEST, "Invalid request. collections can be accessed only in SolrCloud mode"); } Map<String, Object> props = operation.execute(req, rsp, this); if (props == null) { return; } String asyncId = req.getParams().get(ASYNC); if (asyncId != null) { props.put(ASYNC, asyncId); } props.put(QUEUE_OPERATION, operation.action.toLower()); if (operation.sendToOCPQueue) { ZkNodeProps zkProps = new ZkNodeProps(props); SolrResponse overseerResponse = sendToOCPQueue(zkProps, operation.timeOut); rsp.getValues().addAll(overseerResponse.getResponse()); Exception exp = overseerResponse.getException(); if (exp != null) { rsp.setException(exp); } //TODO yuck; shouldn't create-collection at the overseer do this? 
(conditionally perhaps) if (action.equals(CollectionAction.CREATE) && asyncId == null) { if (rsp.getException() == null) { waitForActiveCollection(zkProps.getStr(NAME), cores, overseerResponse); } } } else { // submits and doesn't wait for anything (no response) coreContainer.getZkController().getOverseer().offerStateUpdate(Utils.toJSON(props)); } } static final Set<String> KNOWN_ROLES = ImmutableSet.of("overseer"); public static long DEFAULT_COLLECTION_OP_TIMEOUT = 180 * 1000; public SolrResponse sendToOCPQueue(ZkNodeProps m) throws KeeperException, InterruptedException { return sendToOCPQueue(m, DEFAULT_COLLECTION_OP_TIMEOUT); } public SolrResponse sendToOCPQueue(ZkNodeProps m, long timeout) throws KeeperException, InterruptedException { String operation = m.getStr(QUEUE_OPERATION); if (operation == null) { throw new SolrException(ErrorCode.BAD_REQUEST, "missing key " + QUEUE_OPERATION); } if (m.get(ASYNC) != null) { String asyncId = m.getStr(ASYNC); if (asyncId.equals("-1")) { throw new SolrException(ErrorCode.BAD_REQUEST, "requestid can not be -1. It is reserved for cleanup purposes."); } NamedList<String> r = new NamedList<>(); if (coreContainer.getZkController().claimAsyncId(asyncId)) { boolean success = false; try { coreContainer.getZkController().getOverseerCollectionQueue() .offer(Utils.toJSON(m)); success = true; } finally { if (!success) { try { coreContainer.getZkController().clearAsyncId(asyncId); } catch (Exception e) { // let the original exception bubble up log.error("Unable to release async ID={}", asyncId, e); SolrZkClient.checkInterrupted(e); } } } } else { r.add("error", "Task with the same requestid already exists."); } r.add(CoreAdminParams.REQUESTID, (String) m.get(ASYNC)); return new OverseerSolrResponse(r); } long time = System.nanoTime(); QueueEvent event = coreContainer.getZkController() .getOverseerCollectionQueue() .offer(Utils.toJSON(m), timeout); if (event.getBytes() != null) { return OverseerSolrResponseSerializer.deserialize(event.getBytes()); } else { if (System.nanoTime() - time >= TimeUnit.NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS)) { throw new SolrException(ErrorCode.SERVER_ERROR, operation + " the collection time out:" + timeout / 1000 + "s"); } else if (event.getWatchedEvent() != null) { throw new SolrException(ErrorCode.SERVER_ERROR, operation + " the collection error [Watcher fired on path: " + event.getWatchedEvent().getPath() + " state: " + event.getWatchedEvent().getState() + " type " + event.getWatchedEvent().getType() + "]"); } else { throw new SolrException(ErrorCode.SERVER_ERROR, operation + " the collection unknown case"); } } } private boolean overseerCollectionQueueContains(String asyncId) throws KeeperException, InterruptedException { OverseerTaskQueue collectionQueue = coreContainer.getZkController().getOverseerCollectionQueue(); return collectionQueue.containsTaskWithRequestId(ASYNC, asyncId); } /** * Copy prefixed params into a map. There must only be one value for these parameters. * * @param params The source of params from which copies should be made * @param props The map into which param names and values should be copied as keys and values respectively * @param prefix The prefix to select. * @return the map supplied in the props parameter, modified to contain the prefixed params. 
*/ private static Map<String, Object> copyPropertiesWithPrefix(SolrParams params, Map<String, Object> props, String prefix) { Iterator<String> iter = params.getParameterNamesIterator(); while (iter.hasNext()) { String param = iter.next(); if (param.startsWith(prefix)) { final String[] values = params.getParams(param); if (values.length != 1) { throw new SolrException(BAD_REQUEST, "Only one value can be present for parameter " + param); } props.put(param, values[0]); } } return props; } public static ModifiableSolrParams params(String... params) { ModifiableSolrParams msp = new ModifiableSolrParams(); for (int i = 0; i < params.length; i += 2) { msp.add(params[i], params[i + 1]); } return msp; } //////////////////////// SolrInfoMBeans methods ////////////////////// @Override public String getDescription() { return "Manage SolrCloud Collections"; } @Override public Category getCategory() { return Category.ADMIN; } private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException { SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient(); ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout()); cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk); cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk); try { String path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml"; byte[] data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml")); assert data != null && data.length > 0; cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk); path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/solrconfig.xml"; data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml")); assert data != null && data.length > 0; cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk); } catch (IOException e) { throw new SolrException(ErrorCode.SERVER_ERROR, e); } } private static void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) { SimpleOrderedMap<String> status = new SimpleOrderedMap<>(); status.add("state", state.getKey()); status.add("msg", msg); results.add("status", status); } public enum CollectionOperation implements CollectionOp { CREATE_OP(CREATE, (req, rsp, h) -> { Map<String, Object> props = copy(req.getParams().required(), null, NAME); props.put("fromApi", "true"); copy(req.getParams(), props, REPLICATION_FACTOR, COLL_CONF, NUM_SLICES, CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE, SHARDS_PROP, PULL_REPLICAS, TLOG_REPLICAS, NRT_REPLICAS, WAIT_FOR_FINAL_STATE, ALIAS); if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) { //TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. 
See SOLR-11676 for more details int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR)); int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS)); if (replicationFactor != nrtReplicas) { throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing"); } } if (props.get(REPLICATION_FACTOR) != null) { props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR)); } else if (props.get(NRT_REPLICAS) != null) { props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS)); } final String collectionName = SolrIdentifierValidator.validateCollectionName((String) props.get(NAME)); final String shardsParam = (String) props.get(SHARDS_PROP); if (StringUtils.isNotEmpty(shardsParam)) { verifyShardsParam(shardsParam); } if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) { //We must always create a .system collection with only a single shard props.put(NUM_SLICES, 1); props.remove(SHARDS_PROP); createSysConfigSet(h.coreContainer); } if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES); for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS)) h.copyFromClusterProp(props, prop); copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX); return copyPropertiesWithPrefix(req.getParams(), props, "router."); }), @SuppressWarnings({"unchecked"}) COLSTATUS_OP(COLSTATUS, (req, rsp, h) -> { Map<String, Object> props = copy(req.getParams(), null, COLLECTION_PROP, ColStatus.CORE_INFO_PROP, ColStatus.SEGMENTS_PROP, ColStatus.FIELD_INFO_PROP, ColStatus.RAW_SIZE_PROP, ColStatus.RAW_SIZE_SUMMARY_PROP, ColStatus.RAW_SIZE_DETAILS_PROP, ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP, ColStatus.SIZE_INFO_PROP); new ColStatus(h.coreContainer.getSolrClientCache(), h.coreContainer.getZkController().getZkStateReader().getClusterState(), new ZkNodeProps(props)) .getColStatus(rsp.getValues()); return null; }), DELETE_OP(DELETE, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, NAME); return copy(req.getParams(), map, FOLLOW_ALIASES); }), // XXX should this command support followAliases? RELOAD_OP(RELOAD, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, NAME); return copy(req.getParams(), map); }), RENAME_OP(RENAME, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET); return copy(req.getParams(), map, FOLLOW_ALIASES); }), REINDEXCOLLECTION_OP(REINDEXCOLLECTION, (req, rsp, h) -> { Map<String, Object> m = copy(req.getParams().required(), null, NAME); copy(req.getParams(), m, ReindexCollectionCmd.COMMAND, ReindexCollectionCmd.REMOVE_SOURCE, ReindexCollectionCmd.TARGET, ZkStateReader.CONFIGNAME_PROP, NUM_SLICES, NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS, REPLICATION_FACTOR, CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE, "shards", CommonParams.ROWS, CommonParams.Q, CommonParams.FL, FOLLOW_ALIASES); if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) { m.put(ZkStateReader.CONFIGNAME_PROP, req.getParams().get("collection." 
+ ZkStateReader.CONFIGNAME_PROP)); } copyPropertiesWithPrefix(req.getParams(), m, "router."); return m; }), SYNCSHARD_OP(SYNCSHARD, (req, rsp, h) -> { String extCollection = req.getParams().required().get("collection"); String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection); String shard = req.getParams().required().get("shard"); ClusterState clusterState = h.coreContainer.getZkController().getClusterState(); DocCollection docCollection = clusterState.getCollection(collection); ZkNodeProps leaderProps = docCollection.getLeader(shard); ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps); try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl()) .withConnectionTimeout(15000) .withSocketTimeout(60000) .build()) { RequestSyncShard reqSyncShard = new RequestSyncShard(); reqSyncShard.setCollection(collection); reqSyncShard.setShard(shard); reqSyncShard.setCoreName(nodeProps.getCoreName()); client.request(reqSyncShard); } return null; }), @SuppressWarnings({"unchecked"}) CREATEALIAS_OP(CREATEALIAS, (req, rsp, h) -> { String alias = req.getParams().get(NAME); SolrIdentifierValidator.validateAliasName(alias); String collections = req.getParams().get("collections"); RoutedAlias routedAlias = null; Exception ex = null; HashMap<String,Object> possiblyModifiedParams = new HashMap<>(); try { // note that RA specific validation occurs here. routedAlias = RoutedAlias.fromProps(alias, req.getParams().toMap(possiblyModifiedParams)); } catch (SolrException e) { // we'll throw this later if we are in fact creating a routed alias. ex = e; } ModifiableSolrParams finalParams = new ModifiableSolrParams(); for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) { if (entry.getValue().getClass().isArray() ) { // v2 api hits this case for (Object o : (Object[]) entry.getValue()) { finalParams.add(entry.getKey(),o.toString()); } } else { finalParams.add(entry.getKey(),entry.getValue().toString()); } } if (collections != null) { if (routedAlias != null) { throw new SolrException(BAD_REQUEST, "Collections cannot be specified when creating a routed alias."); } else { ////////////////////////////////////// // Regular alias creation indicated // ////////////////////////////////////// return copy(finalParams.required(), null, NAME, "collections"); } } ///////////////////////////////////////////////// // We are creating a routed alias from here on // ///////////////////////////////////////////////// // If our prior creation attempt had issues expose them now. if (ex != null) { throw ex; } // Now filter out just the parameters we care about from the request assert routedAlias != null; Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams()); copy(finalParams, result, routedAlias.getOptionalParams()); ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix // add to result params that start with "create-collection.". // Additionally, save these without the prefix to createCollParams for (Map.Entry<String, String[]> entry : finalParams) { final String p = entry.getKey(); if (p.startsWith(CREATE_COLLECTION_PREFIX)) { // This is what SolrParams#getAll(Map, Collection)} does final String[] v = entry.getValue(); if (v.length == 1) { result.put(p, v[0]); } else { result.put(p, v); } createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v); } } // Verify that the create-collection prefix'ed params appear to be valid. 
if (createCollParams.get(NAME) != null) { throw new SolrException(BAD_REQUEST, "routed aliases calculate names for their " + "dependent collections, you cannot specify the name."); } if (createCollParams.get(COLL_CONF) == null) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "We require an explicit " + COLL_CONF); } // note: could insist on a config name here as well.... or wait to throw at overseer createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation CREATE_OP.execute(new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results return result; }), DELETEALIAS_OP(DELETEALIAS, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)), /** * Change properties for an alias (use CREATEALIAS_OP to change the actual value of the alias) */ ALIASPROP_OP(ALIASPROP, (req, rsp, h) -> { Map<String, Object> params = copy(req.getParams().required(), null, NAME); // Note: success/no-op in the event of no properties supplied is intentional. Keeps code simple and one less case // for api-callers to check for. return convertPrefixToMap(req.getParams(), params, "property"); }), /** * List the aliases and associated properties. */ @SuppressWarnings({"unchecked"}) LISTALIASES_OP(LISTALIASES, (req, rsp, h) -> { ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader(); // if someone calls listAliases, lets ensure we return an up to date response zkStateReader.aliasesManager.update(); Aliases aliases = zkStateReader.getAliases(); if (aliases != null) { // the aliases themselves... rsp.getValues().add("aliases", aliases.getCollectionAliasMap()); // Any properties for the above aliases. Map<String, Map<String, String>> meta = new LinkedHashMap<>(); for (String alias : aliases.getCollectionAliasListMap().keySet()) { Map<String, String> collectionAliasProperties = aliases.getCollectionAliasProperties(alias); if (!collectionAliasProperties.isEmpty()) { meta.put(alias, collectionAliasProperties); } } rsp.getValues().add("properties", meta); } return null; }), SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, true, (req, rsp, h) -> { String name = req.getParams().required().get(COLLECTION_PROP); // TODO : add support for multiple shards String shard = req.getParams().get(SHARD_ID_PROP); String rangesStr = req.getParams().get(CoreAdminParams.RANGES); String splitKey = req.getParams().get("split.key"); String numSubShards = req.getParams().get(NUM_SUB_SHARDS); String fuzz = req.getParams().get(SPLIT_FUZZ); if (splitKey == null && shard == null) { throw new SolrException(ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified."); } if (splitKey != null && shard != null) { throw new SolrException(ErrorCode.BAD_REQUEST, "Only one of 'shard' or 'split.key' should be specified"); } if (splitKey != null && rangesStr != null) { throw new SolrException(ErrorCode.BAD_REQUEST, "Only one of 'ranges' or 'split.key' should be specified"); } if (numSubShards != null && (splitKey != null || rangesStr != null)) { throw new SolrException(ErrorCode.BAD_REQUEST, "numSubShards can not be specified with split.key or ranges parameters"); } if (fuzz != null && (splitKey != null || rangesStr != null)) { throw new SolrException(ErrorCode.BAD_REQUEST, "fuzz can not be specified with split.key or ranges parameters"); } Map<String, Object> map = copy(req.getParams(), null, COLLECTION_PROP, SHARD_ID_PROP, "split.key", CoreAdminParams.RANGES, WAIT_FOR_FINAL_STATE, TIMING, SPLIT_METHOD, NUM_SUB_SHARDS, SPLIT_FUZZ, SPLIT_BY_PREFIX, 
FOLLOW_ALIASES); return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX); }), DELETESHARD_OP(DELETESHARD, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP); copy(req.getParams(), map, DELETE_INDEX, DELETE_DATA_DIR, DELETE_INSTANCE_DIR, DELETE_METRICS_HISTORY, FOLLOW_ALIASES); return map; }), FORCELEADER_OP(FORCELEADER, (req, rsp, h) -> { forceLeaderElection(req, h); return null; }), CREATESHARD_OP(CREATESHARD, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP); ClusterState clusterState = h.coreContainer.getZkController().getClusterState(); final String newShardName = SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP)); boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false); String extCollectionName = req.getParams().get(COLLECTION_PROP); String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader() .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName; if (!ImplicitDocRouter.NAME.equals(((Map) clusterState.getCollection(collectionName).get(DOC_ROUTER)).get(NAME))) throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections"); copy(req.getParams(), map, REPLICATION_FACTOR, NRT_REPLICAS, TLOG_REPLICAS, PULL_REPLICAS, CREATE_NODE_SET, WAIT_FOR_FINAL_STATE, FOLLOW_ALIASES); return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX); }), DELETEREPLICA_OP(DELETEREPLICA, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP); return copy(req.getParams(), map, DELETE_INDEX, DELETE_DATA_DIR, DELETE_INSTANCE_DIR, DELETE_METRICS_HISTORY, COUNT_PROP, REPLICA_PROP, SHARD_ID_PROP, ONLY_IF_DOWN, FOLLOW_ALIASES); }), MIGRATE_OP(MIGRATE, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, "split.key", "target.collection"); return copy(req.getParams(), map, "forward.timeout", FOLLOW_ALIASES); }), ADDROLE_OP(ADDROLE, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, "role", "node"); if (!KNOWN_ROLES.contains(map.get("role"))) throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES); return map; }), REMOVEROLE_OP(REMOVEROLE, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, "role", "node"); if (!KNOWN_ROLES.contains(map.get("role"))) throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. 
Supported roles are ," + KNOWN_ROLES); return map; }), CLUSTERPROP_OP(CLUSTERPROP, (req, rsp, h) -> { String name = req.getParams().required().get(NAME); String val = req.getParams().get(VALUE_LONG); ClusterProperties cp = new ClusterProperties(h.coreContainer.getZkController().getZkClient()); cp.setClusterProperty(name, val); return null; }), COLLECTIONPROP_OP(COLLECTIONPROP, (req, rsp, h) -> { String extCollection = req.getParams().required().get(NAME); String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection); String name = req.getParams().required().get(PROPERTY_NAME); String val = req.getParams().get(PROPERTY_VALUE); CollectionProperties cp = new CollectionProperties(h.coreContainer.getZkController().getZkClient()); cp.setCollectionProperty(collection, name, val); return null; }), @SuppressWarnings({"unchecked"}) REQUESTSTATUS_OP(REQUESTSTATUS, (req, rsp, h) -> { req.getParams().required().check(REQUESTID); final CoreContainer coreContainer1 = h.coreContainer; final String requestId = req.getParams().get(REQUESTID); final ZkController zkController = coreContainer1.getZkController(); final NamedList<Object> results = new NamedList<>(); if (zkController.getOverseerCompletedMap().contains(requestId)) { final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId); rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse()); addStatusToResponse(results, COMPLETED, "found [" + requestId + "] in completed tasks"); } else if (zkController.getOverseerFailureMap().contains(requestId)) { final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId); rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse()); addStatusToResponse(results, FAILED, "found [" + requestId + "] in failed tasks"); } else if (zkController.getOverseerRunningMap().contains(requestId)) { addStatusToResponse(results, RUNNING, "found [" + requestId + "] in running tasks"); } else if (h.overseerCollectionQueueContains(requestId)) { addStatusToResponse(results, SUBMITTED, "found [" + requestId + "] in submitted tasks"); } else { addStatusToResponse(results, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue"); } final SolrResponse response = new OverseerSolrResponse(results); rsp.getValues().addAll(response.getResponse()); return null; }), DELETESTATUS_OP(DELETESTATUS, new CollectionOp() { @SuppressWarnings("unchecked") @Override public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception { final CoreContainer coreContainer = h.coreContainer; final String requestId = req.getParams().get(REQUESTID); final ZkController zkController = coreContainer.getZkController(); Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false); if (requestId == null && !flush) { throw new SolrException(ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified."); } if (requestId != null && flush) { throw new SolrException(ErrorCode.BAD_REQUEST, "Both requestid and flush parameters can not be specified together."); } if (flush) { Collection<String> completed = zkController.getOverseerCompletedMap().keys(); Collection<String> failed = zkController.getOverseerFailureMap().keys(); for (String asyncId : completed) { zkController.getOverseerCompletedMap().remove(asyncId); zkController.clearAsyncId(asyncId); } for (String asyncId : failed) { zkController.getOverseerFailureMap().remove(asyncId); 
zkController.clearAsyncId(asyncId); } rsp.getValues().add("status", "successfully cleared stored collection api responses"); return null; } else { // Request to cleanup if (zkController.getOverseerCompletedMap().remove(requestId)) { zkController.clearAsyncId(requestId); rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]"); } else if (zkController.getOverseerFailureMap().remove(requestId)) { zkController.clearAsyncId(requestId); rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]"); } else { rsp.getValues().add("status", "[" + requestId + "] not found in stored responses"); // Don't call zkController.clearAsyncId for this, since it could be a running/pending task } } return null; } }), ADDREPLICA_OP(ADDREPLICA, (req, rsp, h) -> { Map<String, Object> props = copy(req.getParams(), null, COLLECTION_PROP, "node", SHARD_ID_PROP, _ROUTE_, CoreAdminParams.NAME, INSTANCE_DIR, DATA_DIR, ULOG_DIR, REPLICA_TYPE, WAIT_FOR_FINAL_STATE, NRT_REPLICAS, TLOG_REPLICAS, PULL_REPLICAS, CREATE_NODE_SET, FOLLOW_ALIASES, SKIP_NODE_ASSIGNMENT); return copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX); }), OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> new LinkedHashMap<>()), /** * Handle list collection request. * Do list collection request to zk host */ @SuppressWarnings({"unchecked"}) LIST_OP(LIST, (req, rsp, h) -> { NamedList<Object> results = new NamedList<>(); Map<String, DocCollection> collections = h.coreContainer.getZkController().getZkStateReader().getClusterState().getCollectionsMap(); List<String> collectionList = new ArrayList<>(collections.keySet()); // XXX should we add aliases here? results.add("collections", collectionList); SolrResponse response = new OverseerSolrResponse(results); rsp.getValues().addAll(response.getResponse()); return null; }), /** * Handle cluster status request. * Can return status per specific collection/shard or per all collections. */ CLUSTERSTATUS_OP(CLUSTERSTATUS, (req, rsp, h) -> { Map<String, Object> all = copy(req.getParams(), null, COLLECTION_PROP, SHARD_ID_PROP, _ROUTE_); new ClusterStatus(h.coreContainer.getZkController().getZkStateReader(), new ZkNodeProps(all)).getClusterStatus(rsp.getValues()); return null; }), ADDREPLICAPROP_OP(ADDREPLICAPROP, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, PROPERTY_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_VALUE_PROP); copy(req.getParams(), map, SHARD_UNIQUE); String property = (String) map.get(PROPERTY_PROP); if (!property.startsWith(COLL_PROP_PREFIX)) { property = COLL_PROP_PREFIX + property; } boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE)); // Check if we're trying to set a property with parameters that allow us to set the property on multiple replicas // in a slice on properties that are known to only be one-per-slice and error out if so. if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE)) && SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property.toLowerCase(Locale.ROOT)) && uniquePerSlice == false) { throw new SolrException(ErrorCode.BAD_REQUEST, "Overseer replica property command received for property " + property + " with the " + SHARD_UNIQUE + " parameter set to something other than 'true'. No action taken."); } return map; }), // XXX should this command support followAliases? 
DELETEREPLICAPROP_OP(DELETEREPLICAPROP, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, PROPERTY_PROP, SHARD_ID_PROP, REPLICA_PROP); return copy(req.getParams(), map, PROPERTY_PROP); }), // XXX should this command support followAliases? BALANCESHARDUNIQUE_OP(BALANCESHARDUNIQUE, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, PROPERTY_PROP); Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE)); String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT); if (!StringUtils.startsWith(prop, COLL_PROP_PREFIX)) { prop = COLL_PROP_PREFIX + prop; } if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) { throw new SolrException(ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that" + " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. " + " Property: " + prop + " shardUnique: " + shardUnique); } return copy(req.getParams(), map, ONLY_ACTIVE_NODES, SHARD_UNIQUE); }), REBALANCELEADERS_OP(REBALANCELEADERS, (req, rsp, h) -> { new RebalanceLeaders(req, rsp, h).execute(); return null; }), // XXX should this command support followAliases? MODIFYCOLLECTION_OP(MODIFYCOLLECTION, (req, rsp, h) -> { Map<String, Object> m = copy(req.getParams(), null, CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES); copyPropertiesWithPrefix(req.getParams(), m, COLL_PROP_PREFIX); if (m.isEmpty()) { throw new SolrException(ErrorCode.BAD_REQUEST, formatString("no supported values provided {0}", CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES.toString())); } copy(req.getParams().required(), m, COLLECTION_PROP); for (Map.Entry<String, Object> entry : m.entrySet()) { String prop = entry.getKey(); if ("".equals(entry.getValue())) { // set to an empty string is equivalent to removing the property, see SOLR-12507 m.put(prop, null); } DocCollection.verifyProp(m, prop); } if (m.get(REPLICATION_FACTOR) != null) { m.put(NRT_REPLICAS, m.get(REPLICATION_FACTOR)); } return m; }), BACKUP_OP(BACKUP, (req, rsp, h) -> { req.getParams().required().check(NAME, COLLECTION_PROP); final String extCollectionName = req.getParams().get(COLLECTION_PROP); final boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false); final String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader() .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName; final ClusterState clusterState = h.coreContainer.getZkController().getClusterState(); if (!clusterState.hasCollection(collectionName)) { throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken."); } CoreContainer cc = h.coreContainer; String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY); BackupRepository repository = cc.newBackupRepository(repo); String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION)); if (location == null) { //Refresh the cluster property file to make sure the value set for location is the latest // Check if the location is specified in the cluster property. 
location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null); if (location == null) { throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query" + " parameter or as a default repository property or as a cluster property."); } } // Check if the specified location is valid for this repository. final URI uri = repository.createURI(location); try { if (!repository.exists(uri)) { throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist."); } } catch (IOException ex) { throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existance of " + uri + ". Is it valid?", ex); } String strategy = req.getParams().get(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY); if (!CollectionAdminParams.INDEX_BACKUP_STRATEGIES.contains(strategy)) { throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown index backup strategy " + strategy); } final Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP, FOLLOW_ALIASES, CoreAdminParams.COMMIT_NAME); params.put(CoreAdminParams.BACKUP_LOCATION, location); if (repo != null) { params.put(CoreAdminParams.BACKUP_REPOSITORY, repo); } params.put(CollectionAdminParams.INDEX_BACKUP_STRATEGY, strategy); return params; }), RESTORE_OP(RESTORE, (req, rsp, h) -> { req.getParams().required().check(NAME, COLLECTION_PROP); final String collectionName = SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP)); final ClusterState clusterState = h.coreContainer.getZkController().getClusterState(); //We always want to restore into an collection name which doesn't exist yet. if (clusterState.hasCollection(collectionName)) { throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' exists, no action taken."); } if (h.coreContainer.getZkController().getZkStateReader().getAliases().hasAlias(collectionName)) { throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' is an existing alias, no action taken."); } final CoreContainer cc = h.coreContainer; final String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY); final BackupRepository repository = cc.newBackupRepository(repo); String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION)); if (location == null) { //Refresh the cluster property file to make sure the value set for location is the latest // Check if the location is specified in the cluster property. location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty("location", null); if (location == null) { throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query" + " parameter or as a default repository property or as a cluster property."); } } // Check if the specified location is valid for this repository. final URI uri = repository.createURI(location); try { if (!repository.exists(uri)) { throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist."); } } catch (IOException ex) { throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existance of " + uri + ". 
Is it valid?", ex); } final String createNodeArg = req.getParams().get(CREATE_NODE_SET); if (CREATE_NODE_SET_EMPTY.equals(createNodeArg)) { throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "Cannot restore with a CREATE_NODE_SET of CREATE_NODE_SET_EMPTY." ); } if (req.getParams().get(NRT_REPLICAS) != null && req.getParams().get(REPLICATION_FACTOR) != null) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot set both replicationFactor and nrtReplicas as they mean the same thing"); } final Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP); params.put(CoreAdminParams.BACKUP_LOCATION, location); if (repo != null) { params.put(CoreAdminParams.BACKUP_REPOSITORY, repo); } // from CREATE_OP: copy(req.getParams(), params, COLL_CONF, REPLICATION_FACTOR, NRT_REPLICAS, TLOG_REPLICAS, PULL_REPLICAS, CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE); copyPropertiesWithPrefix(req.getParams(), params, COLL_PROP_PREFIX); return params; }), CREATESNAPSHOT_OP(CREATESNAPSHOT, (req, rsp, h) -> { req.getParams().required().check(COLLECTION_PROP, CoreAdminParams.COMMIT_NAME); String extCollectionName = req.getParams().get(COLLECTION_PROP); boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false); String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader() .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName; String commitName = req.getParams().get(CoreAdminParams.COMMIT_NAME); ClusterState clusterState = h.coreContainer.getZkController().getClusterState(); if (!clusterState.hasCollection(collectionName)) { throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken."); } SolrZkClient client = h.coreContainer.getZkController().getZkClient(); if (SolrSnapshotManager.snapshotExists(client, collectionName, commitName)) { throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name '" + commitName + "' already exists for collection '" + collectionName + "', no action taken."); } Map<String, Object> params = copy(req.getParams(), null, COLLECTION_PROP, FOLLOW_ALIASES, CoreAdminParams.COMMIT_NAME); return params; }), DELETESNAPSHOT_OP(DELETESNAPSHOT, (req, rsp, h) -> { req.getParams().required().check(COLLECTION_PROP, CoreAdminParams.COMMIT_NAME); String extCollectionName = req.getParams().get(COLLECTION_PROP); String collectionName = h.coreContainer.getZkController().getZkStateReader() .getAliases().resolveSimpleAlias(extCollectionName); ClusterState clusterState = h.coreContainer.getZkController().getClusterState(); if (!clusterState.hasCollection(collectionName)) { throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken."); } Map<String, Object> params = copy(req.getParams(), null, COLLECTION_PROP, FOLLOW_ALIASES, CoreAdminParams.COMMIT_NAME); return params; }), LISTSNAPSHOTS_OP(LISTSNAPSHOTS, (req, rsp, h) -> { req.getParams().required().check(COLLECTION_PROP); String extCollectionName = req.getParams().get(COLLECTION_PROP); String collectionName = h.coreContainer.getZkController().getZkStateReader() .getAliases().resolveSimpleAlias(extCollectionName); ClusterState clusterState = h.coreContainer.getZkController().getClusterState(); if (!clusterState.hasCollection(collectionName)) { throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken."); } NamedList<Object> snapshots = new NamedList<Object>(); SolrZkClient client = 
h.coreContainer.getZkController().getZkClient(); Collection<CollectionSnapshotMetaData> m = SolrSnapshotManager.listSnapshots(client, collectionName); for (CollectionSnapshotMetaData meta : m) { snapshots.add(meta.getName(), meta.toNamedList()); } rsp.add(SolrSnapshotManager.SNAPSHOTS_INFO, snapshots); return null; }), REPLACENODE_OP(REPLACENODE, (req, rsp, h) -> { return copy(req.getParams(), null, "source", //legacy "target",//legacy WAIT_FOR_FINAL_STATE, CollectionParams.SOURCE_NODE, CollectionParams.TARGET_NODE); }), MOVEREPLICA_OP(MOVEREPLICA, (req, rsp, h) -> { Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP); return copy(req.getParams(), map, CollectionParams.FROM_NODE, CollectionParams.SOURCE_NODE, CollectionParams.TARGET_NODE, WAIT_FOR_FINAL_STATE, IN_PLACE_MOVE, "replica", "shard", FOLLOW_ALIASES); }), DELETENODE_OP(DELETENODE, (req, rsp, h) -> copy(req.getParams().required(), null, "node")); /** * Places all prefixed properties in the sink map (or a new map) using the prefix as the key and a map of * all prefixed properties as the value. The sub-map keys have the prefix removed. * * @param params The solr params from which to extract prefixed properties. * @param sink The map to add the properties too. * @param prefix The prefix to identify properties to be extracted * @return The sink map, or a new map if the sink map was null */ private static Map<String, Object> convertPrefixToMap(SolrParams params, Map<String, Object> sink, String prefix) { Map<String, Object> result = new LinkedHashMap<>(); Iterator<String> iter = params.getParameterNamesIterator(); while (iter.hasNext()) { String param = iter.next(); if (param.startsWith(prefix)) { result.put(param.substring(prefix.length() + 1), params.get(param)); } } if (sink == null) { sink = new LinkedHashMap<>(); } sink.put(prefix, result); return sink; } public final CollectionOp fun; CollectionAction action; long timeOut; boolean sendToOCPQueue; CollectionOperation(CollectionAction action, CollectionOp fun) { this(action, DEFAULT_COLLECTION_OP_TIMEOUT, true, fun); } CollectionOperation(CollectionAction action, long timeOut, boolean sendToOCPQueue, CollectionOp fun) { this.action = action; this.timeOut = timeOut; this.sendToOCPQueue = sendToOCPQueue; this.fun = fun; } public static CollectionOperation get(CollectionAction action) { for (CollectionOperation op : values()) { if (op.action == action) return op; } throw new SolrException(ErrorCode.SERVER_ERROR, "No such action " + action); } @Override public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception { return fun.execute(req, rsp, h); } } private static void forceLeaderElection(SolrQueryRequest req, CollectionsHandler handler) { ZkController zkController = handler.coreContainer.getZkController(); ClusterState clusterState = zkController.getClusterState(); String extCollectionName = req.getParams().required().get(COLLECTION_PROP); String collectionName = zkController.zkStateReader.getAliases().resolveSimpleAlias(extCollectionName); String sliceId = req.getParams().required().get(SHARD_ID_PROP); log.info("Force leader invoked, state: {}", clusterState); DocCollection collection = clusterState.getCollection(collectionName); Slice slice = collection.getSlice(sliceId); if (slice == null) { throw new SolrException(ErrorCode.BAD_REQUEST, "No shard with name " + sliceId + " exists for collection " + collectionName); } try (ZkShardTerms zkShardTerms = new ZkShardTerms(collectionName, 
slice.getName(), zkController.getZkClient())) { // if an active replica is the leader, then all is fine already Replica leader = slice.getLeader(); if (leader != null && leader.getState() == State.ACTIVE) { throw new SolrException(ErrorCode.SERVER_ERROR, "The shard already has an active leader. Force leader is not applicable. State: " + slice); } final Set<String> liveNodes = clusterState.getLiveNodes(); List<Replica> liveReplicas = slice.getReplicas().stream() .filter(rep -> liveNodes.contains(rep.getNodeName())).collect(Collectors.toList()); boolean shouldIncreaseReplicaTerms = liveReplicas.stream() .noneMatch(rep -> zkShardTerms.registered(rep.getName()) && zkShardTerms.canBecomeLeader(rep.getName())); // we won't increase replica's terms if exist a live replica with term equals to leader if (shouldIncreaseReplicaTerms) { //TODO only increase terms of replicas less out-of-sync liveReplicas.stream() .filter(rep -> zkShardTerms.registered(rep.getName())) .forEach(rep -> zkShardTerms.setTermEqualsToLeader(rep.getName())); } // Wait till we have an active leader boolean success = false; for (int i = 0; i < 9; i++) { Thread.sleep(5000); clusterState = handler.coreContainer.getZkController().getClusterState(); collection = clusterState.getCollection(collectionName); slice = collection.getSlice(sliceId); if (slice.getLeader() != null && slice.getLeader().getState() == State.ACTIVE) { success = true; break; } log.warn("Force leader attempt {}. Waiting 5 secs for an active leader. State of the slice: {}", (i + 1), slice); //nowarn } if (success) { log.info("Successfully issued FORCELEADER command for collection: {}, shard: {}", collectionName, sliceId); } else { log.info("Couldn't successfully force leader, collection: {}, shard: {}. Cluster state: {}", collectionName, sliceId, clusterState); } } catch (SolrException e) { throw e; } catch (Exception e) { throw new SolrException(ErrorCode.SERVER_ERROR, "Error executing FORCELEADER operation for collection: " + collectionName + " shard: " + sliceId, e); } } public static void waitForActiveCollection(String collectionName, CoreContainer cc, SolrResponse createCollResponse) throws KeeperException, InterruptedException { if (createCollResponse.getResponse().get("exception") != null) { // the main called failed, don't wait if (log.isInfoEnabled()) { log.info("Not waiting for active collection due to exception: {}", createCollResponse.getResponse().get("exception")); } return; } int replicaFailCount; if (createCollResponse.getResponse().get("failure") != null) { replicaFailCount = ((NamedList) createCollResponse.getResponse().get("failure")).size(); } else { replicaFailCount = 0; } CloudConfig ccfg = cc.getConfig().getCloudConfig(); Integer seconds = ccfg.getCreateCollectionWaitTimeTillActive(); Boolean checkLeaderOnly = ccfg.isCreateCollectionCheckLeaderActive(); if (log.isInfoEnabled()) { log.info("Wait for new collection to be active for at most {} seconds. Check all shard {}" , seconds, (checkLeaderOnly ? 
"leaders" : "replicas")); } try { cc.getZkController().getZkStateReader().waitForState(collectionName, seconds, TimeUnit.SECONDS, (n, c) -> { if (c == null) { // the collection was not created, don't wait return true; } if (c.getSlices() != null) { Collection<Slice> shards = c.getSlices(); int replicaNotAliveCnt = 0; for (Slice shard : shards) { Collection<Replica> replicas; if (!checkLeaderOnly) replicas = shard.getReplicas(); else { replicas = new ArrayList<Replica>(); replicas.add(shard.getLeader()); } for (Replica replica : replicas) { String state = replica.getStr(ZkStateReader.STATE_PROP); if (log.isDebugEnabled()) { log.debug("Checking replica status, collection={} replica={} state={}", collectionName, replica.getCoreUrl(), state); } if (!n.contains(replica.getNodeName()) || !state.equals(Replica.State.ACTIVE.toString())) { replicaNotAliveCnt++; return false; } } } return (replicaNotAliveCnt == 0) || (replicaNotAliveCnt <= replicaFailCount); } return false; }); } catch (TimeoutException | InterruptedException e) { String error = "Timeout waiting for active collection " + collectionName + " with timeout=" + seconds; throw new NotInClusterStateException(ErrorCode.SERVER_ERROR, error); } } private static void verifyShardsParam(String shardsParam) { for (String shard : shardsParam.split(",")) { SolrIdentifierValidator.validateShardName(shard); } } interface CollectionOp { Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception; } @Override public Collection<Api> getApis() { return v2Handler.getApis(); } @Override public Boolean registerV2() { return Boolean.TRUE; } // These "copy" methods were once SolrParams.getAll but were moved here as there is no universal way that // a SolrParams can be represented in a Map; there are various choices. /** * Copy all params to the given map or if the given map is null create a new one */ static Map<String, Object> copy(SolrParams source, Map<String, Object> sink, Collection<String> paramNames) { if (sink == null) sink = new LinkedHashMap<>(); for (String param : paramNames) { String[] v = source.getParams(param); if (v != null && v.length > 0) { if (v.length == 1) { sink.put(param, v[0]); } else { sink.put(param, v); } } } return sink; } /** * Copy all params to the given map or if the given map is null create a new one */ static Map<String, Object> copy(SolrParams source, Map<String, Object> sink, String... paramNames) { return copy(source, sink, paramNames == null ? Collections.emptyList() : Arrays.asList(paramNames)); } }
1
38,735
I know this is not new code, but should we change `leader.getState() == State.ACTIVE` to `leader.isActive(liveNodes)`?
apache-lucene-solr
java
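Following up on the review comment above (`leader.isActive(liveNodes)` vs. `leader.getState() == State.ACTIVE`): a minimal sketch of how the guard inside `forceLeaderElection` could look if it folded the live-node check into the state check, assuming `Replica.isActive(Set<String>)` means "the hosting node is live and the replica state is ACTIVE". The `liveNodes` lookup would need to move above the leader check; this is only an illustration of the suggestion, not the committed code.

```java
// Sketch only -- assumes Replica.isActive(liveNodes) == "node is live AND state is ACTIVE".
final Set<String> liveNodes = clusterState.getLiveNodes(); // moved up from below the check
Replica leader = slice.getLeader();
if (leader != null && leader.isActive(liveNodes)) {
  throw new SolrException(ErrorCode.SERVER_ERROR,
      "The shard already has an active leader. Force leader is not applicable. State: " + slice);
}
```

The practical difference is that a leader whose node has dropped out of the live-nodes set would no longer block the force-leader path, which seems to be the point of the question.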
@@ -105,7 +105,7 @@ public class JavaRuleViolation extends ParametricRuleViolation<JavaNode> { private void setClassNameFrom(JavaNode node) { String qualifiedName = null; - if (node.getScope() instanceof ClassScope) { + if (node instanceof AbstractAnyTypeDeclaration && node.getScope() instanceof ClassScope) { qualifiedName = ((ClassScope) node.getScope()).getClassName(); }
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.lang.java.rule; import java.util.Iterator; import java.util.Set; import net.sourceforge.pmd.Rule; import net.sourceforge.pmd.RuleContext; import net.sourceforge.pmd.RuleViolation; import net.sourceforge.pmd.lang.ast.Node; import net.sourceforge.pmd.lang.java.ast.ASTCompilationUnit; import net.sourceforge.pmd.lang.java.ast.ASTFieldDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTFormalParameter; import net.sourceforge.pmd.lang.java.ast.ASTLocalVariableDeclaration; import net.sourceforge.pmd.lang.java.ast.ASTVariableDeclarator; import net.sourceforge.pmd.lang.java.ast.ASTVariableDeclaratorId; import net.sourceforge.pmd.lang.java.ast.AbstractAnyTypeDeclaration; import net.sourceforge.pmd.lang.java.ast.AccessNode; import net.sourceforge.pmd.lang.java.ast.CanSuppressWarnings; import net.sourceforge.pmd.lang.java.ast.JavaNode; import net.sourceforge.pmd.lang.java.symboltable.ClassNameDeclaration; import net.sourceforge.pmd.lang.java.symboltable.ClassScope; import net.sourceforge.pmd.lang.java.symboltable.MethodScope; import net.sourceforge.pmd.lang.java.symboltable.SourceFileScope; import net.sourceforge.pmd.lang.rule.ParametricRuleViolation; import net.sourceforge.pmd.lang.symboltable.Scope; /** * This is a Java RuleViolation. It knows how to try to extract the following * extra information from the violation node: * <ul> * <li>Package name</li> * <li>Class name</li> * <li>Method name</li> * <li>Variable name</li> * <li>Suppression indicator</li> * </ul> * @deprecated See {@link RuleViolation} */ @Deprecated public class JavaRuleViolation extends ParametricRuleViolation<JavaNode> { public JavaRuleViolation(Rule rule, RuleContext ctx, JavaNode node, String message, int beginLine, int endLine) { this(rule, ctx, node, message); setLines(beginLine, endLine); } public JavaRuleViolation(Rule rule, RuleContext ctx, JavaNode node, String message) { super(rule, ctx, node, message); if (node != null) { final Scope scope = node.getScope(); final SourceFileScope sourceFileScope = scope.getEnclosingScope(SourceFileScope.class); // Package name is on SourceFileScope packageName = sourceFileScope.getPackageName() == null ? "" : sourceFileScope.getPackageName(); // Class name is built from enclosing ClassScopes setClassNameFrom(node); // Method name comes from 1st enclosing MethodScope if (scope.getEnclosingScope(MethodScope.class) != null) { methodName = scope.getEnclosingScope(MethodScope.class).getName(); } // Variable name node specific setVariableNameIfExists(node); if (!suppressed) { suppressed = isSupressed(node, getRule()); } } } /** * Check for suppression on this node, on parents, and on contained types * for ASTCompilationUnit * * @param node * * @deprecated Is internal API, not useful, there's a typo. 
See <a href="https://github.com/pmd/pmd/pull/1927">#1927</a> */ @Deprecated public static boolean isSupressed(Node node, Rule rule) { boolean result = suppresses(node, rule); if (!result && node instanceof ASTCompilationUnit) { for (int i = 0; !result && i < node.jjtGetNumChildren(); i++) { result = suppresses(node.jjtGetChild(i), rule); } } if (!result) { Node parent = node.jjtGetParent(); while (!result && parent != null) { result = suppresses(parent, rule); parent = parent.jjtGetParent(); } } return result; } private void setClassNameFrom(JavaNode node) { String qualifiedName = null; if (node.getScope() instanceof ClassScope) { qualifiedName = ((ClassScope) node.getScope()).getClassName(); } for (AbstractAnyTypeDeclaration parent : node.getParentsOfType(AbstractAnyTypeDeclaration.class)) { String clsName = parent.getScope().getEnclosingScope(ClassScope.class).getClassName(); if (qualifiedName == null) { qualifiedName = clsName; } else { qualifiedName = clsName + '$' + qualifiedName; } } if (qualifiedName == null) { Set<ClassNameDeclaration> classes = node.getScope().getEnclosingScope(SourceFileScope.class) .getClassDeclarations().keySet(); for (ClassNameDeclaration c : classes) { // find the first public class/enum declaration if (c.getAccessNodeParent() instanceof AccessNode) { if (((AccessNode) c.getAccessNodeParent()).isPublic()) { qualifiedName = c.getImage(); break; } } } // Still not found? if (qualifiedName == null) { for (ClassNameDeclaration c : classes) { // find the first package-private class/enum declaration if (c.getAccessNodeParent() instanceof AccessNode) { if (((AccessNode) c.getAccessNodeParent()).isPackagePrivate()) { qualifiedName = c.getImage(); break; } } } } } if (qualifiedName != null) { className = qualifiedName; } } private static boolean suppresses(final Node node, Rule rule) { return node instanceof CanSuppressWarnings && ((CanSuppressWarnings) node).hasSuppressWarningsAnnotationFor(rule); } private String getVariableNames(Iterable<ASTVariableDeclaratorId> iterable) { Iterator<ASTVariableDeclaratorId> it = iterable.iterator(); StringBuilder builder = new StringBuilder(); builder.append(it.next()); while (it.hasNext()) { builder.append(", ").append(it.next()); } return builder.toString(); } private void setVariableNameIfExists(Node node) { if (node instanceof ASTFieldDeclaration) { variableName = getVariableNames((ASTFieldDeclaration) node); } else if (node instanceof ASTLocalVariableDeclaration) { variableName = getVariableNames((ASTLocalVariableDeclaration) node); } else if (node instanceof ASTVariableDeclarator) { variableName = node.jjtGetChild(0).getImage(); } else if (node instanceof ASTVariableDeclaratorId) { variableName = node.getImage(); } else if (node instanceof ASTFormalParameter) { setVariableNameIfExists(node.getFirstChildOfType(ASTVariableDeclaratorId.class)); } else { variableName = ""; } } }
1
16,894
Why not use ASTAnyTypeDeclaration? AbstractAnyTypeDeclaration is deprecated.
pmd-pmd
java
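Regarding the review comment above on the PMD patch: a hypothetical variant of the changed condition written against the `ASTAnyTypeDeclaration` interface rather than the deprecated `AbstractAnyTypeDeclaration` base class, shown only to make the suggestion concrete (the import would change accordingly; whether the later `getParentsOfType(AbstractAnyTypeDeclaration.class)` loop should switch as well is left open).

```java
// Hypothetical alternative to the patch's instanceof check, per the review comment:
// test the ASTAnyTypeDeclaration interface instead of the deprecated abstract class.
// Assumes: import net.sourceforge.pmd.lang.java.ast.ASTAnyTypeDeclaration;
if (node instanceof ASTAnyTypeDeclaration && node.getScope() instanceof ClassScope) {
    qualifiedName = ((ClassScope) node.getScope()).getClassName();
}
```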
@@ -119,3 +119,5 @@ from .tools.command import ( version_add, version_list, ) + +from .tools.util import save
1
""" Makes functions in .tools.command accessible directly from quilt. """ # Suppress numpy warnings for Python 2.7 import warnings warnings.filterwarnings("ignore", message="numpy.dtype size changed") # True: Force dev mode # False: Force normal mode # None: CLI params have not yet been parsed to determine mode. _DEV_MODE = None # Normally a try: except: block on or in main() would be better and simpler, # but we load a bunch of external modules that take a lot of time, during which # ctrl-c will cause an exception that misses that block. ..so, we catch the # signal instead of using try:except, and we catch it here, early during load. # # Note: This doesn't *guarantee* that a traceback won't occur, and there's no # real way to do so, because if it happens early enough (during parsing, for # example, or inside the entry point file) we have no way to stop it. def _install_interrupt_handler(): """Suppress KeyboardInterrupt traceback display in specific situations If not running in dev mode, and if executed from the command line, then we raise SystemExit instead of KeyboardInterrupt. This provides a clean exit. :returns: None if no action is taken, original interrupt handler otherwise """ # These would clutter the quilt.x namespace, so they're imported here instead. import os import sys import signal import pkg_resources from .tools import const # Check to see what entry points / scripts are configred to run quilt from the CLI # By doing this, we have these benefits: # * Avoid closing someone's Jupyter/iPython/bPython session when they hit ctrl-c # * Avoid calling exit() when being used as an external lib # * Provide exceptions when running in Jupyter/iPython/bPython # * Provide exceptions when running in unexpected circumstances quilt = pkg_resources.get_distribution('quilt') executable = os.path.basename(sys.argv[0]) entry_points = quilt.get_entry_map().get('console_scripts', []) # When python is run with '-c', this was executed via 'python -c "<some python code>"' if executable == '-c': # This is awkward and somewhat hackish, but we have to ensure that this is *us* # executing via 'python -c' if len(sys.argv) > 1 and sys.argv[1] == 'quilt testing': # it's us. Let's pretend '-c' is an entry point. entry_points['-c'] = 'blah' sys.argv.pop(1) if executable not in entry_points: return # We're running as a console script. # If not in dev mode, use SystemExit instead of raising KeyboardInterrupt def handle_interrupt(signum, stack): # Check for dev mode if _DEV_MODE is None: # Args and environment have not been parsed, and no _DEV_MODE state has been set. dev_mode = True if len(sys.argv) > 1 and sys.argv[1] == '--dev' else False dev_mode = True if os.environ.get('QUILT_DEV_MODE', '').strip().lower() == 'true' else dev_mode else: # Use forced dev-mode if _DEV_MODE is set dev_mode = _DEV_MODE # In order to display the full traceback, we lose control of the exit code here. # Dev mode ctrl-c exit just produces the generic exit error code 1 if dev_mode: raise KeyboardInterrupt() # Normal exit # avoid annoying prompt displacement when hitting ctrl-c print() exit(const.EXIT_KB_INTERRUPT) return signal.signal(signal.SIGINT, handle_interrupt) # This should be called as early in the execution process as is possible. # ..original handler saved in case someone wants it, but it's probably just signal.default_int_handler. 
_orig_interrupt_handler = _install_interrupt_handler() from .tools.command import ( access_add, access_list, access_remove, audit, build, check, config, create_user, delete_user, enable_user, disable_user, export, generate, inspect, install, list_packages, list_users, list_users_detailed, load, log, login, login_with_token, logout, ls, delete, push, rm, search, tag_add, tag_list, tag_remove, version_add, version_list, )
1
16,958
A blank line at the very end of each file should eliminate the "No EOF" warning we see above
quiltdata-quilt
py
@@ -818,15 +818,12 @@ func (h *Handler) SignalWithStartWorkflowExecution(ctx context.Context, request // Two simultaneous SignalWithStart requests might try to start a workflow at the same time. // This can result in one of the requests failing with one of two possible errors: - // 1) If it is a brand new WF ID, one of the requests can fail with WorkflowExecutionAlreadyStartedError - // (createMode is persistence.CreateWorkflowModeBrandNew) - // 2) If it an already existing WF ID, one of the requests can fail with a CurrentWorkflowConditionFailedError - // (createMode is persisetence.CreateWorkflowModeWorkflowIDReuse) + // CurrentWorkflowConditionFailedError || WorkflowConditionFailedError // If either error occurs, just go ahead and retry. It should succeed on the subsequent attempt. // For simplicity, we keep trying unless the context finishes or we get an error that is not one of the // two mentioned above. - _, isExecutionAlreadyStartedErr := err2.(*persistence.WorkflowExecutionAlreadyStartedError) - _, isWorkflowConditionFailedErr := err2.(*persistence.CurrentWorkflowConditionFailedError) + _, isCurrentWorkflowConditionFailedErr := err2.(*persistence.CurrentWorkflowConditionFailedError) + _, isWorkflowConditionFailedErr := err2.(*persistence.WorkflowConditionFailedError) isContextDone := false select {
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package history import ( "context" "fmt" "sync" "sync/atomic" "go.temporal.io/server/common/convert" "go.temporal.io/server/service/history/configs" "go.temporal.io/server/service/history/events" "go.temporal.io/server/service/history/shard" "github.com/pborman/uuid" commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" healthpb "google.golang.org/grpc/health/grpc_health_v1" enumsspb "go.temporal.io/server/api/enums/v1" "go.temporal.io/server/api/historyservice/v1" namespacespb "go.temporal.io/server/api/namespace/v1" replicationspb "go.temporal.io/server/api/replication/v1" tokenspb "go.temporal.io/server/api/token/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/resource" serviceerrors "go.temporal.io/server/common/serviceerror" "go.temporal.io/server/common/task" ) type ( // Handler - gRPC handler interface for historyservice Handler struct { resource.Resource status int32 controller *shard.ControllerImpl tokenSerializer common.TaskTokenSerializer startWG sync.WaitGroup config *configs.Config eventNotifier events.Notifier replicationTaskFetchers ReplicationTaskFetchers queueTaskProcessor queueTaskProcessor } ) const ( serviceName = "temporal.api.workflowservice.v1.HistoryService" ) var ( _ shard.EngineFactory = (*Handler)(nil) _ historyservice.HistoryServiceServer = (*Handler)(nil) errNamespaceNotSet = serviceerror.NewInvalidArgument("Namespace not set on request.") errWorkflowExecutionNotSet = serviceerror.NewInvalidArgument("WorkflowExecution not set on request.") errTaskQueueNotSet = serviceerror.NewInvalidArgument("Task queue not set.") errWorkflowIDNotSet = serviceerror.NewInvalidArgument("WorkflowId is not set on request.") errRunIDNotValid = serviceerror.NewInvalidArgument("RunId is not valid UUID.") errSourceClusterNotSet = serviceerror.NewInvalidArgument("Source Cluster not set on request.") errShardIDNotSet = serviceerror.NewInvalidArgument("ShardId not set on request.") errTimestampNotSet = serviceerror.NewInvalidArgument("Timestamp not set on request.") errInvalidTaskType = serviceerror.NewInvalidArgument("Invalid task type") 
errDeserializeTaskTokenMessage = "Error to deserialize task token. Error: %v." errShuttingDown = serviceerror.NewInternal("Shutting down") ) // NewHandler creates a thrift handler for the history service func NewHandler( resource resource.Resource, config *configs.Config, ) *Handler { handler := &Handler{ Resource: resource, status: common.DaemonStatusInitialized, config: config, tokenSerializer: common.NewProtoTaskTokenSerializer(), } // prevent us from trying to serve requests before shard controller is started and ready handler.startWG.Add(1) return handler } // Start starts the handler func (h *Handler) Start() { if !atomic.CompareAndSwapInt32( &h.status, common.DaemonStatusInitialized, common.DaemonStatusStarted, ) { return } h.replicationTaskFetchers = NewReplicationTaskFetchers( h.GetLogger(), h.config, h.GetClusterMetadata(), h.GetClientBean(), ) h.replicationTaskFetchers.Start() if h.config.EnablePriorityTaskProcessor() { var err error taskPriorityAssigner := newTaskPriorityAssigner( h.GetClusterMetadata().GetCurrentClusterName(), h.GetNamespaceCache(), h.GetLogger(), h.GetMetricsClient(), h.config, ) schedulerType := task.SchedulerType(h.config.TaskSchedulerType()) queueTaskProcessorOptions := &queueTaskProcessorOptions{ schedulerType: schedulerType, } switch schedulerType { case task.SchedulerTypeFIFO: queueTaskProcessorOptions.fifoSchedulerOptions = &task.FIFOTaskSchedulerOptions{ QueueSize: h.config.TaskSchedulerQueueSize(), WorkerCount: h.config.TaskSchedulerWorkerCount(), RetryPolicy: common.CreatePersistanceRetryPolicy(), } case task.SchedulerTypeWRR: queueTaskProcessorOptions.wRRSchedulerOptions = &task.WeightedRoundRobinTaskSchedulerOptions{ Weights: h.config.TaskSchedulerRoundRobinWeights, QueueSize: h.config.TaskSchedulerQueueSize(), WorkerCount: h.config.TaskSchedulerWorkerCount(), RetryPolicy: common.CreatePersistanceRetryPolicy(), } default: h.GetLogger().Fatal("Unknown task scheduler type", tag.Value(schedulerType)) } h.queueTaskProcessor, err = newQueueTaskProcessor( taskPriorityAssigner, queueTaskProcessorOptions, h.GetLogger(), h.GetMetricsClient(), ) if err != nil { h.GetLogger().Fatal("Creating priority task processor failed", tag.Error(err)) } h.queueTaskProcessor.Start() } h.controller = shard.NewController( h.Resource, h, h.config, ) h.eventNotifier = events.NewNotifier(h.GetTimeSource(), h.GetMetricsClient(), h.config.GetShardID) // events notifier must starts before controller h.eventNotifier.Start() h.controller.Start() h.startWG.Done() } // Stop stops the handler func (h *Handler) Stop() { if !atomic.CompareAndSwapInt32( &h.status, common.DaemonStatusStarted, common.DaemonStatusStopped, ) { return } h.replicationTaskFetchers.Stop() if h.queueTaskProcessor != nil { h.queueTaskProcessor.Stop() } h.controller.Stop() h.eventNotifier.Stop() } func (h *Handler) isStopped() bool { return atomic.LoadInt32(&h.status) == common.DaemonStatusStopped } // CreateEngine is implementation for HistoryEngineFactory used for creating the engine instance for shard func (h *Handler) CreateEngine( shardContext shard.Context, ) shard.Engine { return NewEngineWithShardContext( shardContext, h.GetVisibilityManager(), h.GetMatchingClient(), h.GetHistoryClient(), h.GetSDKClient(), h.eventNotifier, h.config, h.replicationTaskFetchers, h.GetMatchingRawClient(), h.queueTaskProcessor, ) } // https://github.com/grpc/grpc/blob/master/doc/health-checking.md func (h *Handler) Check(_ context.Context, request *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { 
h.GetLogger().Debug("History service health check endpoint (gRPC) reached.") h.startWG.Wait() if request.Service != serviceName { return &healthpb.HealthCheckResponse{ Status: healthpb.HealthCheckResponse_SERVICE_UNKNOWN, }, nil } hs := &healthpb.HealthCheckResponse{ Status: healthpb.HealthCheckResponse_SERVING, } return hs, nil } func (h *Handler) Watch(*healthpb.HealthCheckRequest, healthpb.Health_WatchServer) error { return serviceerror.NewUnimplemented("Watch is not implemented.") } // RecordActivityTaskHeartbeat - Record Activity Task Heart beat. func (h *Handler) RecordActivityTaskHeartbeat(ctx context.Context, request *historyservice.RecordActivityTaskHeartbeatRequest) (_ *historyservice.RecordActivityTaskHeartbeatResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } heartbeatRequest := request.HeartbeatRequest taskToken, err0 := h.tokenSerializer.Deserialize(heartbeatRequest.TaskToken) if err0 != nil { return nil, h.convertError(serviceerror.NewInvalidArgument(fmt.Sprintf(errDeserializeTaskTokenMessage, err0))) } err0 = validateTaskToken(taskToken) if err0 != nil { return nil, h.convertError(err0) } workflowID := taskToken.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } response, err2 := engine.RecordActivityTaskHeartbeat(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return response, nil } // RecordActivityTaskStarted - Record Activity Task started. func (h *Handler) RecordActivityTaskStarted(ctx context.Context, request *historyservice.RecordActivityTaskStartedRequest) (_ *historyservice.RecordActivityTaskStartedResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() workflowExecution := request.WorkflowExecution workflowID := workflowExecution.GetWorkflowId() if request.GetNamespaceId() == "" { return nil, h.convertError(errNamespaceNotSet) } engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } response, err2 := engine.RecordActivityTaskStarted(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return response, nil } // RecordWorkflowTaskStarted - Record Workflow Task started. 
func (h *Handler) RecordWorkflowTaskStarted(ctx context.Context, request *historyservice.RecordWorkflowTaskStartedRequest) (_ *historyservice.RecordWorkflowTaskStartedResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() workflowExecution := request.WorkflowExecution workflowID := workflowExecution.GetWorkflowId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } if request.PollRequest == nil || request.PollRequest.TaskQueue.GetName() == "" { return nil, h.convertError(errTaskQueueNotSet) } engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { h.GetLogger().Error("RecordWorkflowTaskStarted failed.", tag.Error(err1), tag.WorkflowID(request.WorkflowExecution.GetWorkflowId()), tag.WorkflowRunID(request.WorkflowExecution.GetRunId()), tag.WorkflowScheduleID(request.GetScheduleId()), ) return nil, h.convertError(err1) } response, err2 := engine.RecordWorkflowTaskStarted(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return response, nil } // RespondActivityTaskCompleted - records completion of an activity task func (h *Handler) RespondActivityTaskCompleted(ctx context.Context, request *historyservice.RespondActivityTaskCompletedRequest) (_ *historyservice.RespondActivityTaskCompletedResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } completeRequest := request.CompleteRequest taskToken, err0 := h.tokenSerializer.Deserialize(completeRequest.TaskToken) if err0 != nil { return nil, h.convertError(serviceerror.NewInvalidArgument(fmt.Sprintf(errDeserializeTaskTokenMessage, err0))) } err0 = validateTaskToken(taskToken) if err0 != nil { return nil, h.convertError(err0) } workflowID := taskToken.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } err2 := engine.RespondActivityTaskCompleted(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return &historyservice.RespondActivityTaskCompletedResponse{}, nil } // RespondActivityTaskFailed - records failure of an activity task func (h *Handler) RespondActivityTaskFailed(ctx context.Context, request *historyservice.RespondActivityTaskFailedRequest) (_ *historyservice.RespondActivityTaskFailedResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } failRequest := request.FailedRequest taskToken, err0 := h.tokenSerializer.Deserialize(failRequest.TaskToken) if err0 != nil { return nil, h.convertError(serviceerror.NewInvalidArgument(fmt.Sprintf(errDeserializeTaskTokenMessage, err0))) } err0 = validateTaskToken(taskToken) if err0 != nil { return nil, h.convertError(err0) } workflowID := taskToken.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } err2 := engine.RespondActivityTaskFailed(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return &historyservice.RespondActivityTaskFailedResponse{}, nil } // RespondActivityTaskCanceled - records failure of an activity task func (h *Handler) RespondActivityTaskCanceled(ctx context.Context, request *historyservice.RespondActivityTaskCanceledRequest) (_ 
*historyservice.RespondActivityTaskCanceledResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } cancelRequest := request.CancelRequest taskToken, err0 := h.tokenSerializer.Deserialize(cancelRequest.TaskToken) if err0 != nil { return nil, h.convertError(serviceerror.NewInvalidArgument(fmt.Sprintf(errDeserializeTaskTokenMessage, err0))) } err0 = validateTaskToken(taskToken) if err0 != nil { return nil, h.convertError(err0) } workflowID := taskToken.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } err2 := engine.RespondActivityTaskCanceled(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return &historyservice.RespondActivityTaskCanceledResponse{}, nil } // RespondWorkflowTaskCompleted - records completion of a workflow task func (h *Handler) RespondWorkflowTaskCompleted(ctx context.Context, request *historyservice.RespondWorkflowTaskCompletedRequest) (_ *historyservice.RespondWorkflowTaskCompletedResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } completeRequest := request.CompleteRequest token, err0 := h.tokenSerializer.Deserialize(completeRequest.TaskToken) if err0 != nil { return nil, h.convertError(serviceerror.NewInvalidArgument(fmt.Sprintf(errDeserializeTaskTokenMessage, err0))) } h.GetLogger().Debug("RespondWorkflowTaskCompleted", tag.WorkflowNamespaceID(token.GetNamespaceId()), tag.WorkflowID(token.GetWorkflowId()), tag.WorkflowRunID(token.GetRunId()), tag.WorkflowScheduleID(token.GetScheduleId())) err0 = validateTaskToken(token) if err0 != nil { return nil, h.convertError(err0) } workflowID := token.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } response, err2 := engine.RespondWorkflowTaskCompleted(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return response, nil } // RespondWorkflowTaskFailed - failed response to workflow task func (h *Handler) RespondWorkflowTaskFailed(ctx context.Context, request *historyservice.RespondWorkflowTaskFailedRequest) (_ *historyservice.RespondWorkflowTaskFailedResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } failedRequest := request.FailedRequest token, err0 := h.tokenSerializer.Deserialize(failedRequest.TaskToken) if err0 != nil { return nil, h.convertError(serviceerror.NewInvalidArgument(fmt.Sprintf(errDeserializeTaskTokenMessage, err0))) } h.GetLogger().Debug("RespondWorkflowTaskFailed", tag.WorkflowNamespaceID(token.GetNamespaceId()), tag.WorkflowID(token.GetWorkflowId()), tag.WorkflowRunID(token.GetRunId()), tag.WorkflowScheduleID(token.GetScheduleId())) err0 = validateTaskToken(token) if err0 != nil { return nil, h.convertError(err0) } workflowID := token.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } err2 := engine.RespondWorkflowTaskFailed(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return &historyservice.RespondWorkflowTaskFailedResponse{}, nil } // StartWorkflowExecution - 
creates a new workflow execution func (h *Handler) StartWorkflowExecution(ctx context.Context, request *historyservice.StartWorkflowExecutionRequest) (_ *historyservice.StartWorkflowExecutionResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } startRequest := request.StartRequest workflowID := startRequest.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } response, err2 := engine.StartWorkflowExecution(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return response, nil } // DescribeHistoryHost returns information about the internal states of a history host func (h *Handler) DescribeHistoryHost(_ context.Context, _ *historyservice.DescribeHistoryHostRequest) (_ *historyservice.DescribeHistoryHostResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() itemsInCacheByIDCount, itemsInCacheByNameCount := h.GetNamespaceCache().GetCacheSize() status := "" switch h.controller.Status() { case common.DaemonStatusInitialized: status = "initialized" case common.DaemonStatusStarted: status = "started" case common.DaemonStatusStopped: status = "stopped" } resp := &historyservice.DescribeHistoryHostResponse{ ShardsNumber: int32(h.controller.NumShards()), ShardIds: h.controller.ShardIDs(), NamespaceCache: &namespacespb.NamespaceCacheInfo{ ItemsInCacheByIdCount: itemsInCacheByIDCount, ItemsInCacheByNameCount: itemsInCacheByNameCount, }, ShardControllerStatus: status, Address: h.GetHostInfo().GetAddress(), } return resp, nil } // RemoveTask returns information about the internal states of a history host func (h *Handler) RemoveTask(_ context.Context, request *historyservice.RemoveTaskRequest) (_ *historyservice.RemoveTaskResponse, retError error) { executionMgr, err := h.GetExecutionManager(request.GetShardId()) if err != nil { return nil, err } switch request.GetCategory() { case enumsspb.TASK_CATEGORY_TRANSFER: err = executionMgr.CompleteTransferTask(&persistence.CompleteTransferTaskRequest{ TaskID: request.GetTaskId(), }) case enumsspb.TASK_CATEGORY_VISIBILITY: err = executionMgr.CompleteVisibilityTask(&persistence.CompleteVisibilityTaskRequest{ TaskID: request.GetTaskId(), }) case enumsspb.TASK_CATEGORY_TIMER: err = executionMgr.CompleteTimerTask(&persistence.CompleteTimerTaskRequest{ VisibilityTimestamp: timestamp.TimeValue(request.GetVisibilityTime()), TaskID: request.GetTaskId(), }) case enumsspb.TASK_CATEGORY_REPLICATION: err = executionMgr.CompleteReplicationTask(&persistence.CompleteReplicationTaskRequest{ TaskID: request.GetTaskId(), }) default: err = errInvalidTaskType } return &historyservice.RemoveTaskResponse{}, err } // CloseShard closes a shard hosted by this instance func (h *Handler) CloseShard(_ context.Context, request *historyservice.CloseShardRequest) (_ *historyservice.CloseShardResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.controller.RemoveEngineForShard(request.GetShardId(), nil) return &historyservice.CloseShardResponse{}, nil } // DescribeMutableState - returns the internal analysis of workflow execution state func (h *Handler) DescribeMutableState(ctx context.Context, request *historyservice.DescribeMutableStateRequest) (_ *historyservice.DescribeMutableStateResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() 
namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } workflowExecution := request.Execution workflowID := workflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } resp, err2 := engine.DescribeMutableState(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return resp, nil } // GetMutableState - returns the id of the next event in the execution's history func (h *Handler) GetMutableState(ctx context.Context, request *historyservice.GetMutableStateRequest) (_ *historyservice.GetMutableStateResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } workflowExecution := request.Execution workflowID := workflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } resp, err2 := engine.GetMutableState(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return resp, nil } // PollMutableState - returns the id of the next event in the execution's history func (h *Handler) PollMutableState(ctx context.Context, request *historyservice.PollMutableStateRequest) (_ *historyservice.PollMutableStateResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } workflowExecution := request.Execution workflowID := workflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } resp, err2 := engine.PollMutableState(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return resp, nil } // DescribeWorkflowExecution returns information about the specified workflow execution. 
func (h *Handler) DescribeWorkflowExecution(ctx context.Context, request *historyservice.DescribeWorkflowExecutionRequest) (_ *historyservice.DescribeWorkflowExecutionResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } workflowExecution := request.Request.Execution workflowID := workflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } resp, err2 := engine.DescribeWorkflowExecution(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return resp, nil } // RequestCancelWorkflowExecution - requests cancellation of a workflow func (h *Handler) RequestCancelWorkflowExecution(ctx context.Context, request *historyservice.RequestCancelWorkflowExecutionRequest) (_ *historyservice.RequestCancelWorkflowExecutionResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if namespaceID == "" || request.CancelRequest.GetNamespace() == "" { return nil, h.convertError(errNamespaceNotSet) } cancelRequest := request.CancelRequest h.GetLogger().Debug("RequestCancelWorkflowExecution", tag.WorkflowNamespace(cancelRequest.GetNamespace()), tag.WorkflowNamespaceID(request.GetNamespaceId()), tag.WorkflowID(cancelRequest.WorkflowExecution.GetWorkflowId()), tag.WorkflowRunID(cancelRequest.WorkflowExecution.GetRunId())) workflowID := cancelRequest.WorkflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } err2 := engine.RequestCancelWorkflowExecution(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return &historyservice.RequestCancelWorkflowExecutionResponse{}, nil } // SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in // WorkflowExecutionSignaled event recorded in the history and a workflow task being created for the execution. func (h *Handler) SignalWorkflowExecution(ctx context.Context, request *historyservice.SignalWorkflowExecutionRequest) (_ *historyservice.SignalWorkflowExecutionResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } workflowExecution := request.SignalRequest.WorkflowExecution workflowID := workflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } err2 := engine.SignalWorkflowExecution(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return &historyservice.SignalWorkflowExecutionResponse{}, nil } // SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution. // If workflow is running, this results in WorkflowExecutionSignaled event recorded in the history // and a workflow task being created for the execution. 
// If workflow is not running or not found, this results in WorkflowExecutionStarted and WorkflowExecutionSignaled // event recorded in history, and a workflow task being created for the execution func (h *Handler) SignalWithStartWorkflowExecution(ctx context.Context, request *historyservice.SignalWithStartWorkflowExecutionRequest) (_ *historyservice.SignalWithStartWorkflowExecutionResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } signalWithStartRequest := request.SignalWithStartRequest workflowID := signalWithStartRequest.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } for { resp, err2 := engine.SignalWithStartWorkflowExecution(ctx, request) if err2 == nil { return resp, nil } // Two simultaneous SignalWithStart requests might try to start a workflow at the same time. // This can result in one of the requests failing with one of two possible errors: // 1) If it is a brand new WF ID, one of the requests can fail with WorkflowExecutionAlreadyStartedError // (createMode is persistence.CreateWorkflowModeBrandNew) // 2) If it an already existing WF ID, one of the requests can fail with a CurrentWorkflowConditionFailedError // (createMode is persisetence.CreateWorkflowModeWorkflowIDReuse) // If either error occurs, just go ahead and retry. It should succeed on the subsequent attempt. // For simplicity, we keep trying unless the context finishes or we get an error that is not one of the // two mentioned above. _, isExecutionAlreadyStartedErr := err2.(*persistence.WorkflowExecutionAlreadyStartedError) _, isWorkflowConditionFailedErr := err2.(*persistence.CurrentWorkflowConditionFailedError) isContextDone := false select { case <-ctx.Done(): isContextDone = true if ctxErr := ctx.Err(); ctxErr != nil { err2 = ctxErr } default: } if (!isExecutionAlreadyStartedErr && !isWorkflowConditionFailedErr) || isContextDone { return nil, h.convertError(err2) } } } // RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. This is currently // used to clean execution info when signal workflow task finished. func (h *Handler) RemoveSignalMutableState(ctx context.Context, request *historyservice.RemoveSignalMutableStateRequest) (_ *historyservice.RemoveSignalMutableStateResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } workflowExecution := request.WorkflowExecution workflowID := workflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } err2 := engine.RemoveSignalMutableState(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return &historyservice.RemoveSignalMutableStateResponse{}, nil } // TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event // in the history and immediately terminating the execution instance. 
func (h *Handler) TerminateWorkflowExecution(ctx context.Context, request *historyservice.TerminateWorkflowExecutionRequest) (_ *historyservice.TerminateWorkflowExecutionResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } workflowExecution := request.TerminateRequest.WorkflowExecution workflowID := workflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } err2 := engine.TerminateWorkflowExecution(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return &historyservice.TerminateWorkflowExecutionResponse{}, nil } // ResetWorkflowExecution reset an existing workflow execution // in the history and immediately terminating the execution instance. func (h *Handler) ResetWorkflowExecution(ctx context.Context, request *historyservice.ResetWorkflowExecutionRequest) (_ *historyservice.ResetWorkflowExecutionResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } workflowExecution := request.ResetRequest.WorkflowExecution workflowID := workflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } resp, err2 := engine.ResetWorkflowExecution(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return resp, nil } // QueryWorkflow queries a workflow. func (h *Handler) QueryWorkflow(ctx context.Context, request *historyservice.QueryWorkflowRequest) (_ *historyservice.QueryWorkflowResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } workflowID := request.GetRequest().GetExecution().GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } resp, err2 := engine.QueryWorkflow(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return resp, nil } // ScheduleWorkflowTask is used for creating a workflow task for already started workflow execution. This is mainly // used by transfer queue processor during the processing of StartChildWorkflowExecution task, where it first starts // child execution without creating the workflow task and then calls this API after updating the mutable state of // parent execution. 
func (h *Handler) ScheduleWorkflowTask(ctx context.Context, request *historyservice.ScheduleWorkflowTaskRequest) (_ *historyservice.ScheduleWorkflowTaskResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } if request.WorkflowExecution == nil { return nil, h.convertError(errWorkflowExecutionNotSet) } workflowExecution := request.WorkflowExecution workflowID := workflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } err2 := engine.ScheduleWorkflowTask(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return &historyservice.ScheduleWorkflowTaskResponse{}, nil } // RecordChildExecutionCompleted is used for reporting the completion of child workflow execution to parent. // This is mainly called by transfer queue processor during the processing of DeleteExecution task. func (h *Handler) RecordChildExecutionCompleted(ctx context.Context, request *historyservice.RecordChildExecutionCompletedRequest) (_ *historyservice.RecordChildExecutionCompletedResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } if request.WorkflowExecution == nil { return nil, h.convertError(errWorkflowExecutionNotSet) } workflowExecution := request.WorkflowExecution workflowID := workflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } err2 := engine.RecordChildExecutionCompleted(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return &historyservice.RecordChildExecutionCompletedResponse{}, nil } // ResetStickyTaskQueue reset the volatile information in mutable state of a given workflow. // Volatile information are the information related to client, such as: // 1. StickyTaskQueue // 2. 
StickyScheduleToStartTimeout func (h *Handler) ResetStickyTaskQueue(ctx context.Context, request *historyservice.ResetStickyTaskQueueRequest) (_ *historyservice.ResetStickyTaskQueueResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } workflowID := request.Execution.GetWorkflowId() engine, err := h.controller.GetEngine(namespaceID, workflowID) if err != nil { return nil, h.convertError(err) } resp, err := engine.ResetStickyTaskQueue(ctx, request) if err != nil { return nil, h.convertError(err) } return resp, nil } // ReplicateEventsV2 is called by processor to replicate history events for passive namespaces func (h *Handler) ReplicateEventsV2(ctx context.Context, request *historyservice.ReplicateEventsV2Request) (_ *historyservice.ReplicateEventsV2Response, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if namespaceID == "" { return nil, h.convertError(errNamespaceNotSet) } workflowExecution := request.WorkflowExecution workflowID := workflowExecution.GetWorkflowId() engine, err1 := h.controller.GetEngine(namespaceID, workflowID) if err1 != nil { return nil, h.convertError(err1) } err2 := engine.ReplicateEventsV2(ctx, request) if err2 != nil { return nil, h.convertError(err2) } return &historyservice.ReplicateEventsV2Response{}, nil } // SyncShardStatus is called by processor to sync history shard information from another cluster func (h *Handler) SyncShardStatus(ctx context.Context, request *historyservice.SyncShardStatusRequest) (_ *historyservice.SyncShardStatusResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } if request.GetSourceCluster() == "" { return nil, h.convertError(errSourceClusterNotSet) } if request.GetShardId() == 0 { return nil, h.convertError(errShardIDNotSet) } if timestamp.TimeValue(request.GetStatusTime()).IsZero() { return nil, h.convertError(errTimestampNotSet) } // shard ID is already provided in the request engine, err := h.controller.GetEngineForShard(request.GetShardId()) if err != nil { return nil, h.convertError(err) } err = engine.SyncShardStatus(ctx, request) if err != nil { return nil, h.convertError(err) } return &historyservice.SyncShardStatusResponse{}, nil } // SyncActivity is called by processor to sync activity func (h *Handler) SyncActivity(ctx context.Context, request *historyservice.SyncActivityRequest) (_ *historyservice.SyncActivityResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() if request.GetNamespaceId() == "" || uuid.Parse(request.GetNamespaceId()) == nil { return nil, h.convertError(errNamespaceNotSet) } if request.GetWorkflowId() == "" { return nil, h.convertError(errWorkflowIDNotSet) } if request.GetRunId() == "" || uuid.Parse(request.GetRunId()) == nil { return nil, h.convertError(errRunIDNotValid) } workflowID := request.GetWorkflowId() engine, err := h.controller.GetEngine(namespaceID, workflowID) if err != nil { return nil, h.convertError(err) } err = engine.SyncActivity(ctx, request) if err != nil { return nil, h.convertError(err) } return &historyservice.SyncActivityResponse{}, nil } // 
GetReplicationMessages is called by remote peers to get replicated messages for cross DC replication func (h *Handler) GetReplicationMessages(ctx context.Context, request *historyservice.GetReplicationMessagesRequest) (_ *historyservice.GetReplicationMessagesResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } var wg sync.WaitGroup wg.Add(len(request.Tokens)) result := new(sync.Map) for _, token := range request.Tokens { go func(token *replicationspb.ReplicationToken) { defer wg.Done() engine, err := h.controller.GetEngineForShard(token.GetShardId()) if err != nil { h.GetLogger().Warn("History engine not found for shard", tag.Error(err)) return } tasks, err := engine.GetReplicationMessages( ctx, request.GetClusterName(), token.GetLastProcessedMessageId(), token.GetLastRetrievedMessageId(), ) if err != nil { h.GetLogger().Warn("Failed to get replication tasks for shard", tag.Error(err)) return } result.Store(token.GetShardId(), tasks) }(token) } wg.Wait() messagesByShard := make(map[int32]*replicationspb.ReplicationMessages) result.Range(func(key, value interface{}) bool { shardID := key.(int32) tasks := value.(*replicationspb.ReplicationMessages) messagesByShard[shardID] = tasks return true }) h.GetLogger().Debug("GetReplicationMessages succeeded.") return &historyservice.GetReplicationMessagesResponse{ShardMessages: messagesByShard}, nil } // GetDLQReplicationMessages is called by remote peers to get replicated messages for DLQ merging func (h *Handler) GetDLQReplicationMessages(ctx context.Context, request *historyservice.GetDLQReplicationMessagesRequest) (_ *historyservice.GetDLQReplicationMessagesResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } taskInfoPerShard := map[int32][]*replicationspb.ReplicationTaskInfo{} // do batch based on workflow ID and run ID for _, taskInfo := range request.GetTaskInfos() { shardID := h.config.GetShardID( taskInfo.GetNamespaceId(), taskInfo.GetWorkflowId(), ) if _, ok := taskInfoPerShard[shardID]; !ok { taskInfoPerShard[shardID] = []*replicationspb.ReplicationTaskInfo{} } taskInfoPerShard[shardID] = append(taskInfoPerShard[shardID], taskInfo) } var wg sync.WaitGroup wg.Add(len(taskInfoPerShard)) tasksChan := make(chan *replicationspb.ReplicationTask, len(request.GetTaskInfos())) handleTaskInfoPerShard := func(taskInfos []*replicationspb.ReplicationTaskInfo) { defer wg.Done() if len(taskInfos) == 0 { return } engine, err := h.controller.GetEngine( taskInfos[0].GetNamespaceId(), taskInfos[0].GetWorkflowId(), ) if err != nil { h.GetLogger().Warn("History engine not found for workflow ID.", tag.Error(err)) return } tasks, err := engine.GetDLQReplicationMessages( ctx, taskInfos, ) if err != nil { h.GetLogger().Error("Failed to get dlq replication tasks.", tag.Error(err)) return } for _, t := range tasks { tasksChan <- t } } for _, replicationTaskInfos := range taskInfoPerShard { go handleTaskInfoPerShard(replicationTaskInfos) } wg.Wait() close(tasksChan) replicationTasks := make([]*replicationspb.ReplicationTask, 0, len(tasksChan)) for t := range tasksChan { replicationTasks = append(replicationTasks, t) } return &historyservice.GetDLQReplicationMessagesResponse{ ReplicationTasks: replicationTasks, }, nil } // ReapplyEvents applies stale events to the current workflow and the current run func (h *Handler) ReapplyEvents(ctx context.Context, request 
*historyservice.ReapplyEventsRequest) (_ *historyservice.ReapplyEventsResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() workflowID := request.GetRequest().GetWorkflowExecution().GetWorkflowId() engine, err := h.controller.GetEngine(namespaceID, workflowID) if err != nil { return nil, h.convertError(err) } // deserialize history event object historyEvents, err := h.GetPayloadSerializer().DeserializeEvents(&commonpb.DataBlob{ EncodingType: enumspb.ENCODING_TYPE_PROTO3, Data: request.GetRequest().GetEvents().GetData(), }) if err != nil { return nil, h.convertError(err) } execution := request.GetRequest().GetWorkflowExecution() if err := engine.ReapplyEvents( ctx, request.GetNamespaceId(), execution.GetWorkflowId(), execution.GetRunId(), historyEvents, ); err != nil { return nil, h.convertError(err) } return &historyservice.ReapplyEventsResponse{}, nil } func (h *Handler) GetDLQMessages(ctx context.Context, request *historyservice.GetDLQMessagesRequest) (_ *historyservice.GetDLQMessagesResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } engine, err := h.controller.GetEngineForShard(request.GetShardId()) if err != nil { err = h.convertError(err) return nil, err } resp, err := engine.GetDLQMessages(ctx, request) if err != nil { err = h.convertError(err) return nil, err } return resp, nil } func (h *Handler) PurgeDLQMessages(ctx context.Context, request *historyservice.PurgeDLQMessagesRequest) (_ *historyservice.PurgeDLQMessagesResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } engine, err := h.controller.GetEngineForShard(request.GetShardId()) if err != nil { err = h.convertError(err) return nil, err } err = engine.PurgeDLQMessages(ctx, request) if err != nil { err = h.convertError(err) return nil, err } return &historyservice.PurgeDLQMessagesResponse{}, nil } func (h *Handler) MergeDLQMessages(ctx context.Context, request *historyservice.MergeDLQMessagesRequest) (_ *historyservice.MergeDLQMessagesResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } engine, err := h.controller.GetEngineForShard(request.GetShardId()) if err != nil { err = h.convertError(err) return nil, err } resp, err := engine.MergeDLQMessages(ctx, request) if err != nil { err = h.convertError(err) return nil, err } return resp, nil } func (h *Handler) RefreshWorkflowTasks(ctx context.Context, request *historyservice.RefreshWorkflowTasksRequest) (_ *historyservice.RefreshWorkflowTasksResponse, retError error) { defer log.CapturePanic(h.GetLogger(), &retError) h.startWG.Wait() if h.isStopped() { return nil, errShuttingDown } namespaceID := request.GetNamespaceId() execution := request.GetRequest().GetExecution() workflowID := execution.GetWorkflowId() engine, err := h.controller.GetEngine(namespaceID, workflowID) if err != nil { err = h.convertError(err) return nil, err } err = engine.RefreshWorkflowTasks( ctx, namespaceID, commonpb.WorkflowExecution{ WorkflowId: execution.WorkflowId, RunId: execution.RunId, }, ) if err != nil { err = h.convertError(err) return nil, err } return &historyservice.RefreshWorkflowTasksResponse{}, nil } // convertError is a helper method to convert ShardOwnershipLostError from persistence layer 
returned by various // HistoryEngine API calls to ShardOwnershipLost error return by HistoryService for client to be redirected to the // correct shard. func (h *Handler) convertError(err error) error { switch err.(type) { case *persistence.ShardOwnershipLostError: shardID := err.(*persistence.ShardOwnershipLostError).ShardID info, err := h.GetHistoryServiceResolver().Lookup(convert.Int32ToString(shardID)) if err == nil { return serviceerrors.NewShardOwnershipLost(h.GetHostInfo().GetAddress(), info.GetAddress()) } return serviceerrors.NewShardOwnershipLost(h.GetHostInfo().GetAddress(), "<unknown>") case *persistence.WorkflowExecutionAlreadyStartedError: err := err.(*persistence.WorkflowExecutionAlreadyStartedError) return serviceerror.NewWorkflowExecutionAlreadyStarted(err.Msg, err.StartRequestID, err.RunID) case *persistence.CurrentWorkflowConditionFailedError: err := err.(*persistence.CurrentWorkflowConditionFailedError) return serviceerror.NewInternal(err.Msg) case *persistence.TransactionSizeLimitError: err := err.(*persistence.TransactionSizeLimitError) return serviceerror.NewInvalidArgument(err.Msg) } return err } func validateTaskToken(taskToken *tokenspb.Task) error { if taskToken.GetWorkflowId() == "" { return errWorkflowIDNotSet } return nil }
1
12,174
Both of these checks would be more robust with `errors.As`, so that the errors could be safely wrapped (chained) and still be detected.
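A minimal sketch of the suggested `errors.As` pattern, using hypothetical stand-in error types for illustration (the real handler would target `*persistence.WorkflowExecutionAlreadyStartedError` and `*persistence.CurrentWorkflowConditionFailedError`):

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins, assumed here only to keep the example self-contained;
// the handler's actual checks target the persistence package's error types.
type workflowExecutionAlreadyStartedError struct{ msg string }

func (e *workflowExecutionAlreadyStartedError) Error() string { return e.msg }

type currentWorkflowConditionFailedError struct{ msg string }

func (e *currentWorkflowConditionFailedError) Error() string { return e.msg }

func main() {
	// Wrap the underlying error. A direct type assertion on err would no longer
	// match, but errors.As walks the chain and still finds the target type.
	err := fmt.Errorf("signal with start: %w",
		&workflowExecutionAlreadyStartedError{msg: "workflow already started"})

	var alreadyStarted *workflowExecutionAlreadyStartedError
	var conditionFailed *currentWorkflowConditionFailedError
	isExecutionAlreadyStartedErr := errors.As(err, &alreadyStarted)
	isWorkflowConditionFailedErr := errors.As(err, &conditionFailed)

	fmt.Println(isExecutionAlreadyStartedErr, isWorkflowConditionFailedErr) // true false
}

The same pattern could also replace the type switch in `convertError`, so that wrapped persistence errors would still be converted correctly; that is a suggestion, not something the patch itself does.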
temporalio-temporal
go
@@ -299,9 +299,9 @@ SwiftArrayBufferHandler::CreateBufferHandler(ValueObject &valobj) { // For now we have to keep the old mangled name since the Objc->Swift bindings // that are in Foundation don't get the new mangling. - if (valobj_typename.startswith(SwiftLanguageRuntime::GetCurrentMangledName("_TtCs23_ContiguousArrayStorage")) - || valobj_typename.startswith("_TtCs23_ContiguousArrayStorage") - || valobj_typename.startswith("Swift._ContiguousArrayStorage")) { + if (valobj_typename.startswith("_TtCs23_ContiguousArrayStorage") || + valobj_typename.startswith("_TtCs23_ContiguousArrayStorage") || + valobj_typename.startswith("Swift._ContiguousArrayStorage")) { CompilerType anyobject_type = valobj.GetTargetSP()->GetScratchClangASTContext()->GetBasicType( lldb::eBasicTypeObjCID);
1
//===-- SwiftArray.cpp ------------------------------------------*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #include "SwiftArray.h" #include "lldb/Core/ValueObjectConstResult.h" #include "lldb/DataFormatters/FormattersHelpers.h" #include "lldb/Symbol/ClangASTContext.h" #include "lldb/Symbol/SwiftASTContext.h" #include "lldb/Target/Process.h" #include "lldb/Target/SwiftLanguageRuntime.h" #include "lldb/Target/Target.h" // FIXME: we should not need this #include "Plugins/Language/ObjC/Cocoa.h" #include "swift/AST/ASTContext.h" #include "llvm/ADT/StringRef.h" using namespace lldb; using namespace lldb_private; using namespace lldb_private::formatters; using namespace lldb_private::formatters::swift; size_t SwiftArrayNativeBufferHandler::GetCount() { return m_size; } size_t SwiftArrayNativeBufferHandler::GetCapacity() { return m_capacity; } lldb_private::CompilerType SwiftArrayNativeBufferHandler::GetElementType() { return m_elem_type; } ValueObjectSP SwiftArrayNativeBufferHandler::GetElementAtIndex(size_t idx) { if (idx >= m_size) return ValueObjectSP(); lldb::addr_t child_location = m_first_elem_ptr + idx * m_element_stride; ProcessSP process_sp(m_exe_ctx_ref.GetProcessSP()); if (!process_sp) return ValueObjectSP(); DataBufferSP buffer(new DataBufferHeap(m_element_size, 0)); Status error; if (process_sp->ReadMemory(child_location, buffer->GetBytes(), m_element_size, error) != m_element_size || error.Fail()) return ValueObjectSP(); DataExtractor data(buffer, process_sp->GetByteOrder(), process_sp->GetAddressByteSize()); StreamString name; name.Printf("[%zu]", idx); return ValueObject::CreateValueObjectFromData(name.GetData(), data, m_exe_ctx_ref, m_elem_type); } SwiftArrayNativeBufferHandler::SwiftArrayNativeBufferHandler( ValueObject &valobj, lldb::addr_t native_ptr, CompilerType elem_type) : m_metadata_ptr(LLDB_INVALID_ADDRESS), m_reserved_word(LLDB_INVALID_ADDRESS), m_size(0), m_capacity(0), m_first_elem_ptr(LLDB_INVALID_ADDRESS), m_elem_type(elem_type), m_element_size(0), m_element_stride(0), m_exe_ctx_ref(valobj.GetExecutionContextRef()) { if (native_ptr == LLDB_INVALID_ADDRESS) return; if (native_ptr == 0) { // 0 is a valid value for the pointer here - it just means empty // never-written-to array m_metadata_ptr = 0; m_reserved_word = 0; m_size = m_capacity = 0; m_first_elem_ptr = 0; return; } ProcessSP process_sp(m_exe_ctx_ref.GetProcessSP()); if (!process_sp) return; auto opt_size = elem_type.GetByteSize(process_sp.get()); if (opt_size) m_element_size = *opt_size; auto opt_stride = elem_type.GetByteStride(process_sp.get()); if (opt_stride) m_element_stride = *opt_stride; size_t ptr_size = process_sp->GetAddressByteSize(); Status error; lldb::addr_t next_read = native_ptr; m_metadata_ptr = process_sp->ReadPointerFromMemory(next_read, error); if (error.Fail()) return; next_read += ptr_size; m_reserved_word = process_sp->ReadUnsignedIntegerFromMemory(next_read, ptr_size, 0, error); if (error.Fail()) return; next_read += ptr_size; m_size = process_sp->ReadUnsignedIntegerFromMemory(next_read, ptr_size, 0, error); if (error.Fail()) return; next_read += ptr_size; m_capacity = 
process_sp->ReadUnsignedIntegerFromMemory(next_read, ptr_size, 0, error); if (error.Fail()) return; next_read += ptr_size; m_first_elem_ptr = next_read; } bool SwiftArrayNativeBufferHandler::IsValid() { return m_metadata_ptr != LLDB_INVALID_ADDRESS && m_first_elem_ptr != LLDB_INVALID_ADDRESS && m_capacity >= m_size && m_elem_type.IsValid(); } size_t SwiftArrayBridgedBufferHandler::GetCount() { return m_frontend->CalculateNumChildren(); } size_t SwiftArrayBridgedBufferHandler::GetCapacity() { return GetCount(); } lldb_private::CompilerType SwiftArrayBridgedBufferHandler::GetElementType() { return m_elem_type; } lldb::ValueObjectSP SwiftArrayBridgedBufferHandler::GetElementAtIndex(size_t idx) { return m_frontend->GetChildAtIndex(idx); } SwiftArrayBridgedBufferHandler::SwiftArrayBridgedBufferHandler( ProcessSP process_sp, lldb::addr_t native_ptr) : SwiftArrayBufferHandler(), m_elem_type(), m_synth_array_sp(), m_frontend(nullptr) { m_elem_type = process_sp->GetTarget().GetScratchClangASTContext()->GetBasicType( lldb::eBasicTypeObjCID); InferiorSizedWord isw(native_ptr, *process_sp); m_synth_array_sp = ValueObjectConstResult::CreateValueObjectFromData( "_", isw.GetAsData(process_sp->GetByteOrder()), *process_sp, m_elem_type); if ((m_frontend = NSArraySyntheticFrontEndCreator(nullptr, m_synth_array_sp))) m_frontend->Update(); } bool SwiftArrayBridgedBufferHandler::IsValid() { return m_synth_array_sp.get() != nullptr && m_frontend != nullptr; } size_t SwiftArraySliceBufferHandler::GetCount() { return m_size; } size_t SwiftArraySliceBufferHandler::GetCapacity() { // Slices don't have a separate capacity - at least not in any obvious sense return m_size; } lldb_private::CompilerType SwiftArraySliceBufferHandler::GetElementType() { return m_elem_type; } lldb::ValueObjectSP SwiftArraySliceBufferHandler::GetElementAtIndex(size_t idx) { if (idx >= m_size) return ValueObjectSP(); const uint64_t effective_idx = idx + m_start_index; lldb::addr_t child_location = m_first_elem_ptr + effective_idx * m_element_stride; ProcessSP process_sp(m_exe_ctx_ref.GetProcessSP()); if (!process_sp) return ValueObjectSP(); DataBufferSP buffer(new DataBufferHeap(m_element_size, 0)); Status error; if (process_sp->ReadMemory(child_location, buffer->GetBytes(), m_element_size, error) != m_element_size || error.Fail()) return ValueObjectSP(); DataExtractor data(buffer, process_sp->GetByteOrder(), process_sp->GetAddressByteSize()); StreamString name; name.Printf("[%" PRIu64 "]", effective_idx); return ValueObject::CreateValueObjectFromData(name.GetData(), data, m_exe_ctx_ref, m_elem_type); } // this gets passed the "buffer" element? 
SwiftArraySliceBufferHandler::SwiftArraySliceBufferHandler( ValueObject &valobj, CompilerType elem_type) : m_size(0), m_first_elem_ptr(LLDB_INVALID_ADDRESS), m_elem_type(elem_type), m_element_size(0), m_element_stride(0), m_exe_ctx_ref(valobj.GetExecutionContextRef()), m_native_buffer(false), m_start_index(0) { static ConstString g_start("subscriptBaseAddress"); static ConstString g_value("_value"); static ConstString g__rawValue("_rawValue"); static ConstString g__countAndFlags("endIndexAndFlags"); static ConstString g__startIndex("startIndex"); ProcessSP process_sp(m_exe_ctx_ref.GetProcessSP()); if (!process_sp) return; auto opt_size = elem_type.GetByteSize(process_sp.get()); if (opt_size) m_element_size = *opt_size; auto opt_stride = elem_type.GetByteStride(process_sp.get()); if (opt_stride) m_element_stride = *opt_stride; ValueObjectSP value_sp(valobj.GetChildAtNamePath({g_start, g__rawValue})); if (!value_sp) return; m_first_elem_ptr = value_sp->GetValueAsUnsigned(LLDB_INVALID_ADDRESS); ValueObjectSP _countAndFlags_sp( valobj.GetChildAtNamePath({g__countAndFlags, g_value})); if (!_countAndFlags_sp) return; ValueObjectSP startIndex_sp( valobj.GetChildAtNamePath({g__startIndex, g_value})); if (startIndex_sp) m_start_index = startIndex_sp->GetValueAsUnsigned(0); InferiorSizedWord isw(_countAndFlags_sp->GetValueAsUnsigned(0), *process_sp); m_size = (isw >> 1).GetValue() - m_start_index; m_native_buffer = !((isw & 1).IsZero()); } bool SwiftArraySliceBufferHandler::IsValid() { return m_first_elem_ptr != LLDB_INVALID_ADDRESS && m_elem_type.IsValid(); } size_t SwiftSyntheticFrontEndBufferHandler::GetCount() { return m_frontend->CalculateNumChildren(); } size_t SwiftSyntheticFrontEndBufferHandler::GetCapacity() { return m_frontend->CalculateNumChildren(); } lldb_private::CompilerType SwiftSyntheticFrontEndBufferHandler::GetElementType() { // this doesn't make sense here - the synthetic children know best return CompilerType(); } lldb::ValueObjectSP SwiftSyntheticFrontEndBufferHandler::GetElementAtIndex(size_t idx) { return m_frontend->GetChildAtIndex(idx); } // this receives a pointer to the NSArray SwiftSyntheticFrontEndBufferHandler::SwiftSyntheticFrontEndBufferHandler( ValueObjectSP valobj_sp) : m_valobj_sp(valobj_sp), m_frontend(NSArraySyntheticFrontEndCreator(nullptr, valobj_sp)) { // Cocoa NSArray frontends must be updated before use if (m_frontend) m_frontend->Update(); } bool SwiftSyntheticFrontEndBufferHandler::IsValid() { return m_frontend.get() != nullptr; } std::unique_ptr<SwiftArrayBufferHandler> SwiftArrayBufferHandler::CreateBufferHandler(ValueObject &valobj) { llvm::StringRef valobj_typename( valobj.GetCompilerType().GetTypeName().AsCString("")); if (valobj_typename.startswith("Swift._NSSwiftArray")) { CompilerType anyobject_type = valobj.GetTargetSP()->GetScratchClangASTContext()->GetBasicType( lldb::eBasicTypeObjCID); auto handler = std::unique_ptr<SwiftArrayBufferHandler>( new SwiftArrayNativeBufferHandler(valobj, valobj.GetPointerValue(), anyobject_type)); if (handler && handler->IsValid()) return handler; return nullptr; } // For now we have to keep the old mangled name since the Objc->Swift bindings // that are in Foundation don't get the new mangling. 
if (valobj_typename.startswith(SwiftLanguageRuntime::GetCurrentMangledName("_TtCs23_ContiguousArrayStorage")) || valobj_typename.startswith("_TtCs23_ContiguousArrayStorage") || valobj_typename.startswith("Swift._ContiguousArrayStorage")) { CompilerType anyobject_type = valobj.GetTargetSP()->GetScratchClangASTContext()->GetBasicType( lldb::eBasicTypeObjCID); auto handler = std::unique_ptr<SwiftArrayBufferHandler>( new SwiftArrayNativeBufferHandler( valobj, valobj.GetValueAsUnsigned(LLDB_INVALID_ADDRESS), anyobject_type)); if (handler && handler->IsValid()) return handler; return nullptr; } if (valobj_typename.startswith(SwiftLanguageRuntime::GetCurrentMangledName("_TtCs22__SwiftDeferredNSArray")) || valobj_typename.startswith("_TtCs22__SwiftDeferredNSArray") || valobj_typename.startswith("Swift.__SwiftDeferredNSArray") ) { ProcessSP process_sp(valobj.GetProcessSP()); if (!process_sp) return nullptr; Status error; lldb::addr_t buffer_ptr = valobj.GetValueAsUnsigned(LLDB_INVALID_ADDRESS) + 3 * process_sp->GetAddressByteSize(); buffer_ptr = process_sp->ReadPointerFromMemory(buffer_ptr, error); if (error.Fail() || buffer_ptr == LLDB_INVALID_ADDRESS) return nullptr; lldb::addr_t argmetadata_ptr = process_sp->ReadPointerFromMemory(buffer_ptr, error); if (error.Fail() || argmetadata_ptr == LLDB_INVALID_ADDRESS) return nullptr; SwiftLanguageRuntime *swift_runtime = SwiftLanguageRuntime::Get(*process_sp); if (!swift_runtime) return nullptr; CompilerType argument_type; SwiftLanguageRuntime::MetadataPromiseSP promise_sp( swift_runtime->GetMetadataPromise(argmetadata_ptr, valobj)); if (promise_sp) if (CompilerType type = promise_sp->FulfillTypePromise()) argument_type = type.GetGenericArgumentType(0); if (!argument_type.IsValid()) return nullptr; auto handler = std::unique_ptr<SwiftArrayBufferHandler>( new SwiftArrayNativeBufferHandler(valobj, buffer_ptr, argument_type)); if (handler && handler->IsValid()) return handler; return nullptr; } if (valobj_typename.startswith("Swift.NativeArray<")) { // Swift.NativeArray static ConstString g_buffer("_buffer"); static ConstString g_base("base"); static ConstString g_storage("storage"); static ConstString g_some("Some"); ValueObjectSP some_sp(valobj.GetNonSyntheticValue()->GetChildAtNamePath( {g_buffer, g_base, g_storage, g_some})); if (!some_sp) return nullptr; CompilerType elem_type(valobj.GetCompilerType().GetArrayElementType()); auto handler = std::unique_ptr<SwiftArrayBufferHandler>( new SwiftArrayNativeBufferHandler( *some_sp, some_sp->GetValueAsUnsigned(LLDB_INVALID_ADDRESS), elem_type)); if (handler && handler->IsValid()) return handler; return nullptr; } else if (valobj_typename.startswith("Swift.ArraySlice<")) { // Swift.ArraySlice static ConstString g_buffer("_buffer"); ValueObjectSP buffer_sp( valobj.GetNonSyntheticValue()->GetChildAtNamePath({g_buffer})); if (!buffer_sp) return nullptr; CompilerType elem_type(valobj.GetCompilerType().GetArrayElementType()); auto handler = std::unique_ptr<SwiftArrayBufferHandler>( new SwiftArraySliceBufferHandler(*buffer_sp, elem_type)); if (handler && handler->IsValid()) return handler; return nullptr; } else { // Swift.Array static ConstString g_buffer("_buffer"); static ConstString g__storage("_storage"); static ConstString g_rawValue("rawValue"); ValueObjectSP buffer_sp(valobj.GetNonSyntheticValue()->GetChildAtNamePath( {g_buffer, g__storage, g_rawValue})); // For the new Array version which uses SIL tail-allocated arrays. 
if (!buffer_sp) buffer_sp = valobj.GetNonSyntheticValue()->GetChildAtNamePath( {g_buffer, g__storage}); if (!buffer_sp) return nullptr; lldb::addr_t storage_location = buffer_sp->GetValueAsUnsigned(LLDB_INVALID_ADDRESS); if (storage_location != LLDB_INVALID_ADDRESS) { ProcessSP process_sp(valobj.GetProcessSP()); if (!process_sp) return nullptr; SwiftLanguageRuntime *swift_runtime = SwiftLanguageRuntime::Get(*process_sp); if (!swift_runtime) return nullptr; lldb::addr_t masked_storage_location = swift_runtime->MaskMaybeBridgedPointer(storage_location); std::unique_ptr<SwiftArrayBufferHandler> handler; if (masked_storage_location == storage_location) { CompilerType elem_type(valobj.GetCompilerType().GetArrayElementType()); handler.reset(new SwiftArrayNativeBufferHandler( valobj, storage_location, elem_type)); } else { handler.reset(new SwiftArrayBridgedBufferHandler( process_sp, masked_storage_location)); } if (handler && handler->IsValid()) return handler; return nullptr; } else { CompilerType elem_type(valobj.GetCompilerType().GetArrayElementType()); return std::unique_ptr<SwiftArrayBufferHandler>( new SwiftArrayEmptyBufferHandler(elem_type)); } } return nullptr; } bool lldb_private::formatters::swift::Array_SummaryProvider( ValueObject &valobj, Stream &stream, const TypeSummaryOptions &options) { auto handler = SwiftArrayBufferHandler::CreateBufferHandler(valobj); if (!handler) return false; auto count = handler->GetCount(); stream.Printf("%zu value%s", count, (count == 1 ? "" : "s")); return true; }; lldb_private::formatters::swift::ArraySyntheticFrontEnd::ArraySyntheticFrontEnd( lldb::ValueObjectSP valobj_sp) : SyntheticChildrenFrontEnd(*valobj_sp.get()), m_array_buffer() { if (valobj_sp) Update(); } size_t lldb_private::formatters::swift::ArraySyntheticFrontEnd:: CalculateNumChildren() { return m_array_buffer ? m_array_buffer->GetCount() : 0; } lldb::ValueObjectSP lldb_private::formatters::swift::ArraySyntheticFrontEnd::GetChildAtIndex( size_t idx) { if (!m_array_buffer) return ValueObjectSP(); lldb::ValueObjectSP child_sp = m_array_buffer->GetElementAtIndex(idx); if (child_sp) child_sp->SetSyntheticChildrenGenerated(true); return child_sp; } bool lldb_private::formatters::swift::ArraySyntheticFrontEnd::Update() { m_array_buffer = SwiftArrayBufferHandler::CreateBufferHandler(m_backend); return false; } bool lldb_private::formatters::swift::ArraySyntheticFrontEnd::IsValid() { if (m_array_buffer) return m_array_buffer->IsValid(); return false; } bool lldb_private::formatters::swift::ArraySyntheticFrontEnd:: MightHaveChildren() { return true; } size_t lldb_private::formatters::swift::ArraySyntheticFrontEnd:: GetIndexOfChildWithName(ConstString name) { if (!m_array_buffer) return UINT32_MAX; const char *item_name = name.GetCString(); uint32_t idx = ExtractIndexFromString(item_name); if (idx < UINT32_MAX && idx >= CalculateNumChildren()) return UINT32_MAX; return idx; } SyntheticChildrenFrontEnd * lldb_private::formatters::swift::ArraySyntheticFrontEndCreator( CXXSyntheticChildren *, lldb::ValueObjectSP valobj_sp) { if (!valobj_sp) return nullptr; ArraySyntheticFrontEnd *front_end = new ArraySyntheticFrontEnd(valobj_sp); if (front_end && front_end->IsValid()) return front_end; return nullptr; }
1
19,898
This is checking the same condition twice? After this change, the first two `startswith` calls test the identical literal `"_TtCs23_ContiguousArrayStorage"`, so one of them is redundant.
apple-swift-lldb
cpp
@@ -157,8 +157,16 @@ void lbann_comm::allreduce(AbsDistMat& m, #ifdef LBANN_HAS_GPU if (m.GetLocalDevice() == El::Device::GPU) { #ifdef AL_HAS_NCCL - // Force GPU matrices to use NCCL. + // We require NCCL for GPU matrices. t = std::type_index(typeid(::Al::NCCLBackend)); + // If available, use the MPI-CUDA backend for small matrices. +#ifdef AL_HAS_MPI_CUDA + // Based on runs on Pascal and Ray. + if ((El::mpi::Size(c) > 4 && local_size <= 8192) || + (El::mpi::Size(c) >= 16 && local_size <= 32768)) { + t = std::type_index(typeid(::Al::MPICUDABackend)); + } +#endif // AL_HAS_MPI_CUDA #else throw lbann_exception("Allreduce on GPU matrix requires NCCL support in" " Aluminum");
1
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. // // lbann_comm .hpp .cpp - LBANN communication utilities //////////////////////////////////////////////////////////////////////////////// #include "lbann/comm.hpp" #include "lbann/utils/timer.hpp" #include "lbann/utils/exception.hpp" #include "lbann/utils/cuda.hpp" #include "mpi.h" #include "omp.h" #include <sstream> #include <thread> namespace lbann { // Error utility macro #ifdef LBANN_DEBUG #define checkMPI(mpi_call) { \ const int status = mpi_call; \ if(status != MPI_SUCCESS) { \ char error_string[MPI_MAX_ERROR_STRING]; \ int error_string_len; \ MPI_Error_string(status, error_string, &error_string_len); \ std::cerr << "MPI error: " << std::string(error_string, error_string_len) << "\n"; \ std::cerr << "Error at " << __FILE__ << ":" << __LINE__ << "\n"; \ throw lbann_exception("MPI error"); \ } \ } #else #define checkMPI(status) status #endif // #ifdef LBANN_DEBUG lbann_comm::lbann_comm(int ppm, const El::mpi::Comm world) : world_comm(world), grid(nullptr), procs_per_model(ppm), num_model_barriers(0), num_intermodel_barriers(0), num_global_barriers(0), bytes_sent(0), bytes_received(0) { #ifdef LBANN_HAS_ALUMINUM // Don't have argc/argv here, but MPI should already be init'd. 
int argc_dummy = 0; char** argv_dummy = nullptr; ::Al::Initialize(argc_dummy, argv_dummy); #endif // Set up the initial model split split_models(procs_per_model); // Initialize node communicators setup_node_comm(); procs_per_node = El::mpi::Size(node_comm); rank_in_node = El::mpi::Rank(node_comm); // Setup threads setup_threads(); } lbann_comm::~lbann_comm() { delete grid; El::mpi::Free(model_comm); El::mpi::Free(intermodel_comm); El::mpi::Free(node_comm); for (auto&& buf_vec : collective_bufs) { for (auto&& buf : buf_vec.second) { delete[] buf; } } #ifdef LBANN_HAS_ALUMINUM m_al_comms.clear(); ::Al::Finalize(); #endif } void lbann_comm::split_models(int ppm) { int world_size = El::mpi::Size(get_world_comm()); procs_per_model = ppm; if (ppm == 0) { procs_per_model = world_size; } // Check if parameters are valid if (procs_per_model > world_size) { throw lbann_exception( std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: Not enough processes to create one model; procs_per_model: " + std::to_string(procs_per_model) + " is larger than world_size: " + std::to_string(world_size)); } if (world_size % procs_per_model != 0) { throw lbann_exception( std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: Procs per model does not divide total number of procs; procs_per_model: " + std::to_string(procs_per_model) + " total number of procs (world size): " + std::to_string(world_size)); } num_models = world_size / procs_per_model; model_rank = El::mpi::Rank(get_world_comm()) / procs_per_model; rank_in_model = El::mpi::Rank(get_world_comm()) % procs_per_model; // Initialize model and intermodel communicators El::mpi::Split(get_world_comm(), model_rank, rank_in_model, model_comm); El::mpi::Split(get_world_comm(), rank_in_model, model_rank, intermodel_comm); // Initialize Elemental grid if (grid != nullptr) { delete grid; } grid = new Grid(model_comm); } void lbann_comm::intermodel_sum_matrix(AbsMat& mat) { bytes_sent += sizeof(DataType) * mat.Height() * mat.Width(); El::AllReduce(mat, intermodel_comm, El::mpi::SUM); bytes_received += sizeof(DataType) * mat.Height() * mat.Width(); } void lbann_comm::intermodel_sum_matrix(AbsDistMat& mat) { allreduce(mat, intermodel_comm, El::mpi::SUM); } void lbann_comm::allreduce(AbsDistMat& m, const El::mpi::Comm c, El::mpi::Op op) { if (El::mpi::Size(c) == 1) { return; // Can skip allreduce on one rank. } const int local_size = m.LocalHeight() * m.LocalWidth(); bytes_sent += sizeof(DataType) * local_size; #ifdef LBANN_HAS_ALUMINUM if (m.LocalHeight() != m.LDim()) { throw lbann_exception("Aluminum does not support allreduces on" " non-contiguous matrices"); } std::type_index t = std::type_index(typeid(::Al::MPIBackend)); #ifdef LBANN_HAS_GPU if (m.GetLocalDevice() == El::Device::GPU) { #ifdef AL_HAS_NCCL // Force GPU matrices to use NCCL. 
t = std::type_index(typeid(::Al::NCCLBackend)); #else throw lbann_exception("Allreduce on GPU matrix requires NCCL support in" " Aluminum"); #endif // AL_HAS_NCCL } #endif // LBANN_HAS_GPU auto&& comm = get_al_comm(c, t); if (t == std::type_index(typeid(::Al::MPIBackend))) { ::Al::Allreduce<::Al::MPIBackend>( m.Buffer(), local_size, mpi_op_to_al_op(op), *comm); } /// @todo MPI-CUDA backend #ifdef AL_HAS_NCCL if (t == std::type_index(typeid(::Al::NCCLBackend))) { ::Al::Allreduce<::Al::NCCLBackend>( m.Buffer(), local_size, mpi_op_to_al_op(op), *static_cast<::Al::NCCLCommunicator*>(comm)); } #endif // AL_HAS_NCCL #else El::AllReduce(m, c, op); #endif bytes_received += sizeof(DataType) * local_size * (El::mpi::Size(c) - 1); } void lbann_comm::nb_allreduce(AbsDistMat& m, const El::mpi::Comm c, Al::request& req, El::mpi::Op op) { if (El::mpi::Size(c) == 1) { return; // Can skip allreduce on one rank. } #ifdef LBANN_HAS_ALUMINUM const int local_size = m.LocalHeight() * m.LocalWidth(); bytes_sent += sizeof(DataType) * local_size; if (m.LocalHeight() != m.LDim()) { throw lbann_exception("Aluminum does not support allreduces on" " non-contiguous matrices"); } std::type_index t = std::type_index(typeid(::Al::MPIBackend)); #ifdef LBANN_HAS_GPU if (m.GetLocalDevice() == El::Device::GPU) { #ifdef AL_HAS_NCCL // Force GPU matrices to use NCCL. t = std::type_index(typeid(::Al::NCCLBackend)); #else throw lbann_exception("Allreduce on GPU matrix requires NCCL support in" " Aluminum"); #endif // AL_HAS_NCCL } #endif // LBANN_HAS_GPU auto&& comm = get_al_comm(c, t); if (t == std::type_index(typeid(::Al::MPIBackend))) { ::Al::NonblockingAllreduce<::Al::MPIBackend>( m.Buffer(), local_size, mpi_op_to_al_op(op), *comm, req.mpi_req); } /// @todo MPI-CUDA backend #ifdef AL_HAS_NCCL if (t == std::type_index(typeid(::Al::NCCLBackend))) { ::Al::NonblockingAllreduce<::Al::NCCLBackend>( m.Buffer(), local_size, mpi_op_to_al_op(op), *static_cast<::Al::NCCLCommunicator*>(comm), req.nccl_req); } #endif // AL_HAS_NCCL bytes_received += sizeof(DataType) * local_size * (El::mpi::Size(c) - 1); #else allreduce(m, c, op); #endif // LBANN_HAS_ALUMINUM } void lbann_comm::wait(Al::request& req) { #ifdef LBANN_HAS_ALUMINUM if (req.mpi_req != Al::mpi_null_req) { ::Al::Wait<::Al::MPIBackend>(req.mpi_req); } /// @todo MPI-CUDA backend #ifdef AL_HAS_NCCL if (req.nccl_req != Al::nccl_null_req) { // Note this does not block the host. 
::Al::Wait<::Al::NCCLBackend>(req.nccl_req); } #endif // AL_HAS_NCCL #endif // LBANN_HAS_ALUMINUM } bool lbann_comm::test(Al::request& req) { bool req_test = true; #ifdef LBANN_HAS_ALUMINUM if (req.mpi_req != Al::mpi_null_req) { req_test = req_test && ::Al::Test<::Al::MPIBackend>(req.mpi_req); } /// @todo MPI-CUDA backend #ifdef AL_HAS_NCCL if (req.nccl_req != Al::nccl_null_req) { req_test = req_test && ::Al::Test<::Al::NCCLBackend>(req.nccl_req); } #endif // AL_HAS_NCCL #endif // LBANN_HAS_ALUMINUM return req_test; } void lbann_comm::intermodel_broadcast_matrix(AbsMat& mat, int root) { El::Broadcast(mat, intermodel_comm, root); } void lbann_comm::intermodel_broadcast_matrix(AbsDistMat& mat, int root) { El::Broadcast(mat, intermodel_comm, root); } template<> void lbann_comm::broadcast<std::string>(const int root, std::string& str, const El::mpi::Comm c) { std::vector<char> data(str.begin(), str.end()); broadcast(root, data, c); str.assign(data.begin(), data.end()); } void lbann_comm::intermodel_barrier() { ++num_intermodel_barriers; barrier(intermodel_comm); } void lbann_comm::model_barrier() { ++num_model_barriers; barrier(model_comm); } void lbann_comm::global_barrier() { ++num_global_barriers; barrier(get_world_comm()); } void lbann_comm::barrier(const El::mpi::Comm c) { El::mpi::Barrier(c); } void lbann_comm::send(const AbsMat& mat, int model, int rank) { send(mat.LockedBuffer(), mat.Height() * mat.Width(), model, rank); } void lbann_comm::send(const DistMat& mat, int model, int rank) { send(mat.LockedBuffer(), mat.LocalHeight() * mat.LocalWidth(), model, rank); } void lbann_comm::nb_send(const AbsMat& mat, int model, int rank, El::mpi::Request<DataType>& req) { nb_send(mat.LockedBuffer(), mat.Height() * mat.Width(), model, rank, req); } void lbann_comm::nb_send(const DistMat& mat, int model, int rank, El::mpi::Request<DataType>& req) { nb_send(mat.LockedBuffer(), mat.LocalHeight() * mat.LocalWidth(), model, rank, req); } void lbann_comm::recv(AbsMat& mat, int model, int rank) { recv(mat.Buffer(), mat.Height() * mat.Width(), model, rank); } void lbann_comm::recv(DistMat& mat, int model, int rank) { recv(mat.Buffer(), mat.LocalHeight() * mat.LocalWidth(), model, rank); } void lbann_comm::recv(AbsMat& mat) { recv(mat.Buffer(), mat.Height() * mat.Width()); } void lbann_comm::recv(DistMat& mat) { recv(mat.Buffer(), mat.LocalHeight() * mat.LocalWidth()); } void lbann_comm::nb_recv(AbsMat& mat, int model, int rank, El::mpi::Request<DataType>& req) { nb_recv(mat.Buffer(), mat.Height() * mat.Width(), model, rank, req); } void lbann_comm::nb_recv(DistMat& mat, int model, int rank, El::mpi::Request<DataType>& req) { nb_recv(mat.Buffer(), mat.LocalHeight() * mat.LocalWidth(), model, rank, req); } void lbann_comm::nb_recv(AbsMat& mat, El::mpi::Request<DataType>& req) { nb_recv(mat.Buffer(), mat.Height() * mat.Width(), req); } void lbann_comm::nb_recv(DistMat& mat, El::mpi::Request<DataType>& req) { nb_recv(mat.Buffer(), mat.LocalHeight() * mat.LocalWidth(), req); } void lbann_comm::intermodel_allreduce( AbsMat& mat, int max_recv_count, std::function<uint8_t *(AbsMat&, El::IR, El::IR, int&, bool, int)> send_transform, std::function<int(uint8_t *, AbsMat&)> recv_transform, std::function<int(uint8_t *, AbsMat&, bool)> recv_apply_transform, const lbann_comm::allreduce_options opts) { // Determine which algorithm to actually use. 
lbann_comm::allreduce_algorithm algo = opts.algo; if (algo == allreduce_algorithm::DEFAULT) { algo = get_default_allreduce_algorithm(); } const int nprocs = get_num_models(); const El::Int small_message_threshold = 64*64; if (algo == allreduce_algorithm::DYNAMIC) { // For small messages and power-of-2 number of processes, use RD. if (!(nprocs & (nprocs - 1)) && mat.Height() * mat.Width() <= small_message_threshold) { algo = allreduce_algorithm::RECURSIVE_DOUBLING; } else { algo = allreduce_algorithm::PAIRWISE_EXCHANGE_RING; } } switch (algo) { case allreduce_algorithm::RECURSIVE_DOUBLING: recursive_doubling_allreduce_pow2( intermodel_comm, mat, max_recv_count, send_transform, recv_apply_transform, opts); break; case allreduce_algorithm::PAIRWISE_EXCHANGE_RING: pe_ring_allreduce<El::Device::CPU>( intermodel_comm, mat, max_recv_count, send_transform, recv_transform, recv_apply_transform, opts); break; case allreduce_algorithm::RING: ring_allreduce<El::Device::CPU>( intermodel_comm, mat, max_recv_count, send_transform, recv_transform, recv_apply_transform, opts); break; case allreduce_algorithm::RABENSEIFNER: rabenseifner_allreduce<El::Device::CPU>( intermodel_comm, mat, max_recv_count, send_transform, recv_transform, recv_apply_transform, opts); break; case allreduce_algorithm::DEFAULT: case allreduce_algorithm::DYNAMIC: default: throw lbann_exception("intermodel_allreduce: bad algorithm"); break; } } void lbann_comm::recursive_doubling_allreduce_pow2( const El::mpi::Comm comm, AbsMat& mat, int max_recv_count, std::function<uint8_t *(AbsMat&, El::IR, El::IR, int&, bool, int)> send_transform, std::function<int(uint8_t *, AbsMat&, bool)> recv_apply_transform, const lbann_comm::allreduce_options opts) { double ar_start = get_time(); const int rank = El::mpi::Rank(comm); const unsigned int nprocs = El::mpi::Size(comm); if (nprocs == 1) { return; // Nothing to do. } // This implementation requires a power-of-2 number of processes. if (nprocs & (nprocs - 1)) { throw lbann_exception("lbann_comm: recursive doubling allreduce requires" " a power-of-2 number of participating processes"); } uint8_t *max_recv_buf = get_collective_buffer(max_recv_count); uint8_t *recv_buf = max_recv_buf; unsigned int mask = 1; while (mask < nprocs) { int partner = rank ^ mask; // The rank we exchange with this step. const bool is_local = opts.no_local_trans && is_rank_node_local(partner, comm); // Transform the data we want to send. double send_trans_start = get_time(); int send_size; int recv_size = max_recv_count; uint8_t *send_buf = nullptr; if (is_local) { send_buf = (uint8_t *) mat.Buffer(); send_size = sizeof(DataType) * mat.Height() * mat.Width(); recv_size = send_size; recv_buf = get_collective_buffer(recv_size); } else { send_buf = send_transform(mat, El::ALL, El::ALL, send_size, false, 0); recv_buf = max_recv_buf; } ar_send_transform_time += get_time() - send_trans_start; bytes_sent += send_size; ar_bytes_sent += send_size; double sendrecv_start = get_time(); El::mpi::SendRecv(send_buf, send_size, partner, recv_buf, recv_size, partner, comm); double sendrecv_tot = get_time() - sendrecv_start; ar_send_time += sendrecv_tot; ar_recv_time += sendrecv_tot; // Transform and reduce the received data. 
double recv_apply_trans_start = get_time(); recv_size = recv_apply_transform(recv_buf, mat, is_local); ar_recv_apply_transform_time += get_time() - recv_apply_trans_start; bytes_received += recv_size; ar_bytes_received += recv_size; mask <<= 1; } ar_time += get_time() - ar_start; } template <El::Device D> void lbann_comm::pe_ring_allreduce( const El::mpi::Comm comm, DMat<D>& mat, int max_recv_count, std::function<uint8_t *(AbsMat&, El::IR, El::IR, int&, bool, int)> send_transform, std::function<int(uint8_t *, AbsMat&)> recv_transform, std::function<int(uint8_t *, AbsMat&, bool)> recv_apply_transform, const lbann_comm::allreduce_options opts) { double ar_start = get_time(); const int rank = El::mpi::Rank(comm); const int nprocs = El::mpi::Size(comm); if (nprocs == 1) { return; // Nothing to do. } // Compute the number of columns each processor sends. // If it doesn't divide evenly, give one extra to the earlier ranks. const El::Int cols_per_proc = mat.Width() / nprocs; const El::Int cols_remainder = mat.Width() % nprocs; // Compute the lengths/ends of the slices. std::vector<El::Int> slice_lengths(nprocs, cols_per_proc); for (int i = 0; i < cols_remainder; ++i) { slice_lengths[i] += 1; } std::vector<El::Int> slice_ends(nprocs); std::partial_sum(slice_lengths.begin(), slice_lengths.end(), slice_ends.begin()); std::vector<uint8_t *> max_recv_buffers(opts.max_reduces, nullptr); for (size_t i = 0; i < max_recv_buffers.size(); ++i) { max_recv_buffers[i] = get_collective_buffer(max_recv_count, i); } // Local slice of our accumulated data. auto accum_view = mat(El::ALL, El::IR(slice_ends[rank] - slice_lengths[rank], slice_ends[rank])); // Do a pairwise-exchange reduce-scatter. double rs_start = get_time(); for (int outer_step = 1; outer_step < nprocs; outer_step += opts.max_reduces) { const int reduces_this_step = std::min(opts.max_reduces, nprocs - outer_step); std::vector<El::mpi::Request<uint8_t>> send_reqs(reduces_this_step); std::vector<El::mpi::Request<uint8_t>> recv_reqs(reduces_this_step); std::vector<uint8_t *> recv_buffers(max_recv_buffers); int num_local_recvs = 0; std::vector<bool> local_recvs(reduces_this_step, false); for (int step = outer_step; step < outer_step + reduces_this_step; ++step) { const int reduce_idx = step - outer_step; // Compute where we send to/receive from. const int dst = (rank + step) % nprocs; const int src = (rank - step + nprocs) % nprocs; const bool is_send_local = opts.no_local_trans && is_rank_node_local(dst, comm); const bool is_recv_local = opts.no_local_trans && is_rank_node_local(src, comm); // Post the receive. double recv_start = get_time(); int recv_size = max_recv_count; if (is_recv_local) { recv_size = sizeof(DataType) * accum_view.Height() * accum_view.Width(); recv_buffers[reduce_idx] = get_collective_buffer(recv_size, num_local_recvs); ++num_local_recvs; local_recvs[reduce_idx] = is_recv_local; } El::mpi::IRecv(recv_buffers[reduce_idx], recv_size, src, comm, recv_reqs[reduce_idx]); double recv_tot = get_time() - recv_start; ar_recv_time += recv_tot; ar_rs_recv_time += recv_tot; // Transform the data we send. We do not look at the same chunk of data // twice. 
double send_trans_start = get_time(); int send_size; uint8_t *send_buf = nullptr; if (is_send_local) { auto send_view = mat(El::ALL, El::IR(slice_ends[dst] - slice_lengths[dst], slice_ends[dst])); send_buf = (uint8_t *) send_view.Buffer(); send_size = sizeof(DataType) * send_view.Height() * send_view.Width(); } else { send_buf = send_transform( mat, El::ALL, El::IR(slice_ends[dst] - slice_lengths[dst], slice_ends[dst]), send_size, true, reduce_idx); } ar_send_transform_time += get_time() - send_trans_start; bytes_sent += send_size; ar_bytes_sent += send_size; ar_rs_bytes_sent += send_size; // Post the send. double send_start = get_time(); El::mpi::ISend(send_buf, send_size, dst, comm, send_reqs[reduce_idx]); double send_tot = get_time() - send_start; ar_send_time += send_tot; ar_rs_send_time += send_tot; } // Complete the receives (in any order). // We need to extract the raw MPI_Request because Elemental does not support // MPI_Waitany. std::vector<MPI_Request> raw_reqs(reduces_this_step); for (int i = 0; i < reduces_this_step; ++i) { raw_reqs[i] = recv_reqs[i].backend; } for (int i = 0; i < reduces_this_step; ++i) { int completed_idx; double recv_start = get_time(); MPI_Waitany(reduces_this_step, raw_reqs.data(), &completed_idx, MPI_STATUS_IGNORE); double recv_tot = get_time() - recv_start; ar_recv_time += recv_tot; ar_rs_recv_time += recv_tot; double recv_apply_trans_start = get_time(); int recv_size = recv_apply_transform( recv_buffers[completed_idx], accum_view, local_recvs[completed_idx]); ar_recv_apply_transform_time += get_time() - recv_apply_trans_start; bytes_received += recv_size; ar_bytes_received += recv_size; ar_rs_bytes_received += recv_size; } // Complete all the sends. double send_start = get_time(); El::mpi::WaitAll(reduces_this_step, send_reqs.data(), MPI_STATUSES_IGNORE); double send_tot = get_time() - send_start; ar_send_time += send_tot; ar_rs_send_time += send_tot; } uint8_t *recv_buf = max_recv_buffers[0]; // Get a regular recv buffer. ar_rs_time += get_time() - rs_start; // Do a ring allgather. double ag_start = get_time(); const int src = (rank - 1 + nprocs) % nprocs; const int dst = (rank + 1) % nprocs; // Apply the transform to our locally-accumulated slice of the data. // Since the same data is cycled to every process, we do not do the // no_local_trans here. int send_size; // Do the first step where we forward our local data. { double send_trans_start = get_time(); uint8_t *send_buf = send_transform( mat, El::ALL, El::IR(slice_ends[rank] - slice_lengths[rank], slice_ends[rank]), send_size, false, 0); ar_send_transform_time += get_time() - send_trans_start; const int data_src = (rank - 1 + nprocs) % nprocs; bytes_sent += send_size; ar_bytes_sent += send_size; ar_ag_bytes_sent += send_size; auto recv_view = mat(El::ALL, El::IR(slice_ends[data_src] - slice_lengths[data_src], slice_ends[data_src])); // If we can, receive directly into the destination matrix. 
if (opts.id_recv) { recv_buf = (uint8_t *) recv_view.Buffer(); max_recv_count = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } double sendrecv_start = get_time(); El::mpi::SendRecv(send_buf, send_size, dst, recv_buf, max_recv_count, src, comm); double sendrecv_tot = get_time() - sendrecv_start; ar_send_time += sendrecv_tot; ar_recv_time += sendrecv_tot; ar_ag_send_time += sendrecv_tot; ar_ag_recv_time += sendrecv_tot; double recv_trans_start = get_time(); int recv_size = 0; if (opts.id_recv) { recv_size = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } else { recv_size = recv_transform(recv_buf, recv_view); } ar_recv_transform_time += get_time() - recv_trans_start; bytes_received += recv_size; ar_bytes_received += recv_size; ar_ag_bytes_received += send_size; send_size = recv_size; } // Now do the remaining nprocs - 2 steps. // We always send from recv_buf and receive to recv_buf2, swapping // pointers to avoid copying. uint8_t *recv_buf2 = nullptr; if (!opts.id_recv) { recv_buf2 = get_collective_buffer(max_recv_count, 1); } for (int step = 1; step < nprocs - 1; ++step) { // Compute where the data we get is coming from. const int data_src = (rank - step - 1 + nprocs) % nprocs; auto recv_view = mat(El::ALL, El::IR(slice_ends[data_src] - slice_lengths[data_src], slice_ends[data_src])); if (opts.id_recv) { recv_buf2 = (uint8_t *) recv_view.Buffer(); max_recv_count = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } bytes_sent += send_size; ar_bytes_sent += send_size; ar_ag_bytes_sent += send_size; double sendrecv_start = get_time(); El::mpi::SendRecv(recv_buf, send_size, dst, recv_buf2, max_recv_count, src, comm); double sendrecv_tot = get_time() - sendrecv_start; ar_send_time += sendrecv_tot; ar_recv_time += sendrecv_tot; ar_ag_send_time += sendrecv_tot; ar_ag_recv_time += sendrecv_tot; double recv_trans_start = get_time(); int recv_size = 0; if (opts.id_recv) { recv_size = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } else { recv_size = recv_transform(recv_buf2, recv_view); } ar_recv_transform_time += get_time() - recv_trans_start; bytes_received += recv_size; // Swap the send and receive buffers. std::swap(recv_buf, recv_buf2); send_size = recv_size; ar_bytes_received += recv_size; ar_ag_bytes_received += send_size; } ar_ag_time += get_time() - ag_start; ar_time += get_time() - ar_start; } template <El::Device D> void lbann_comm::ring_allreduce( const El::mpi::Comm comm, DMat<D>& mat, int max_recv_count, std::function<uint8_t *(AbsMat&, El::IR, El::IR, int&, bool, int)> send_transform, std::function<int(uint8_t *, AbsMat&)> recv_transform, std::function<int(uint8_t *, AbsMat&, bool)> recv_apply_transform, const lbann_comm::allreduce_options opts) { double ar_start = get_time(); const int rank = El::mpi::Rank(comm); const int nprocs = El::mpi::Size(comm); if (nprocs == 1) { return; // Nothing to do. } // Compute the number of columns each processor sends. const El::Int cols_per_proc = mat.Width() / nprocs; const El::Int cols_remainder = mat.Width() % nprocs; // Compute the lengths/ends of the slices. std::vector<El::Int> slice_lengths(nprocs, cols_per_proc); for (int i = 0; i < cols_remainder; ++i) { slice_lengths[i] += 1; } std::vector<El::Int> slice_ends(nprocs); std::partial_sum(slice_lengths.begin(), slice_lengths.end(), slice_ends.begin()); uint8_t *max_recv_buf = get_collective_buffer(max_recv_count); uint8_t *recv_buf = max_recv_buf; // Compute source/destination in the ring. 
const int src = (rank - 1 + nprocs) % nprocs; const int dst = (rank + 1) % nprocs; const bool is_send_local = opts.no_local_trans && is_rank_node_local(dst, comm); const bool is_recv_local = opts.no_local_trans && is_rank_node_local(src, comm); // Do a ring-based reduce-scatter. // This is like the pairwise-exchange reduce-scatter except instead of // rank i accumulating only slice i, the slices are cycled around and // each node accumulates its portion into the slice when it passes // through. After the nprocs-1 steps slice k will be on rank // (k + nprocs - 1) % nprocs. double rs_start = get_time(); for (int step = 0; step < nprocs - 1; ++step) { // Compute the slices to send/recv. const int send_slice = (rank - step + nprocs) % nprocs; const int recv_slice = (rank - step - 1 + nprocs) % nprocs; // Transform the data to send. double send_trans_start = get_time(); int send_size; int recv_size = max_recv_count; uint8_t *send_buf = nullptr; if (is_send_local) { auto send_view = mat(El::ALL, El::IR(slice_ends[dst] - slice_lengths[dst], slice_ends[dst])); send_buf = (uint8_t *) send_view.Buffer(); send_size = sizeof(DataType) * send_view.Height() * send_view.Width(); } else { send_buf = send_transform(mat, El::ALL, El::IR(slice_ends[send_slice] - slice_lengths[send_slice], slice_ends[send_slice]), send_size, false, 0); } auto recv_view = mat(El::ALL, El::IR(slice_ends[recv_slice] - slice_lengths[recv_slice], slice_ends[recv_slice])); if (is_recv_local) { recv_size = sizeof(DataType) * recv_view.Height() * recv_view.Width(); recv_buf = get_collective_buffer(recv_size); } else { recv_buf = max_recv_buf; } ar_send_transform_time += get_time() - send_trans_start; bytes_sent += send_size; ar_bytes_sent += send_size; ar_rs_bytes_sent += send_size; double sendrecv_start = get_time(); El::mpi::SendRecv(send_buf, send_size, dst, recv_buf, recv_size, src, comm); double sendrecv_tot = get_time() - sendrecv_start; ar_send_time += sendrecv_tot; ar_recv_time += sendrecv_tot; ar_rs_send_time += sendrecv_tot; ar_rs_recv_time += sendrecv_tot; double recv_apply_trans_start = get_time(); recv_size = recv_apply_transform(recv_buf, recv_view, is_recv_local); ar_recv_apply_transform_time += get_time() - recv_apply_trans_start; bytes_received += recv_size; ar_bytes_received += recv_size; ar_rs_bytes_received += recv_size; } recv_buf = max_recv_buf; // Ensure we're back to the original. ar_rs_time += get_time() - rs_start; // Do a ring allgather, first applying the transform to local data. double ag_start = get_time(); int send_size; { const int send_slice = (rank + 1) % nprocs; const int recv_slice = rank; double send_trans_start = get_time(); uint8_t *send_buf = send_transform( mat, El::ALL, El::IR(slice_ends[send_slice] - slice_lengths[send_slice], slice_ends[send_slice]), send_size, false, 0); ar_send_transform_time += get_time() - send_trans_start; bytes_sent += send_size; ar_bytes_sent += send_size; ar_ag_bytes_sent += send_size; auto recv_view = mat(El::ALL, El::IR(slice_ends[recv_slice] - slice_lengths[recv_slice], slice_ends[recv_slice])); // If we can, receive directly into the destination matrix. 
if (opts.id_recv) { recv_buf = (uint8_t *) recv_view.Buffer(); max_recv_count = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } double sendrecv_start = get_time(); El::mpi::SendRecv(send_buf, send_size, dst, recv_buf, max_recv_count, src, comm); double sendrecv_tot = get_time() - sendrecv_start; ar_send_time += sendrecv_tot; ar_recv_time += sendrecv_tot; ar_ag_send_time += sendrecv_tot; ar_ag_recv_time += sendrecv_tot; double recv_trans_start = get_time(); int recv_size = 0; if (opts.id_recv) { recv_size = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } else { recv_size = recv_transform(recv_buf, recv_view); } ar_recv_transform_time += get_time() - recv_trans_start; send_size = recv_size; bytes_received += recv_size; ar_bytes_received += recv_size; ar_ag_bytes_received += recv_size; } uint8_t *recv_buf2 = nullptr; if (!opts.id_recv) { recv_buf2 = get_collective_buffer(max_recv_count, 1); } for (int step = 1; step < nprocs - 1; ++step) { const int recv_slice = (rank - step + nprocs) % nprocs; auto recv_view = mat(El::ALL, El::IR(slice_ends[recv_slice] - slice_lengths[recv_slice], slice_ends[recv_slice])); if (opts.id_recv) { recv_buf2 = (uint8_t *) recv_view.Buffer(); max_recv_count = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } bytes_sent += send_size; ar_bytes_sent += send_size; ar_ag_bytes_sent += send_size; double sendrecv_start = get_time(); El::mpi::SendRecv(recv_buf, send_size, dst, recv_buf2, max_recv_count, src, comm); double sendrecv_tot = get_time() - sendrecv_start; ar_send_time += sendrecv_tot; ar_recv_time += sendrecv_tot; ar_ag_send_time += sendrecv_tot; ar_ag_recv_time += sendrecv_tot; double recv_trans_start = get_time(); int recv_size = 0; if (opts.id_recv) { recv_size = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } else { recv_size = recv_transform(recv_buf2, recv_view); } ar_recv_transform_time += get_time() - recv_trans_start; // Swap the send and receive buffers. std::swap(recv_buf, recv_buf2); send_size = recv_size; bytes_received += recv_size; ar_bytes_received += recv_size; ar_ag_bytes_received += recv_size; } ar_ag_time += get_time() - ag_start; ar_time += get_time() - ar_start; } template <El::Device D> void lbann_comm::rabenseifner_allreduce( const El::mpi::Comm comm, DMat<D>& mat, int max_recv_count, std::function<uint8_t *(AbsMat&, El::IR, El::IR, int&, bool, int)> send_transform, std::function<int(uint8_t *, AbsMat&)> recv_transform, std::function<int(uint8_t *, AbsMat&, bool)> recv_apply_transform, const lbann_comm::allreduce_options opts) { double ar_start = get_time(); const int rank = El::mpi::Rank(comm); const unsigned int nprocs = El::mpi::Size(comm); if (nprocs == 1) { return; // Nothing to do. } // This implementation requires a power-of-2 number of processes. if (nprocs & (nprocs - 1)) { throw lbann_exception("lbann_comm: Rabenseifner allreduce requires" " a power-of-2 number of participating processes"); } // Compute the slices on each processor. const El::Int cols_per_proc = mat.Width() / nprocs; const El::Int cols_remainder = mat.Width() % nprocs; // Compute the lengths/ends of the slices. std::vector<El::Int> slice_lengths(nprocs, cols_per_proc); for (int i = 0; i < cols_remainder; ++i) { slice_lengths[i] += 1; } std::vector<El::Int> slice_ends(nprocs); std::partial_sum(slice_lengths.begin(), slice_lengths.end(), slice_ends.begin()); // Do a recursive-halving reduce-scatter. // In each step here a process sends all the data needed for the other // "half" of the processes. i.e. 
each process sends half their data in the // first step, a quarter in the second step, etc. double rs_start = get_time(); unsigned int partner_mask = nprocs >> 1; unsigned int slice_mask = 1; unsigned int send_idx = 0; unsigned int recv_idx = 0; unsigned int last_idx = nprocs; uint8_t *recv_buf = get_collective_buffer(max_recv_count); while (partner_mask > 0) { int partner = rank ^ partner_mask; // The rank we exchange with this step. const bool is_local = opts.no_local_trans && is_rank_node_local(partner, comm); // Determine the range of data to send/recv. El::IR send_range, recv_range; if (rank < partner) { send_idx = recv_idx + nprocs / (slice_mask*2); send_range = El::IR(slice_ends[send_idx] - slice_lengths[send_idx], slice_ends[last_idx-1]); recv_range = El::IR(slice_ends[recv_idx] - slice_lengths[recv_idx], slice_ends[send_idx-1]); } else { recv_idx = send_idx + nprocs / (slice_mask*2); send_range = El::IR(slice_ends[send_idx] - slice_lengths[send_idx], slice_ends[recv_idx-1]); recv_range = El::IR(slice_ends[recv_idx] - slice_lengths[recv_idx], slice_ends[last_idx-1]); } auto recv_view = mat(El::ALL, recv_range); // Transform the data to send. double send_trans_start = get_time(); int send_size; int recv_size = max_recv_count; uint8_t *send_buf = nullptr; if (is_local) { auto send_view = mat(El::ALL, send_range); send_buf = (uint8_t *) send_view.Buffer(); send_size = sizeof(DataType) * send_view.Height() * send_view.Width(); recv_size = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } else { send_buf = send_transform(mat, El::ALL, send_range, send_size, false, 0); } ar_send_transform_time += get_time() - send_trans_start; bytes_sent += send_size; ar_bytes_sent += send_size; ar_rs_bytes_sent += send_size; double sendrecv_start = get_time(); El::mpi::SendRecv(send_buf, send_size, partner, recv_buf, recv_size, partner, comm); double sendrecv_tot = get_time() - sendrecv_start; ar_send_time += sendrecv_tot; ar_recv_time += sendrecv_tot; ar_rs_send_time += sendrecv_tot; ar_rs_recv_time += sendrecv_tot; // Transform the received data. double recv_apply_trans_start = get_time(); recv_size = recv_apply_transform(recv_buf, recv_view, is_local); ar_recv_apply_transform_time += get_time() - recv_apply_trans_start; bytes_received += recv_size; ar_bytes_received += recv_size; ar_rs_bytes_received += send_size; // Update info for next iteration. // Except last_idx when needed for the allgather. send_idx = recv_idx; partner_mask >>= 1; slice_mask <<= 1; if (partner_mask > 0) { last_idx = recv_idx + nprocs / (slice_mask); } } ar_rs_time += get_time() - rs_start; // Do a recursive-doubling algather. double ag_start = get_time(); slice_mask >>= 1; partner_mask = 1; // Now do the remaining steps. while (partner_mask < nprocs) { int partner = rank ^ partner_mask; const bool is_local = opts.no_local_trans && is_rank_node_local(partner, comm); // Determine range to send/recv. 
El::IR send_range, recv_range; if (rank < partner) { if (slice_mask != nprocs / 2) { last_idx = last_idx + nprocs / (slice_mask*2); } recv_idx = send_idx + nprocs / (slice_mask*2); send_range = El::IR(slice_ends[send_idx] - slice_lengths[send_idx], slice_ends[recv_idx-1]); recv_range = El::IR(slice_ends[recv_idx] - slice_lengths[recv_idx], slice_ends[last_idx-1]); } else { recv_idx = send_idx - nprocs / (slice_mask*2); send_range = El::IR(slice_ends[send_idx] - slice_lengths[send_idx], slice_ends[last_idx-1]); recv_range = El::IR(slice_ends[recv_idx] - slice_lengths[recv_idx], slice_ends[send_idx-1]); } auto recv_view = mat(El::ALL, recv_range); // Transform the data to send. double send_trans_start = get_time(); int send_size; int recv_size = max_recv_count; uint8_t *send_buf = nullptr; if (is_local) { auto send_view = mat(El::ALL, send_range); send_buf = (uint8_t *) send_view.Buffer(); send_size = sizeof(DataType) * send_view.Height() * send_view.Width(); recv_size = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } else { send_buf = send_transform(mat, El::ALL, send_range, send_size, false, 0); } ar_send_transform_time += get_time() - send_trans_start; if (opts.id_recv || is_local) { recv_buf = (uint8_t *) recv_view.Buffer(); recv_size = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } bytes_sent += send_size; ar_bytes_sent += send_size; ar_ag_bytes_sent += send_size; double sendrecv_start = get_time(); El::mpi::SendRecv(send_buf, send_size, partner, recv_buf, recv_size, partner, comm); double sendrecv_tot = get_time() - sendrecv_start; ar_send_time += sendrecv_tot; ar_recv_time += sendrecv_tot; ar_ag_send_time += sendrecv_tot; ar_ag_recv_time += sendrecv_tot; double recv_trans_start = get_time(); if (opts.id_recv) { recv_size = sizeof(DataType) * recv_view.Height() * recv_view.Width(); } else { recv_size = recv_transform(recv_buf, recv_view); } ar_recv_transform_time += get_time() - recv_trans_start; bytes_received += recv_size; ar_bytes_received += recv_size; ar_ag_bytes_received += send_size; // Update for the next iteration. if (rank > partner) { send_idx = recv_idx; } partner_mask <<= 1; slice_mask >>= 1; } ar_ag_time += get_time() - ag_start; ar_time += get_time() - ar_start; } void lbann_comm::setup_node_comm() { // Get string specifying compute node char node_name[MPI_MAX_PROCESSOR_NAME]; int node_name_len; checkMPI(MPI_Get_processor_name(node_name, &node_name_len)); const std::string node_string(node_name); // Hash node names and split MPI processes int hash = std::hash<std::string>()(node_string); hash = hash >= 0 ? hash : -hash; // Make sure hash is non-negative El::mpi::Comm hash_comm; El::mpi::Split(get_world_comm(), hash, El::mpi::Rank(get_world_comm()), hash_comm); const int hash_comm_size = El::mpi::Size(hash_comm); // Compare node names and split MPI processes auto *node_name_list = new char[hash_comm_size*MPI_MAX_PROCESSOR_NAME]; checkMPI(MPI_Allgather(node_name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, node_name_list, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, hash_comm.comm)); int node_num = El::mpi::Rank(hash_comm); for(int i=0; i<hash_comm_size; ++i) { const std::string other_node_string(node_name_list + i*MPI_MAX_PROCESSOR_NAME); if(node_string == other_node_string) { node_num = i; break; } } delete[] node_name_list; El::mpi::Split(hash_comm, node_num, El::mpi::Rank(get_world_comm()), node_comm); El::mpi::Free(hash_comm); // Set up list of ranks that are local. 
int node_comm_size = El::mpi::Size(node_comm); for (int i = 0; i < node_comm_size; ++i) { world_ranks_on_node.push_back( El::mpi::Translate(node_comm, i, get_world_comm())); } } void lbann_comm::setup_threads() { const char* env_num_threads = getenv("OMP_NUM_THREADS"); if (env_num_threads != nullptr){ threads_per_proc = std::atoi(env_num_threads); } else { threads_per_proc = std::thread::hardware_concurrency() / procs_per_node; } reset_threads(); } void lbann_comm::reset_threads() { if (threads_per_proc != omp_get_max_threads()) { omp_set_num_threads(threads_per_proc); } } uint8_t *lbann_comm::get_collective_buffer(size_t size, size_t idx) { auto buf_iter = collective_bufs.find(size); if (buf_iter == collective_bufs.end()) { if (idx != 0) { throw lbann_exception("get_collective_buffer: non-contiguous index"); } collective_bufs.emplace(std::make_pair(size, std::vector<uint8_t *>())); collective_bufs[size].push_back(new uint8_t[size]); return collective_bufs[size][0]; } else { if (collective_bufs[size].size() > idx) { return collective_bufs[size][idx]; } else { if (collective_bufs[size].size() != idx) { throw lbann_exception("get_collective_buffer: non-contiguous index"); } collective_bufs[size].push_back(new uint8_t[size]); return collective_bufs[size][idx]; } } } #ifdef LBANN_HAS_ALUMINUM ::Al::MPICommunicator* lbann_comm::get_al_comm(El::mpi::Comm c, std::type_index t) { // Construct Aluminum communicator if needed const al_comms_key_type key(c.comm, t); if (m_al_comms.count(key) == 0) { if (t == std::type_index(typeid(::Al::MPIBackend))) { m_al_comms[key] = al_comms_val_type(new ::Al::MPICommunicator(c.comm)); } /// @todo MPI-CUDA backend #ifdef AL_HAS_NCCL if (t == std::type_index(typeid(::Al::NCCLBackend))) { auto&& val = new ::Al::NCCLCommunicator(c.comm, El::GPUManager::Stream()); m_al_comms[key] = al_comms_val_type(val); } #endif // AL_HAS_NCCL } // Return Aluminum communicator auto&& comm = m_al_comms[key].get(); if (comm == nullptr) { throw lbann_exception("Could not get Aluminum communicator"); } return comm; } ::Al::ReductionOperator lbann_comm::mpi_op_to_al_op(El::mpi::Op op) { if (op == El::mpi::SUM) { return ::Al::ReductionOperator::sum; } else if (op == El::mpi::PROD) { return ::Al::ReductionOperator::prod; } else if (op == El::mpi::MIN) { return ::Al::ReductionOperator::min; } else if (op == El::mpi::MAX) { return ::Al::ReductionOperator::max; } else { throw lbann_exception("Reduction operator not supported in Aluminum"); } } #endif void lbann_comm::lbann_comm_abort(std::string msg) { throw lbann_exception(msg); } } // namespace lbann
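Note on the allreduce code in the oldf field above: all four variants (recursive doubling, pairwise-exchange ring, ring, Rabenseifner) lean on the same small pieces of index arithmetic — a power-of-two test via nprocs & (nprocs - 1), XOR partner selection (rank ^ mask), and column-slice boundaries built with std::partial_sum. The standalone sketch below is not LBANN code; the process count and matrix width are made-up values, and it only prints those quantities so the pairing and slicing logic above is easier to follow.

// Standalone illustration of the index math used by the allreduce code above.
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  const int nprocs = 8;   // assumed number of participating processes (power of two)
  const int width  = 10;  // assumed matrix width in columns

  // Power-of-two test used when choosing recursive doubling for small messages.
  std::cout << "power of two: " << ((nprocs & (nprocs - 1)) == 0) << "\n";

  // Recursive-doubling partners for rank 3: 3^1 = 2, 3^2 = 1, 3^4 = 7.
  for (unsigned int mask = 1; mask < static_cast<unsigned int>(nprocs); mask <<= 1) {
    std::cout << "rank 3 exchanges with rank " << (3 ^ mask) << "\n";
  }

  // Column slices for the reduce-scatter: earlier ranks absorb the remainder,
  // and slice_ends is a prefix sum, as in pe_ring/ring/rabenseifner above.
  std::vector<int> slice_lengths(nprocs, width / nprocs);
  for (int i = 0; i < width % nprocs; ++i) slice_lengths[i] += 1;
  std::vector<int> slice_ends(nprocs);
  std::partial_sum(slice_lengths.begin(), slice_lengths.end(), slice_ends.begin());
  for (int r = 0; r < nprocs; ++r) {
    std::cout << "rank " << r << " owns columns ["
              << slice_ends[r] - slice_lengths[r] << ", " << slice_ends[r] << ")\n";
  }
  return 0;
}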
1
13031
Aren't we able to handle the case where we have MPI-CUDA without NCCL, even if it's suboptimal?
LLNL-lbann
cpp
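On the review comment for this record ("Aren't we able to handle the case where we have MPI-CUDA without NCCL, even if it's suboptimal?"): the wait/test helpers in the file carry "/// @todo MPI-CUDA backend" placeholders, so in principle an MPI-CUDA request could be polled with the same guard pattern already used for the NCCL backend. The sketch below is only a guess at what that could look like, not the project's actual change; the AL_HAS_MPI_CUDA macro, the ::Al::MPICUDABackend type, and the mpicuda_req / mpicuda_null_req members are assumptions patterned after the existing nccl_req handling.

// Hedged sketch: poll an MPI-CUDA request alongside the MPI and NCCL ones,
// mirroring the backend guards already present in lbann_comm::test().
bool lbann_comm::test(Al::request& req) {
  bool req_test = true;
#ifdef LBANN_HAS_ALUMINUM
  if (req.mpi_req != Al::mpi_null_req) {
    req_test = req_test && ::Al::Test<::Al::MPIBackend>(req.mpi_req);
  }
#ifdef AL_HAS_MPI_CUDA  // assumed macro; would replace the "@todo MPI-CUDA backend" placeholder
  if (req.mpicuda_req != Al::mpicuda_null_req) {  // assumed request member
    req_test = req_test && ::Al::Test<::Al::MPICUDABackend>(req.mpicuda_req);
  }
#endif // AL_HAS_MPI_CUDA
#ifdef AL_HAS_NCCL
  if (req.nccl_req != Al::nccl_null_req) {
    req_test = req_test && ::Al::Test<::Al::NCCLBackend>(req.nccl_req);
  }
#endif // AL_HAS_NCCL
#endif // LBANN_HAS_ALUMINUM
  return req_test;
}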
@@ -37,6 +37,12 @@ Copyright (C) 2001-2016 EQEMu Development Team (http://eqemulator.net) extern QueryServ* QServ; +namespace detail +{ + static const uint32 PhantomStatId = 999999; +} + + void Mob::TemporaryPets(uint16 spell_id, Mob *targ, const char *name_override, uint32 duration_override, bool followme, bool sticktarg, uint16 *eye_id) { //It might not be a bad idea to put these into the database, eventually..
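The patch above only introduces a file-local sentinel, detail::PhantomStatId = 999999; its call sites are not part of this excerpt. The snippet below is purely hypothetical and just illustrates the usual motivation for such a constant: replacing a bare magic number in a comparison with a named value that documents intent. The typedef and the IsPhantomStat helper are inventions for the sketch, not code from the project.

// Hypothetical usage only -- the real call sites are outside this excerpt.
#include <cstdint>
typedef uint32_t uint32;  // stand-in for the project's uint32 typedef

namespace detail
{
  static const uint32 PhantomStatId = 999999;
}

// A named sentinel reads better than a bare 999999 at the call site.
static bool IsPhantomStat(uint32 stat_id)
{
  return stat_id == detail::PhantomStatId;
}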
1
/* EQEMu: Everquest Server Emulator Copyright (C) 2001-2016 EQEMu Development Team (http://eqemulator.net) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY except by those people which sell it, which are required to give you total support for your newly bought product; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "../common/classes.h" #include "../common/global_define.h" #include "../common/eqemu_logsys.h" #include "../common/eq_packet_structs.h" #include "../common/races.h" #include "../common/spdat.h" #include "../common/string_util.h" #include "aa.h" #include "client.h" #include "corpse.h" #include "groups.h" #include "mob.h" #include "queryserv.h" #include "raids.h" #include "string_ids.h" #include "titles.h" #include "zonedb.h" #include "zone_store.h" extern QueryServ* QServ; void Mob::TemporaryPets(uint16 spell_id, Mob *targ, const char *name_override, uint32 duration_override, bool followme, bool sticktarg, uint16 *eye_id) { //It might not be a bad idea to put these into the database, eventually.. //Dook- swarms and wards // do nothing if it's a corpse if (targ != nullptr && targ->IsCorpse()) return; // yep, even these need pet power! int act_power = 0; if (IsClient()) { act_power = CastToClient()->GetFocusEffect(focusPetPower, spell_id); act_power = CastToClient()->mod_pet_power(act_power, spell_id); } PetRecord record; if (!content_db.GetPoweredPetEntry(spells[spell_id].teleport_zone, act_power, &record)) { LogError("Unknown swarm pet spell id: {}, check pets table", spell_id); Message(Chat::Red, "Unable to find data for pet %s", spells[spell_id].teleport_zone); return; } SwarmPet_Struct pet; pet.count = 1; pet.duration = 1; for (int x = 0; x < MAX_SWARM_PETS; x++) { if (spells[spell_id].effectid[x] == SE_TemporaryPets) { pet.count = spells[spell_id].base[x]; pet.duration = spells[spell_id].max[x]; } } pet.duration += GetFocusEffect(focusSwarmPetDuration, spell_id) / 1000; pet.npc_id = record.npc_type; NPCType *made_npc = nullptr; const NPCType *npc_type = content_db.LoadNPCTypesData(pet.npc_id); if (npc_type == nullptr) { //log write LogError("Unknown npc type for swarm pet spell id: [{}]", spell_id); Message(0, "Unable to find pet!"); return; } if (name_override != nullptr) { //we have to make a custom NPC type for this name change made_npc = new NPCType; memcpy(made_npc, npc_type, sizeof(NPCType)); strcpy(made_npc->name, name_override); npc_type = made_npc; } int summon_count = 0; summon_count = pet.count; if (summon_count > MAX_SWARM_PETS) summon_count = MAX_SWARM_PETS; static const glm::vec2 swarmPetLocations[MAX_SWARM_PETS] = { glm::vec2(5, 5), glm::vec2(-5, 5), glm::vec2(5, -5), glm::vec2(-5, -5), glm::vec2(10, 10), glm::vec2(-10, 10), glm::vec2(10, -10), glm::vec2(-10, -10), glm::vec2(8, 8), glm::vec2(-8, 8), glm::vec2(8, -8), glm::vec2(-8, -8) }; NPC* swarm_pet_npc = nullptr; while (summon_count > 0) { int pet_duration = pet.duration; if (duration_override > 0) pet_duration = duration_override; //this is a little messy, but the only way to 
do it right //it would be possible to optimize out this copy for the last pet, but oh well NPCType *npc_dup = nullptr; if (made_npc != nullptr) { npc_dup = new NPCType; memcpy(npc_dup, made_npc, sizeof(NPCType)); } swarm_pet_npc = new NPC( (npc_dup != nullptr) ? npc_dup : npc_type, //make sure we give the NPC the correct data pointer 0, GetPosition() + glm::vec4(swarmPetLocations[summon_count], 0.0f, 0.0f), GravityBehavior::Water); if (followme) swarm_pet_npc->SetFollowID(GetID()); if (!swarm_pet_npc->GetSwarmInfo()) { auto nSI = new SwarmPet; swarm_pet_npc->SetSwarmInfo(nSI); swarm_pet_npc->GetSwarmInfo()->duration = new Timer(pet_duration * 1000); } else { swarm_pet_npc->GetSwarmInfo()->duration->Start(pet_duration * 1000); } swarm_pet_npc->StartSwarmTimer(pet_duration * 1000); //removing this prevents the pet from attacking swarm_pet_npc->GetSwarmInfo()->owner_id = GetID(); //give the pets somebody to "love" if (targ != nullptr) { swarm_pet_npc->AddToHateList(targ, 1000, 1000); if (RuleB(Spells, SwarmPetTargetLock) || sticktarg) swarm_pet_npc->GetSwarmInfo()->target = targ->GetID(); else swarm_pet_npc->GetSwarmInfo()->target = 0; } //we allocated a new NPC type object, give the NPC ownership of that memory if (npc_dup != nullptr) swarm_pet_npc->GiveNPCTypeData(npc_dup); entity_list.AddNPC(swarm_pet_npc, true, true); summon_count--; } if (swarm_pet_npc && IsClient() && eye_id != nullptr) { *eye_id = swarm_pet_npc->GetID(); } //the target of these swarm pets will take offense to being cast on... if (targ != nullptr) targ->AddToHateList(this, 1, 0); // The other pointers we make are handled elsewhere. delete made_npc; } void Mob::TypesTemporaryPets(uint32 typesid, Mob *targ, const char *name_override, uint32 duration_override, bool followme, bool sticktarg) { SwarmPet_Struct pet; pet.count = 1; pet.duration = 1; pet.npc_id = typesid; NPCType *made_npc = nullptr; const NPCType *npc_type = content_db.LoadNPCTypesData(typesid); if(npc_type == nullptr) { //log write LogError("Unknown npc type for swarm pet type id: [{}]", typesid); Message(0,"Unable to find pet!"); return; } if(name_override != nullptr) { //we have to make a custom NPC type for this name change made_npc = new NPCType; memcpy(made_npc, npc_type, sizeof(NPCType)); strcpy(made_npc->name, name_override); npc_type = made_npc; } int summon_count = 0; summon_count = pet.count; if(summon_count > MAX_SWARM_PETS) summon_count = MAX_SWARM_PETS; static const glm::vec2 swarmPetLocations[MAX_SWARM_PETS] = { glm::vec2(5, 5), glm::vec2(-5, 5), glm::vec2(5, -5), glm::vec2(-5, -5), glm::vec2(10, 10), glm::vec2(-10, 10), glm::vec2(10, -10), glm::vec2(-10, -10), glm::vec2(8, 8), glm::vec2(-8, 8), glm::vec2(8, -8), glm::vec2(-8, -8) };; while(summon_count > 0) { int pet_duration = pet.duration; if(duration_override > 0) pet_duration = duration_override; //this is a little messy, but the only way to do it right //it would be possible to optimize out this copy for the last pet, but oh well NPCType *npc_dup = nullptr; if(made_npc != nullptr) { npc_dup = new NPCType; memcpy(npc_dup, made_npc, sizeof(NPCType)); } NPC* swarm_pet_npc = new NPC( (npc_dup!=nullptr)?npc_dup:npc_type, //make sure we give the NPC the correct data pointer 0, GetPosition() + glm::vec4(swarmPetLocations[summon_count], 0.0f, 0.0f), GravityBehavior::Water); if (followme) swarm_pet_npc->SetFollowID(GetID()); if(!swarm_pet_npc->GetSwarmInfo()){ auto nSI = new SwarmPet; swarm_pet_npc->SetSwarmInfo(nSI); swarm_pet_npc->GetSwarmInfo()->duration = new Timer(pet_duration*1000); } else { 
swarm_pet_npc->GetSwarmInfo()->duration->Start(pet_duration*1000); } swarm_pet_npc->StartSwarmTimer(pet_duration * 1000); //removing this prevents the pet from attacking swarm_pet_npc->GetSwarmInfo()->owner_id = GetID(); //give the pets somebody to "love" if(targ != nullptr){ swarm_pet_npc->AddToHateList(targ, 1000, 1000); if (RuleB(Spells, SwarmPetTargetLock) || sticktarg) swarm_pet_npc->GetSwarmInfo()->target = targ->GetID(); else swarm_pet_npc->GetSwarmInfo()->target = 0; } //we allocated a new NPC type object, give the NPC ownership of that memory if(npc_dup != nullptr) swarm_pet_npc->GiveNPCTypeData(npc_dup); entity_list.AddNPC(swarm_pet_npc, true, true); summon_count--; } // The other pointers we make are handled elsewhere. delete made_npc; } void Mob::WakeTheDead(uint16 spell_id, Mob *target, uint32 duration) { Corpse *CorpseToUse = nullptr; CorpseToUse = entity_list.GetClosestCorpse(this, nullptr); if(!CorpseToUse) return; //assuming we have pets in our table; we take the first pet as a base type. const NPCType *base_type = content_db.LoadNPCTypesData(500); auto make_npc = new NPCType; memcpy(make_npc, base_type, sizeof(NPCType)); //combat stats make_npc->AC = ((GetLevel() * 7) + 550); make_npc->ATK = GetLevel(); make_npc->max_dmg = (GetLevel() * 4) + 2; make_npc->min_dmg = 1; //base stats make_npc->current_hp = (GetLevel() * 55); make_npc->max_hp = (GetLevel() * 55); make_npc->STR = 85 + (GetLevel() * 3); make_npc->STA = 85 + (GetLevel() * 3); make_npc->DEX = 85 + (GetLevel() * 3); make_npc->AGI = 85 + (GetLevel() * 3); make_npc->INT = 85 + (GetLevel() * 3); make_npc->WIS = 85 + (GetLevel() * 3); make_npc->CHA = 85 + (GetLevel() * 3); make_npc->MR = 25; make_npc->FR = 25; make_npc->CR = 25; make_npc->DR = 25; make_npc->PR = 25; //level class and gender make_npc->level = GetLevel(); make_npc->class_ = CorpseToUse->class_; make_npc->race = CorpseToUse->race; make_npc->gender = CorpseToUse->gender; make_npc->loottable_id = 0; //name char NewName[64]; sprintf(NewName, "%s`s Animated Corpse", GetCleanName()); strcpy(make_npc->name, NewName); //appearance make_npc->beard = CorpseToUse->beard; make_npc->beardcolor = CorpseToUse->beardcolor; make_npc->eyecolor1 = CorpseToUse->eyecolor1; make_npc->eyecolor2 = CorpseToUse->eyecolor2; make_npc->haircolor = CorpseToUse->haircolor; make_npc->hairstyle = CorpseToUse->hairstyle; make_npc->helmtexture = CorpseToUse->helmtexture; make_npc->luclinface = CorpseToUse->luclinface; make_npc->size = CorpseToUse->size; make_npc->texture = CorpseToUse->texture; //cast stuff.. 
based off of PEQ's if you want to change //it you'll have to mod this code, but most likely //most people will be using PEQ style for the first //part of their spell list; can't think of any smooth //way to do this //some basic combat mods here too since it's convienent switch(CorpseToUse->class_) { case CLERIC: make_npc->npc_spells_id = 1; break; case WIZARD: make_npc->npc_spells_id = 2; break; case NECROMANCER: make_npc->npc_spells_id = 3; break; case MAGICIAN: make_npc->npc_spells_id = 4; break; case ENCHANTER: make_npc->npc_spells_id = 5; break; case SHAMAN: make_npc->npc_spells_id = 6; break; case DRUID: make_npc->npc_spells_id = 7; break; case PALADIN: //SPECATK_TRIPLE strcpy(make_npc->special_abilities, "6,1"); make_npc->current_hp = make_npc->current_hp * 150 / 100; make_npc->max_hp = make_npc->max_hp * 150 / 100; make_npc->npc_spells_id = 8; break; case SHADOWKNIGHT: strcpy(make_npc->special_abilities, "6,1"); make_npc->current_hp = make_npc->current_hp * 150 / 100; make_npc->max_hp = make_npc->max_hp * 150 / 100; make_npc->npc_spells_id = 9; break; case RANGER: strcpy(make_npc->special_abilities, "7,1"); make_npc->current_hp = make_npc->current_hp * 135 / 100; make_npc->max_hp = make_npc->max_hp * 135 / 100; make_npc->npc_spells_id = 10; break; case BARD: strcpy(make_npc->special_abilities, "6,1"); make_npc->current_hp = make_npc->current_hp * 110 / 100; make_npc->max_hp = make_npc->max_hp * 110 / 100; make_npc->npc_spells_id = 11; break; case BEASTLORD: strcpy(make_npc->special_abilities, "7,1"); make_npc->current_hp = make_npc->current_hp * 110 / 100; make_npc->max_hp = make_npc->max_hp * 110 / 100; make_npc->npc_spells_id = 12; break; case ROGUE: strcpy(make_npc->special_abilities, "7,1"); make_npc->max_dmg = make_npc->max_dmg * 150 /100; make_npc->current_hp = make_npc->current_hp * 110 / 100; make_npc->max_hp = make_npc->max_hp * 110 / 100; break; case MONK: strcpy(make_npc->special_abilities, "7,1"); make_npc->max_dmg = make_npc->max_dmg * 150 /100; make_npc->current_hp = make_npc->current_hp * 135 / 100; make_npc->max_hp = make_npc->max_hp * 135 / 100; break; case WARRIOR: case BERSERKER: strcpy(make_npc->special_abilities, "7,1"); make_npc->max_dmg = make_npc->max_dmg * 150 /100; make_npc->current_hp = make_npc->current_hp * 175 / 100; make_npc->max_hp = make_npc->max_hp * 175 / 100; break; default: make_npc->npc_spells_id = 0; break; } make_npc->loottable_id = 0; make_npc->merchanttype = 0; make_npc->d_melee_texture1 = 0; make_npc->d_melee_texture2 = 0; auto npca = new NPC(make_npc, 0, GetPosition(), GravityBehavior::Water); if(!npca->GetSwarmInfo()){ auto nSI = new SwarmPet; npca->SetSwarmInfo(nSI); npca->GetSwarmInfo()->duration = new Timer(duration*1000); } else{ npca->GetSwarmInfo()->duration->Start(duration*1000); } npca->StartSwarmTimer(duration * 1000); npca->GetSwarmInfo()->owner_id = GetID(); //give the pet somebody to "love" if(target != nullptr){ npca->AddToHateList(target, 100000); npca->GetSwarmInfo()->target = target->GetID(); } //gear stuff, need to make sure there's //no situation where this stuff can be duped for (int x = EQ::invslot::EQUIPMENT_BEGIN; x <= EQ::invslot::EQUIPMENT_END; x++) { uint32 sitem = 0; sitem = CorpseToUse->GetWornItem(x); if(sitem){ const EQ::ItemData * itm = database.GetItem(sitem); npca->AddLootDrop(itm, &npca->itemlist, NPC::NewLootDropEntry(), true); } } //we allocated a new NPC type object, give the NPC ownership of that memory if(make_npc != nullptr) npca->GiveNPCTypeData(make_npc); entity_list.AddNPC(npca, true, true); //the 
target of these swarm pets will take offense to being cast on... if(target != nullptr) target->AddToHateList(this, 1, 0); } void Client::ResetAA() { SendClearAA(); RefundAA(); memset(&m_pp.aa_array[0], 0, sizeof(AA_Array) * MAX_PP_AA_ARRAY); int i = 0; for(auto &rank_value : aa_ranks) { auto ability_rank = zone->GetAlternateAdvancementAbilityAndRank(rank_value.first, rank_value.second.first); auto ability = ability_rank.first; auto rank = ability_rank.second; if(!rank) { continue; } m_pp.aa_array[i].AA = rank_value.first; m_pp.aa_array[i].value = rank_value.second.first; m_pp.aa_array[i].charges = rank_value.second.second; ++i; } for(int i = 0; i < _maxLeaderAA; ++i) m_pp.leader_abilities.ranks[i] = 0; m_pp.group_leadership_points = 0; m_pp.raid_leadership_points = 0; m_pp.group_leadership_exp = 0; m_pp.raid_leadership_exp = 0; database.DeleteCharacterLeadershipAAs(CharacterID()); } void Client::SendClearAA() { auto outapp = new EQApplicationPacket(OP_ClearLeadershipAbilities, 0); FastQueuePacket(&outapp); outapp = new EQApplicationPacket(OP_ClearAA, 0); FastQueuePacket(&outapp); } int Client::GroupLeadershipAAHealthEnhancement() { if (IsRaidGrouped()) { int bonus = 0; Raid *raid = GetRaid(); if (!raid) return 0; uint32 group_id = raid->GetGroup(this); if (group_id < 12 && raid->GroupCount(group_id) >= 3) { switch (raid->GetLeadershipAA(groupAAHealthEnhancement, group_id)) { case 1: bonus = 30; break; case 2: bonus = 60; break; case 3: bonus = 100; break; } } if (raid->RaidCount() >= 18) { switch (raid->GetLeadershipAA(raidAAHealthEnhancement)) { case 1: bonus += 30; break; case 2: bonus += 60; break; case 3: bonus += 100; break; } } return bonus; } Group *g = GetGroup(); if(!g || (g->GroupCount() < 3)) return 0; switch(g->GetLeadershipAA(groupAAHealthEnhancement)) { case 0: return 0; case 1: return 30; case 2: return 60; case 3: return 100; } return 0; } int Client::GroupLeadershipAAManaEnhancement() { if (IsRaidGrouped()) { int bonus = 0; Raid *raid = GetRaid(); if (!raid) return 0; uint32 group_id = raid->GetGroup(this); if (group_id < 12 && raid->GroupCount(group_id) >= 3) { switch (raid->GetLeadershipAA(groupAAManaEnhancement, group_id)) { case 1: bonus = 30; break; case 2: bonus = 60; break; case 3: bonus = 100; break; } } if (raid->RaidCount() >= 18) { switch (raid->GetLeadershipAA(raidAAManaEnhancement)) { case 1: bonus += 30; break; case 2: bonus += 60; break; case 3: bonus += 100; break; } } return bonus; } Group *g = GetGroup(); if(!g || (g->GroupCount() < 3)) return 0; switch(g->GetLeadershipAA(groupAAManaEnhancement)) { case 0: return 0; case 1: return 30; case 2: return 60; case 3: return 100; } return 0; } int Client::GroupLeadershipAAHealthRegeneration() { if (IsRaidGrouped()) { int bonus = 0; Raid *raid = GetRaid(); if (!raid) return 0; uint32 group_id = raid->GetGroup(this); if (group_id < 12 && raid->GroupCount(group_id) >= 3) { switch (raid->GetLeadershipAA(groupAAHealthRegeneration, group_id)) { case 1: bonus = 4; break; case 2: bonus = 6; break; case 3: bonus = 8; break; } } if (raid->RaidCount() >= 18) { switch (raid->GetLeadershipAA(raidAAHealthRegeneration)) { case 1: bonus += 4; break; case 2: bonus += 6; break; case 3: bonus += 8; break; } } return bonus; } Group *g = GetGroup(); if(!g || (g->GroupCount() < 3)) return 0; switch(g->GetLeadershipAA(groupAAHealthRegeneration)) { case 0: return 0; case 1: return 4; case 2: return 6; case 3: return 8; } return 0; } int Client::GroupLeadershipAAOffenseEnhancement() { if (IsRaidGrouped()) { int bonus = 0; Raid *raid = 
GetRaid(); if (!raid) return 0; uint32 group_id = raid->GetGroup(this); if (group_id < 12 && raid->GroupCount(group_id) >= 3) { switch (raid->GetLeadershipAA(groupAAOffenseEnhancement, group_id)) { case 1: bonus = 10; break; case 2: bonus = 19; break; case 3: bonus = 28; break; case 4: bonus = 34; break; case 5: bonus = 40; break; } } if (raid->RaidCount() >= 18) { switch (raid->GetLeadershipAA(raidAAOffenseEnhancement)) { case 1: bonus += 10; break; case 2: bonus += 19; break; case 3: bonus += 28; break; case 4: bonus += 34; break; case 5: bonus += 40; break; } } return bonus; } Group *g = GetGroup(); if(!g || (g->GroupCount() < 3)) return 0; switch(g->GetLeadershipAA(groupAAOffenseEnhancement)) { case 0: return 0; case 1: return 10; case 2: return 19; case 3: return 28; case 4: return 34; case 5: return 40; } return 0; } void Client::InspectBuffs(Client* Inspector, int Rank) { // At some point the removed the restriction of being a group member for this to work // not sure when, but the way it's coded now, it wouldn't work with mobs. if (!Inspector || Rank == 0) return; auto outapp = new EQApplicationPacket(OP_InspectBuffs, sizeof(InspectBuffs_Struct)); InspectBuffs_Struct *ib = (InspectBuffs_Struct *)outapp->pBuffer; uint32 buff_count = GetMaxTotalSlots(); uint32 packet_index = 0; for (uint32 i = 0; i < buff_count; i++) { if (buffs[i].spellid == SPELL_UNKNOWN) continue; ib->spell_id[packet_index] = buffs[i].spellid; if (Rank > 1) ib->tics_remaining[packet_index] = spells[buffs[i].spellid].buffdurationformula == DF_Permanent ? 0xFFFFFFFF : buffs[i].ticsremaining; packet_index++; } Inspector->FastQueuePacket(&outapp); } void Client::RefundAA() { int refunded = 0; auto rank_value = aa_ranks.begin(); while(rank_value != aa_ranks.end()) { auto ability_rank = zone->GetAlternateAdvancementAbilityAndRank(rank_value->first, rank_value->second.first); auto ability = ability_rank.first; auto rank = ability_rank.second; if(!ability) { ++rank_value; continue; } if(ability->charges > 0 && rank_value->second.second < 1) { ++rank_value; continue; } if(ability->grant_only) { ++rank_value; continue; } refunded += rank->total_cost; rank_value = aa_ranks.erase(rank_value); } if(refunded > 0) { m_pp.aapoints += refunded; SaveAA(); Save(); } SendAlternateAdvancementTable(); SendAlternateAdvancementPoints(); SendAlternateAdvancementStats(); } SwarmPet::SwarmPet() { target = 0; owner_id = 0; duration = nullptr; } SwarmPet::~SwarmPet() { target = 0; owner_id = 0; safe_delete(duration); } Mob *SwarmPet::GetOwner() { return entity_list.GetMobID(owner_id); } //New AA void Client::SendAlternateAdvancementTable() { for(auto &aa : zone->aa_abilities) { uint32 charges = 0; auto ranks = GetAA(aa.second->first_rank_id, &charges); if(ranks) { if(aa.second->GetMaxLevel(this) == ranks) { SendAlternateAdvancementRank(aa.first, ranks); } else { SendAlternateAdvancementRank(aa.first, ranks); SendAlternateAdvancementRank(aa.first, ranks + 1); } } else { SendAlternateAdvancementRank(aa.first, 1); } } } void Client::SendAlternateAdvancementRank(int aa_id, int level) { if(!zone) return; auto ability_rank = zone->GetAlternateAdvancementAbilityAndRank(aa_id, level); auto ability = ability_rank.first; auto rank = ability_rank.second; if(!ability) { return; } if(!(ability->classes & (1 << GetClass()))) { return; } if(!CanUseAlternateAdvancementRank(rank)) { return; } int size = sizeof(AARankInfo_Struct) + (sizeof(AARankEffect_Struct) * rank->effects.size()) + (sizeof(AARankPrereq_Struct) * rank->prereqs.size()); auto outapp = new 
EQApplicationPacket(OP_SendAATable, size); AARankInfo_Struct *aai = (AARankInfo_Struct*)outapp->pBuffer; aai->id = rank->id; aai->upper_hotkey_sid = rank->upper_hotkey_sid; aai->lower_hotkey_sid = rank->lower_hotkey_sid; aai->title_sid = rank->title_sid; aai->desc_sid = rank->desc_sid; aai->cost = rank->cost; aai->seq = aa_id; aai->type = ability->type; aai->spell = rank->spell; aai->spell_type = rank->spell_type; aai->spell_refresh = rank->recast_time; aai->classes = ability->classes; aai->level_req = rank->level_req; aai->current_level = level; aai->max_level = ability->GetMaxLevel(this); aai->prev_id = rank->prev_id; if((rank->next && !CanUseAlternateAdvancementRank(rank->next)) || ability->charges > 0) { aai->next_id = -1; } else { aai->next_id = rank->next_id; } aai->total_cost = rank->total_cost; aai->expansion = rank->expansion; aai->category = ability->category; aai->charges = ability->charges; aai->grant_only = ability->grant_only; aai->total_effects = rank->effects.size(); aai->total_prereqs = rank->prereqs.size(); outapp->SetWritePosition(sizeof(AARankInfo_Struct)); for(auto &effect : rank->effects) { outapp->WriteSInt32(effect.effect_id); outapp->WriteSInt32(effect.base1); outapp->WriteSInt32(effect.base2); outapp->WriteSInt32(effect.slot); } for(auto &prereq : rank->prereqs) { outapp->WriteSInt32(prereq.first); outapp->WriteSInt32(prereq.second); } QueuePacket(outapp); safe_delete(outapp); } void Client::SendAlternateAdvancementStats() { auto outapp = new EQApplicationPacket(OP_AAExpUpdate, sizeof(AltAdvStats_Struct)); AltAdvStats_Struct *aps = (AltAdvStats_Struct *)outapp->pBuffer; aps->experience = (uint32)(((float)330.0f * (float)m_pp.expAA) / (float)GetRequiredAAExperience()); aps->unspent = m_pp.aapoints; aps->percentage = m_epp.perAA; QueuePacket(outapp); safe_delete(outapp); } void Client::SendAlternateAdvancementPoints() { auto outapp = new EQApplicationPacket(OP_RespondAA, sizeof(AATable_Struct)); AATable_Struct* aa2 = (AATable_Struct *)outapp->pBuffer; int i = 0; for(auto &aa : zone->aa_abilities) { uint32 charges = 0; auto ranks = GetAA(aa.second->first_rank_id, &charges); if(ranks) { AA::Rank *rank = aa.second->GetRankByPointsSpent(ranks); if(rank) { aa2->aa_list[i].AA = rank->id; aa2->aa_list[i].value = rank->total_cost; aa2->aa_list[i].charges = charges; i++; } } } aa2->aa_spent = GetSpentAA(); QueuePacket(outapp); safe_delete(outapp); } void Client::SendAlternateAdvancementTimer(int ability, int begin, int end) { auto outapp = new EQApplicationPacket(OP_AAAction, sizeof(UseAA_Struct)); UseAA_Struct* uaaout = (UseAA_Struct*)outapp->pBuffer; uaaout->ability = ability; uaaout->begin = begin; uaaout->end = end; QueuePacket(outapp); safe_delete(outapp); } //sends all AA timers. 
void Client::SendAlternateAdvancementTimers() { //we dont use SendAATimer because theres no reason to allocate the EQApplicationPacket every time auto outapp = new EQApplicationPacket(OP_AAAction, sizeof(UseAA_Struct)); UseAA_Struct* uaaout = (UseAA_Struct*)outapp->pBuffer; PTimerList::iterator c, e; c = p_timers.begin(); e = p_timers.end(); for(; c != e; ++c) { PersistentTimer *cur = c->second; if(cur->GetType() < pTimerAAStart || cur->GetType() > pTimerAAEnd) continue; //not an AA timer //send timer uaaout->begin = cur->GetStartTime(); uaaout->end = static_cast<uint32>(time(nullptr)); uaaout->ability = cur->GetType() - pTimerAAStart; // uuaaout->ability is really a shared timer number QueuePacket(outapp); } safe_delete(outapp); } void Client::ResetAlternateAdvancementTimer(int ability) { AA::Rank *rank = zone->GetAlternateAdvancementRank(casting_spell_aa_id); if(rank) { SendAlternateAdvancementTimer(rank->spell_type, 0, time(0)); p_timers.Clear(&database, rank->spell_type + pTimerAAStart); } } void Client::ResetAlternateAdvancementTimers() { auto outapp = new EQApplicationPacket(OP_AAAction, sizeof(UseAA_Struct)); UseAA_Struct* uaaout = (UseAA_Struct*)outapp->pBuffer; PTimerList::iterator c, e; c = p_timers.begin(); e = p_timers.end(); std::vector<int> r_timers; for(; c != e; ++c) { PersistentTimer *cur = c->second; if(cur->GetType() < pTimerAAStart || cur->GetType() > pTimerAAEnd) continue; //send timer uaaout->begin = 0; uaaout->end = static_cast<uint32>(time(nullptr)); uaaout->ability = cur->GetType() - pTimerAAStart; r_timers.push_back(cur->GetType()); QueuePacket(outapp); } for(auto &i : r_timers) { p_timers.Clear(&database, i); } safe_delete(outapp); } void Client::ResetOnDeathAlternateAdvancement() { for (const auto &aa : aa_ranks) { auto ability_rank = zone->GetAlternateAdvancementAbilityAndRank(aa.first, aa.second.first); auto ability = ability_rank.first; auto rank = ability_rank.second; if (!ability) continue; if (!rank) continue; // since they're dying, we just need to clear the DB if (ability->reset_on_death) p_timers.Clear(&database, rank->spell_type + pTimerAAStart); } } void Client::PurchaseAlternateAdvancementRank(int rank_id) { AA::Rank *rank = zone->GetAlternateAdvancementRank(rank_id); if(!rank) { return; } if(!rank->base_ability) { return; } if(!CanPurchaseAlternateAdvancementRank(rank, true, true)) { return; } FinishAlternateAdvancementPurchase(rank, false); } bool Client::GrantAlternateAdvancementAbility(int aa_id, int points, bool ignore_cost) { bool ret = false; for(int i = 1; i <= points; ++i) { auto ability_rank = zone->GetAlternateAdvancementAbilityAndRank(aa_id, i); auto ability = ability_rank.first; auto rank = ability_rank.second; if(!rank) { continue; } if(!rank->base_ability) { continue; } if(!CanPurchaseAlternateAdvancementRank(rank, !ignore_cost, false)) { continue; } ret = true; FinishAlternateAdvancementPurchase(rank, ignore_cost); } return ret; } void Client::FinishAlternateAdvancementPurchase(AA::Rank *rank, bool ignore_cost) { int rank_id = rank->base_ability->first_rank_id; if(rank->base_ability->charges > 0) { uint32 charges = 0; GetAA(rank_id, &charges); if(charges > 0) { return; } SetAA(rank_id, rank->current_value, rank->base_ability->charges); } else { SetAA(rank_id, rank->current_value, 0); //if not max then send next aa if(rank->next) { SendAlternateAdvancementRank(rank->base_ability->id, rank->next->current_value); } } int cost = !ignore_cost ? 
rank->cost : 0; m_pp.aapoints -= cost ; SaveAA(); SendAlternateAdvancementPoints(); SendAlternateAdvancementStats(); if(rank->prev) { MessageString(Chat::Yellow, AA_IMPROVE, std::to_string(rank->title_sid).c_str(), std::to_string(rank->prev->current_value).c_str(), std::to_string(cost).c_str(), cost == 1 ? std::to_string(AA_POINT).c_str() : std::to_string(AA_POINTS).c_str()); /* QS: Player_Log_AA_Purchases */ if(RuleB(QueryServ, PlayerLogAAPurchases)) { std::string event_desc = StringFormat("Ranked AA Purchase :: aa_id:%i at cost:%i in zoneid:%i instid:%i", rank->id, cost, GetZoneID(), GetInstanceID()); QServ->PlayerLogEvent(Player_Log_AA_Purchases, CharacterID(), event_desc); } } else { MessageString(Chat::Yellow, AA_GAIN_ABILITY, std::to_string(rank->title_sid).c_str(), std::to_string(cost).c_str(), cost == 1 ? std::to_string(AA_POINT).c_str() : std::to_string(AA_POINTS).c_str()); /* QS: Player_Log_AA_Purchases */ if(RuleB(QueryServ, PlayerLogAAPurchases)) { std::string event_desc = StringFormat("Initial AA Purchase :: aa_id:%i at cost:%i in zoneid:%i instid:%i", rank->id, cost, GetZoneID(), GetInstanceID()); QServ->PlayerLogEvent(Player_Log_AA_Purchases, CharacterID(), event_desc); } } CalcBonuses(); if(cost > 0) { if(title_manager.IsNewAATitleAvailable(m_pp.aapoints_spent, GetBaseClass())) NotifyNewTitlesAvailable(); } } //need to rewrite this void Client::IncrementAlternateAdvancementRank(int rank_id) { AA::Rank *rank = zone->GetAlternateAdvancementRank(rank_id); if(!rank) { return; } if(!rank->base_ability) { return; } int points = GetAA(rank_id); GrantAlternateAdvancementAbility(rank->base_ability->id, points + 1, true); } void Client::ActivateAlternateAdvancementAbility(int rank_id, int target_id) { AA::Rank *rank = zone->GetAlternateAdvancementRank(rank_id); if(!rank) { return; } AA::Ability *ability = rank->base_ability; if(!ability) { return; } if(!IsValidSpell(rank->spell)) { return; } if(!CanUseAlternateAdvancementRank(rank)) { return; } bool use_toggle_passive_hotkey = UseTogglePassiveHotkey(*rank); //make sure it is not a passive if(!rank->effects.empty() && !use_toggle_passive_hotkey) { return; } uint32 charges = 0; // We don't have the AA if (!GetAA(rank_id, &charges)) return; //if expendable make sure we have charges if(ability->charges > 0 && charges < 1) return; //check cooldown if(!p_timers.Expired(&database, rank->spell_type + pTimerAAStart, false)) { uint32 aaremain = p_timers.GetRemainingTime(rank->spell_type + pTimerAAStart); uint32 aaremain_hr = aaremain / (60 * 60); uint32 aaremain_min = (aaremain / 60) % 60; uint32 aaremain_sec = aaremain % 60; if(aaremain_hr >= 1) { Message(Chat::Red, "You can use this ability again in %u hour(s) %u minute(s) %u seconds", aaremain_hr, aaremain_min, aaremain_sec); } else { Message(Chat::Red, "You can use this ability again in %u minute(s) %u seconds", aaremain_min, aaremain_sec); } return; } //calculate cooldown int cooldown = rank->recast_time - GetAlternateAdvancementCooldownReduction(rank); if(cooldown < 0) { cooldown = 0; } if (!IsCastWhileInvis(rank->spell)) CommonBreakInvisible(); if (spells[rank->spell].sneak && (!hidden || (hidden && (Timer::GetCurrentTime() - tmHidden) < 4000))) { MessageString(Chat::SpellFailure, SNEAK_RESTRICT); return; } // // Modern clients don't require pet targeted for AA casts that are ST_Pet if (spells[rank->spell].targettype == ST_Pet || spells[rank->spell].targettype == ST_SummonedPet) target_id = GetPetID(); // extra handling for cast_not_standing spells if 
(!spells[rank->spell].cast_not_standing) { if (GetAppearance() == eaSitting) // we need to stand! SetAppearance(eaStanding, false); if (GetAppearance() != eaStanding) { MessageString(Chat::SpellFailure, STAND_TO_CAST); return; } } if (use_toggle_passive_hotkey) { TogglePassiveAlternativeAdvancement(*rank, ability->id); } else { // Bards can cast instant cast AAs while they are casting another song if (spells[rank->spell].cast_time == 0 && GetClass() == BARD && IsBardSong(casting_spell_id)) { if (!SpellFinished(rank->spell, entity_list.GetMob(target_id), EQ::spells::CastingSlot::AltAbility, spells[rank->spell].mana, -1, spells[rank->spell].ResistDiff, false)) { return; } ExpendAlternateAdvancementCharge(ability->id); } else { if (!CastSpell(rank->spell, target_id, EQ::spells::CastingSlot::AltAbility, -1, -1, 0, -1, rank->spell_type + pTimerAAStart, cooldown, nullptr, rank->id)) { return; } } } CastToClient()->GetPTimers().Start(rank->spell_type + pTimerAAStart, cooldown); SendAlternateAdvancementTimer(rank->spell_type, 0, 0); } int Mob::GetAlternateAdvancementCooldownReduction(AA::Rank *rank_in) { if(!rank_in) { return 0; } AA::Ability *ability_in = rank_in->base_ability; if(!ability_in) { return 0; } for(auto &aa : aa_ranks) { auto ability_rank = zone->GetAlternateAdvancementAbilityAndRank(aa.first, aa.second.first); auto ability = ability_rank.first; auto rank = ability_rank.second; if(!ability) { continue; } for(auto &effect : rank->effects) { if(effect.effect_id == SE_HastenedAASkill && effect.base2 == ability_in->id) { return effect.base1; } } } return 0; } void Mob::ExpendAlternateAdvancementCharge(uint32 aa_id) { for (auto &iter : aa_ranks) { AA::Ability *ability = zone->GetAlternateAdvancementAbility(iter.first); if (ability && aa_id == ability->id) { if (iter.second.second > 0) { iter.second.second -= 1; if (iter.second.second == 0) { if (IsClient()) { AA::Rank *r = ability->GetRankByPointsSpent(iter.second.first); if (r) { CastToClient()->GetEPP().expended_aa += r->cost; } } if (IsClient()) { auto c = CastToClient(); c->RemoveExpendedAA(ability->first_rank_id); } aa_ranks.erase(iter.first); } if (IsClient()) { Client *c = CastToClient(); c->SaveAA(); c->SendAlternateAdvancementPoints(); } } return; } } } bool ZoneDatabase::LoadAlternateAdvancement(Client *c) { c->ClearAAs(); std::string query = StringFormat( "SELECT " "aa_id, " "aa_value, " "charges " "FROM " "`character_alternate_abilities` " "WHERE `id` = %u", c->CharacterID()); MySQLRequestResult results = database.QueryDatabase(query); int i = 0; for(auto row = results.begin(); row != results.end(); ++row) { uint32 aa = atoi(row[0]); uint32 value = atoi(row[1]); uint32 charges = atoi(row[2]); auto rank = zone->GetAlternateAdvancementRank(aa); if(!rank) { continue; } auto ability = rank->base_ability; if(!ability) { continue; } rank = ability->GetRankByPointsSpent(value); if(c->CanUseAlternateAdvancementRank(rank)) { c->GetPP().aa_array[i].AA = aa; c->GetPP().aa_array[i].value = value; c->GetPP().aa_array[i].charges = charges; c->SetAA(aa, value, charges); i++; } } return true; } AA::Ability *Zone::GetAlternateAdvancementAbility(int id) { auto iter = aa_abilities.find(id); if(iter != aa_abilities.end()) { return iter->second.get(); } return nullptr; } AA::Ability *Zone::GetAlternateAdvancementAbilityByRank(int rank_id) { AA::Rank *rank = GetAlternateAdvancementRank(rank_id); if(!rank) return nullptr; return rank->base_ability; } AA::Rank *Zone::GetAlternateAdvancementRank(int rank_id) { auto iter = aa_ranks.find(rank_id); 
if(iter != aa_ranks.end()) { return iter->second.get(); } return nullptr; } std::pair<AA::Ability*, AA::Rank*> Zone::GetAlternateAdvancementAbilityAndRank(int id, int points_spent) { AA::Ability *ability = GetAlternateAdvancementAbility(id); if(!ability) { return std::make_pair(nullptr, nullptr); } AA::Rank *rank = ability->GetRankByPointsSpent(points_spent); if(!rank) { return std::make_pair(nullptr, nullptr); } return std::make_pair(ability, rank); } uint32 Mob::GetAA(uint32 rank_id, uint32 *charges) const { if(zone) { AA::Ability *ability = zone->GetAlternateAdvancementAbilityByRank(rank_id); if(!ability) return 0; auto iter = aa_ranks.find(ability->id); if(iter != aa_ranks.end()) { if(charges) { *charges = iter->second.second; } return iter->second.first; } } return 0; } uint32 Mob::GetAAByAAID(uint32 aa_id, uint32 *charges) const { if(zone) { AA::Ability *ability = zone->GetAlternateAdvancementAbility(aa_id); if(!ability) return 0; auto iter = aa_ranks.find(ability->id); if(iter != aa_ranks.end()) { if(charges) { *charges = iter->second.second; } return iter->second.first; } } return 0; } bool Mob::SetAA(uint32 rank_id, uint32 new_value, uint32 charges) { if(zone) { AA::Ability *ability = zone->GetAlternateAdvancementAbilityByRank(rank_id); if(!ability) { return false; } if(new_value > ability->GetMaxLevel(this)) { return false; } aa_ranks[ability->id] = std::make_pair(new_value, charges); } return true; } bool Mob::CanUseAlternateAdvancementRank(AA::Rank *rank) { AA::Ability *ability = rank->base_ability; if(!ability) return false; if(!(ability->classes & (1 << GetClass()))) { return false; } // Passive and Active Shroud AAs // For now we skip them if(ability->category == 3 || ability->category == 4) { return false; } //the one titanium hack i will allow //just to make sure we dont crash the client with newer aas //we'll exclude any expendable ones if(IsClient() && CastToClient()->ClientVersionBit() & EQ::versions::maskTitaniumAndEarlier) { if(ability->charges > 0) { return false; } } if (IsClient()) { if (rank->expansion && !(CastToClient()->GetPP().expansions & (1 << (rank->expansion - 1)))) { return false; } } #ifdef BOTS else if (IsBot()) { if (rank->expansion && !(RuleI(Bots, BotExpansionSettings) & (1 << (rank->expansion - 1)))) { return false; } } #endif else { if (rank->expansion && !(RuleI(World, ExpansionSettings) & (1 << (rank->expansion - 1)))) { return false; } } auto race = GetPlayerRaceValue(GetBaseRace()); race = race > 16 ? 1 : race; if(!(ability->races & (1 << (race - 1)))) { return false; } auto deity = GetDeityBit(); if(!(ability->deities & deity)) { return false; } if(IsClient() && CastToClient()->Admin() < ability->status) { return false; } if(GetBaseRace() == 522) { //drakkin_heritage if(!(ability->drakkin_heritage & (1 << GetDrakkinHeritage()))) { return false; } } return true; } bool Mob::CanPurchaseAlternateAdvancementRank(AA::Rank *rank, bool check_price, bool check_grant) { AA::Ability *ability = rank->base_ability; if(!ability) return false; if(!CanUseAlternateAdvancementRank(rank)) { return false; } //You can't purchase grant only AAs they can only be assigned if(check_grant && ability->grant_only) { return false; } //check level req if(rank->level_req > GetLevel()) { return false; } uint32 current_charges = 0; auto points = GetAA(rank->id, &current_charges); //check that we are on previous rank already (if exists) //grant ignores the req to own the previous rank. 
if(check_grant && rank->prev) { if(points != rank->prev->current_value) { return false; } } //check that we aren't already on this rank or one ahead of us if(points >= rank->current_value) { return false; } //if expendable only let us purchase if we have no charges already //not quite sure on how this functions client side atm //I intend to look into it later to make sure the behavior is right if(ability->charges > 0 && current_charges > 0) { return false; } //check prereqs for(auto &prereq : rank->prereqs) { AA::Ability *prereq_ability = zone->GetAlternateAdvancementAbility(prereq.first); if(prereq_ability) { auto ranks = GetAA(prereq_ability->first_rank_id); if(ranks < prereq.second) { return false; } } } //check price, if client if(check_price && IsClient()) { if(rank->cost > CastToClient()->GetAAPoints()) { return false; } } return true; } void Zone::LoadAlternateAdvancement() { LogInfo("Loading Alternate Advancement Data"); if(!content_db.LoadAlternateAdvancementAbilities(aa_abilities, aa_ranks)) { aa_abilities.clear(); aa_ranks.clear(); LogInfo("Failed to load Alternate Advancement Data"); return; } LogInfo("Processing Alternate Advancement Data"); for(const auto &ability : aa_abilities) { ability.second->first = GetAlternateAdvancementRank(ability.second->first_rank_id); //process these ranks AA::Rank *current = ability.second->first; int i = 1; int prev_id = -1; while(current) { current->prev_id = prev_id; current->prev = GetAlternateAdvancementRank(current->prev_id); current->next = GetAlternateAdvancementRank(current->next_id); current->base_ability = ability.second.get(); current->current_value = i; if(current->prev) { current->total_cost = current->cost + current->prev->total_cost; //check prereqs here for(auto &prev_prereq : current->prev->prereqs) { //if prev has an aa we dont have set // then set it here too //if prev has an aa we have // then set to whichever is highest auto iter = current->prereqs.find(prev_prereq.first); if(iter == current->prereqs.end()) { //not found current->prereqs[prev_prereq.first] = prev_prereq.second; } else { //they already have it too! auto points = std::max(iter->second, prev_prereq.second); current->prereqs[iter->first] = points; } } } else { current->prev_id = -1; current->total_cost = current->cost; } if(!current->next) { current->next_id = -1; } i++; prev_id = current->id; current = current->next; } } LogInfo("Loaded Alternate Advancement Data"); } bool ZoneDatabase::LoadAlternateAdvancementAbilities(std::unordered_map<int, std::unique_ptr<AA::Ability>> &abilities, std::unordered_map<int, std::unique_ptr<AA::Rank>> &ranks) { LogInfo("Loading Alternate Advancement Abilities"); abilities.clear(); std::string query = "SELECT id, name, category, classes, races, deities, drakkin_heritage, status, type, charges, " "grant_only, reset_on_death, first_rank_id FROM aa_ability WHERE enabled = 1"; auto results = QueryDatabase(query); if(results.Success()) { for(auto row = results.begin(); row != results.end(); ++row) { auto ability = new AA::Ability; ability->id = atoi(row[0]); ability->name = row[1]; ability->category = atoi(row[2]); //EQ client has classes left shifted by one bit for some odd reason ability->classes = atoi(row[3]) << 1; ability->races = atoi(row[4]); ability->deities = atoi(row[5]); ability->drakkin_heritage = atoi(row[6]); ability->status = atoi(row[7]); ability->type = atoi(row[8]); ability->charges = atoi(row[9]); ability->grant_only = atoi(row[10]) != 0 ? true : false; ability->reset_on_death = atoi(row[11]) != 0 ? 
true : false; ability->first_rank_id = atoi(row[12]); ability->first = nullptr; abilities[ability->id] = std::unique_ptr<AA::Ability>(ability); } } else { LogError("Failed to load Alternate Advancement Abilities"); return false; } LogInfo("Loaded [{}] Alternate Advancement Abilities", (int)abilities.size()); LogInfo("Loading Alternate Advancement Ability Ranks"); ranks.clear(); query = "SELECT id, upper_hotkey_sid, lower_hotkey_sid, title_sid, desc_sid, cost, level_req, spell, spell_type, recast_time, " "next_id, expansion FROM aa_ranks"; results = QueryDatabase(query); if(results.Success()) { for(auto row = results.begin(); row != results.end(); ++row) { auto rank = new AA::Rank; rank->id = atoi(row[0]); rank->upper_hotkey_sid = atoi(row[1]); rank->lower_hotkey_sid = atoi(row[2]); rank->title_sid = atoi(row[3]); rank->desc_sid = atoi(row[4]); rank->cost = atoi(row[5]); rank->level_req = atoi(row[6]); rank->spell = atoi(row[7]); rank->spell_type = atoi(row[8]); rank->recast_time = atoi(row[9]); rank->next_id = atoi(row[10]); rank->expansion = atoi(row[11]); rank->base_ability = nullptr; rank->total_cost = 0; rank->prev_id = -1; rank->next = nullptr; rank->prev = nullptr; ranks[rank->id] = std::unique_ptr<AA::Rank>(rank); } } else { LogError("Failed to load Alternate Advancement Ability Ranks"); return false; } LogInfo("Loaded [{}] Alternate Advancement Ability Ranks", (int)ranks.size()); LogInfo("Loading Alternate Advancement Ability Rank Effects"); query = "SELECT rank_id, slot, effect_id, base1, base2 FROM aa_rank_effects"; results = QueryDatabase(query); if(results.Success()) { for(auto row = results.begin(); row != results.end(); ++row) { AA::RankEffect effect; int rank_id = atoi(row[0]); effect.slot = atoi(row[1]); effect.effect_id = atoi(row[2]); effect.base1 = atoi(row[3]); effect.base2 = atoi(row[4]); if(effect.slot < 1) continue; if(ranks.count(rank_id) > 0) { AA::Rank *rank = ranks[rank_id].get(); rank->effects.push_back(effect); } } } else { LogError("Failed to load Alternate Advancement Ability Rank Effects"); return false; } LogInfo("Loaded Alternate Advancement Ability Rank Effects"); LogInfo("Loading Alternate Advancement Ability Rank Prereqs"); query = "SELECT rank_id, aa_id, points FROM aa_rank_prereqs"; results = QueryDatabase(query); if(results.Success()) { for(auto row = results.begin(); row != results.end(); ++row) { int rank_id = atoi(row[0]); int aa_id = atoi(row[1]); int points = atoi(row[2]); if(aa_id <= 0 || points <= 0) { continue; } if(ranks.count(rank_id) > 0) { AA::Rank *rank = ranks[rank_id].get(); rank->prereqs[aa_id] = points; } } } else { LogError("Failed to load Alternate Advancement Ability Rank Prereqs"); return false; } LogInfo("Loaded Alternate Advancement Ability Rank Prereqs"); return true; } bool Mob::CheckAATimer(int timer) { if (timer >= aaTimerMax) return false; if (aa_timers[timer].Enabled()) { if (aa_timers[timer].Check(false)) { aa_timers[timer].Disable(); return false; } else { return true; } } return false; } void Client::TogglePassiveAlternativeAdvancement(const AA::Rank &rank, uint32 ability_id) { /* Certain AA, like Weapon Stance line use a special toggle Hotkey to enable or disable the AA's passive abilities. This is occurs by doing the following. Each 'rank' of Weapon Stance is actually 2 actual ranks. First rank is always the Disabled version which cost X amount of AA. Second rank is the Enabled version which cost 0 AA. 
When you buy the first rank, you make a hotkey that on live say 'Weapon Stance Disabled', if you clik that it then BUYS the next rank of AA (cost 0) which switches the hotkey to 'Enabled Weapon Stance' and you are given the passive buff effects. If you click the Enabled hotkey, it causes you to lose an AA rank and once again be disabled. Thus, you are switching between two AA ranks. Thefore when creating an AA using this ability, you need generate both ranks. Follow the same pattern for additional ranks. IMPORTANT! The toggle system can be used to Enable or Disable ANY passive AA. You just need to follow the instructions on how to create it. Example: Enable or Disable a buff that gives a large hate modifier. Play may Enable when tanking and Disable when DPS ect. Note: On live the Enabled rank is shown having a Charge of 1, while Disabled rank has no charges. Our current code doesn't support that. Do not use charges. Note: Live uses a spell 'Disable Ability' ID 46164 to trigger a script to do the AA rank changes. At present time it is not coded to require that, any spell id works. Note: Discovered a bug on ROF2, where when you buy first rank of an AA with a hotkey, it will always display the title of the second rank in the database. Be aware. No easy fix. Dev Note(Kayen 8/1/21): The system as set up is very similar to live, with exception that live gives the Enabled rank 1 Charge. The code here emulates what happens when a charge would be expended. Instructions for how to make the AA - assuming a basic level of knowledge of how AA's work. - aa_abilities table : Create new ability with a hotkey, type 3, zero charges - aa_ranks table : [Disabled rank] First rank, should have a cost > 0 (this is what you buy), Set hotkeys, MUST SET A SPELL CONTAINING EFFECT SE_Buy_AA_Rank(SPA 472), set a short recast timer. [Enabled rank] Second rank, should have a cost = 0, Set hotkeys, Set any valid spell ID you want (it has to exist but does nothing), set a short recast timer. *Recommend if doing custom, just make the hotkey titled 'Toggle <Ability Name>' and use for both. - aa_rank_effects table : [Disabled rank] No data needed in the aa_ranks_effect table [Enabled rank] Second rank set effect_id = 457 (weapon stance), slot 1,2,3, base1= spell triggers, base= weapon type (0=2H,1=SH,2=DW), for slot 1,2,3 Example SQL -Disabled DO NOT ADD any data to the aa_rank_effects for this rank_id -Enabled INSERT INTO aa_rank_effects (rank_id, slot, effect_id, base1, base2) VALUES (20003, 1, 476, 145,0); INSERT INTO aa_rank_effects (rank_id, slot, effect_id, base1, base2) VALUES (20003, 2, 476, 174,1); INSERT INTO aa_rank_effects (rank_id, slot, effect_id, base1, base2) VALUES (20003, 3, 476, 172,2); Warning: If you want to design an AA that only uses one weapon type to trigger, like will only apply buff if Shield. Do not include data for other types. Never have a base value=0 in the Enabled rank. */ bool enable_next_rank = IsEffectInSpell(rank.spell, SE_Buy_AA_Rank); if (enable_next_rank) { //Enable TogglePurchaseAlternativeAdvancementRank(rank.next_id); Message(Chat::Spells, "You enable an ability."); //Message live gives you. Should come from spell. AA::Rank *rank_next = zone->GetAlternateAdvancementRank(rank.next_id); //Add checks for any special cases for toggle. 
if (IsEffectinAlternateAdvancementRankEffects(*rank_next, SE_Weapon_Stance)) { weaponstance.aabonus_enabled = true; ApplyWeaponsStance(); } return; } else { //Disable ResetAlternateAdvancementRank(ability_id); TogglePurchaseAlternativeAdvancementRank(rank.prev_id); Message(Chat::Spells, "You disable an ability."); //Message live gives you. Should come from spell. //Add checks for any special cases for toggle. if (IsEffectinAlternateAdvancementRankEffects(rank, SE_Weapon_Stance)) { weaponstance.aabonus_enabled = false; BuffFadeBySpellID(weaponstance.aabonus_buff_spell_id); } return; } } bool Client::UseTogglePassiveHotkey(const AA::Rank &rank) { /* Disabled rank needs a rank spell containing the SE_Buy_AA_Rank effect to return true. Enabled rank checks to see if the prior rank contains a rank spell with SE_Buy_AA_Rank, if so true. Note: On live the enabled rank is Expendable with Charge 1. We have already confirmed the rank spell is valid before this function is called. */ if (IsEffectInSpell(rank.spell, SE_Buy_AA_Rank)) {//Checked when is Disabled. return true; } else if (rank.prev_id != -1) {//Check when effect is Enabled. AA::Rank *rank_prev = zone->GetAlternateAdvancementRank(rank.prev_id); if (IsEffectInSpell(rank_prev->spell, SE_Buy_AA_Rank)) { return true; } } return false; } bool Client::IsEffectinAlternateAdvancementRankEffects(const AA::Rank &rank, int effect_id) { for (const auto &e : rank.effects) { if (e.effect_id == effect_id) { return true; } } return false; } void Client::ResetAlternateAdvancementRank(uint32 aa_id) { /* Resets your AA to baseline */ for(auto &iter : aa_ranks) { AA::Ability *ability = zone->GetAlternateAdvancementAbility(iter.first); if(ability && aa_id == ability->id) { RemoveExpendedAA(ability->first_rank_id); aa_ranks.erase(iter.first); SaveAA(); SendAlternateAdvancementPoints(); return; } } } void Client::TogglePurchaseAlternativeAdvancementRank(int rank_id){ /* Stripped down version of purchasing AA. Will give no messages. Used with toggle hotkey functions. */ AA::Rank *rank = zone->GetAlternateAdvancementRank(rank_id); if (!rank) { return; } if (!rank->base_ability) { return; } if (!CanPurchaseAlternateAdvancementRank(rank, false, false)) { return; } rank_id = rank->base_ability->first_rank_id; SetAA(rank_id, rank->current_value, 0); if (rank->next) { SendAlternateAdvancementRank(rank->base_ability->id, rank->next->current_value); } SaveAA(); SendAlternateAdvancementPoints(); SendAlternateAdvancementStats(); CalcBonuses(); }
1
10,779
Wouldn't we want to namespace this `AA` or something similar?
EQEmu-Server
cpp
@@ -395,7 +395,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, // Load additional config from file(s) sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) if err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && envCfg.Creds.HasKeys() { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read file. Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { return nil, err } }
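The hunk above boils down to a single predicate: a shared-config load failure is tolerated only when no profile was requested, shared config is disabled, and the environment already carries credentials. A minimal sketch of that decision follows; the `envSummary` struct and `canIgnoreSharedConfigErr` name are hypothetical stand-ins, not aws-sdk-go internals.

```go
// Minimal sketch (not SDK code) of the decision the patch encodes:
// ignore a shared-config load error only when no profile was requested,
// shared config is disabled, and the environment already has credentials.
package main

import "fmt"

type envSummary struct {
	Profile            string
	EnableSharedConfig bool
	HasEnvCredentials  bool // stand-in for envCfg.Creds.HasKeys()
}

// canIgnoreSharedConfigErr mirrors the special case added by the patch.
func canIgnoreSharedConfigErr(env envSummary) bool {
	return len(env.Profile) == 0 && !env.EnableSharedConfig && env.HasEnvCredentials
}

func main() {
	env := envSummary{Profile: "", EnableSharedConfig: false, HasEnvCredentials: true}
	fmt.Println(canIgnoreSharedConfigErr(env)) // true: the config-file error is tolerated
}
```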
1
package session import ( "crypto/tls" "crypto/x509" "fmt" "io" "io/ioutil" "net/http" "os" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" ) const ( // ErrCodeSharedConfig represents an error that occurs in the shared // configuration logic ErrCodeSharedConfig = "SharedConfigErr" ) // ErrSharedConfigSourceCollision will be returned if a section contains both // source_profile and credential_source var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) // ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment // variables are empty and Environment was set as the credential source var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) // ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) // A Session provides a central location to create service clients from and // store configurations and request handlers for those services. // // Sessions are safe to create service clients concurrently, but it is not safe // to mutate the Session concurrently. // // The Session satisfies the service client's client.ConfigProvider. type Session struct { Config *aws.Config Handlers request.Handlers } // New creates a new instance of the handlers merging in the provided configs // on top of the SDK's default configurations. Once the Session is created it // can be mutated to modify the Config or Handlers. The Session is safe to be // read concurrently, but it should not be written to concurrently. // // If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New // method could now encounter an error when loading the configuration. When // The environment variable is set, and an error occurs, New will return a // session that will fail all requests reporting the error that occurred while // loading the session. Use NewSession to get the error when creating the // session. // // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded, in addition to // the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. // // Deprecated: Use NewSession functions to create sessions instead. NewSession // has the same functionality as New except an error can be returned when the // func is called instead of waiting to receive an error until a request is made. func New(cfgs ...*aws.Config) *Session { // load initial config from environment envCfg := loadEnvConfig() if envCfg.EnableSharedConfig { var cfg aws.Config cfg.MergeIn(cfgs...) s, err := NewSessionWithOptions(Options{ Config: cfg, SharedConfigState: SharedConfigEnable, }) if err != nil { // Old session.New expected all errors to be discovered when // a request is made, and would report the errors then. 
This // needs to be replicated if an error occurs while creating // the session. msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + "Use session.NewSession to handle errors occurring during session creation." // Session creation failed, need to report the error and prevent // any requests from succeeding. s = &Session{Config: defaults.Config()} s.Config.MergeIn(cfgs...) s.Config.Logger.Log("ERROR:", msg, "Error:", err) s.Handlers.Validate.PushBack(func(r *request.Request) { r.Error = err }) } return s } s := deprecatedNewSession(cfgs...) if envCfg.CSMEnabled { err := enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger) if err != nil { err = fmt.Errorf("failed to enable CSM, %v", err) s.Config.Logger.Log("ERROR:", err.Error()) s.Handlers.Validate.PushBack(func(r *request.Request) { r.Error = err }) } } return s } // NewSession returns a new Session created from SDK defaults, config files, // environment, and user provided config files. Once the Session is created // it can be mutated to modify the Config or Handlers. The Session is safe to // be read concurrently, but it should not be written to concurrently. // // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded in addition to // the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. // // See the NewSessionWithOptions func for information on how to override or // control through code how the Session will be created. Such as specifying the // config profile, and controlling if shared config is enabled or not. func NewSession(cfgs ...*aws.Config) (*Session, error) { opts := Options{} opts.Config.MergeIn(cfgs...) return NewSessionWithOptions(opts) } // SharedConfigState provides the ability to optionally override the state // of the session's creation based on the shared config being enabled or // disabled. type SharedConfigState int const ( // SharedConfigStateFromEnv does not override any state of the // AWS_SDK_LOAD_CONFIG env var. It is the default value of the // SharedConfigState type. SharedConfigStateFromEnv SharedConfigState = iota // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value // and disables the shared config functionality. SharedConfigDisable // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value // and enables the shared config functionality. SharedConfigEnable ) // Options provides the means to control how a Session is created and what // configuration values will be loaded. // type Options struct { // Provides config values for the SDK to use when creating service clients // and making API requests to services. Any value set in with this field // will override the associated value provided by the SDK defaults, // environment or config files where relevant. // // If not set, configuration values from from SDK defaults, environment, // config will be used. Config aws.Config // Overrides the config profile the Session should be created from. If not // set the value of the environment variable will be loaded (AWS_PROFILE, // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). 
// // If not set and environment variables are not set the "default" // (DefaultSharedConfigProfile) will be used as the profile to load the // session config from. Profile string // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG // environment variable. By default a Session will be created using the // value provided by the AWS_SDK_LOAD_CONFIG environment variable. // // Setting this value to SharedConfigEnable or SharedConfigDisable // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable // and enable or disable the shared config functionality. SharedConfigState SharedConfigState // Ordered list of files the session will load configuration from. // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. SharedConfigFiles []string // When the SDK's shared config is configured to assume a role with MFA // this option is required in order to provide the mechanism that will // retrieve the MFA token. There is no default value for this field. If // it is not set an error will be returned when creating the session. // // This token provider will be called when ever the assumed role's // credentials need to be refreshed. Within the context of service clients // all sharing the same session the SDK will ensure calls to the token // provider are atomic. When sharing a token provider across multiple // sessions additional synchronization logic is needed to ensure the // token providers do not introduce race conditions. It is recommend to // share the session where possible. // // stscreds.StdinTokenProvider is a basic implementation that will prompt // from stdin for the MFA token code. // // This field is only used if the shared configuration is enabled, and // the config enables assume role wit MFA via the mfa_serial field. AssumeRoleTokenProvider func() (string, error) // When the SDK's shared config is configured to assume a role this option // may be provided to set the expiry duration of the STS credentials. // Defaults to 15 minutes if not set as documented in the // stscreds.AssumeRoleProvider. AssumeRoleDuration time.Duration // Reader for a custom Credentials Authority (CA) bundle in PEM format that // the SDK will use instead of the default system's root CA bundle. Use this // only if you want to replace the CA bundle the SDK uses for TLS requests. // // Enabling this option will attempt to merge the Transport into the SDK's HTTP // client. If the client's Transport is not a http.Transport an error will be // returned. If the Transport's TLS config is set this option will cause the SDK // to overwrite the Transport's TLS config's RootCAs value. If the CA // bundle reader contains multiple certificates all of them will be loaded. // // The Session option CustomCABundle is also available when creating sessions // to also enable this feature. CustomCABundle session option field has priority // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. CustomCABundle io.Reader // The handlers that the session and all API clients will be created with. // This must be a complete set of handlers. Use the defaults.Handlers() // function to initialize this value before changing the handlers to be // used by the SDK. Handlers request.Handlers } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, // environment, and user provided config files. This func uses the Options // values to configure how the Session is created. 
// // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded in addition to // the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. // // // Equivalent to session.New // sess := session.Must(session.NewSessionWithOptions(session.Options{})) // // // Specify profile to load for the session's config // sess := session.Must(session.NewSessionWithOptions(session.Options{ // Profile: "profile_name", // })) // // // Specify profile for config and region for requests // sess := session.Must(session.NewSessionWithOptions(session.Options{ // Config: aws.Config{Region: aws.String("us-east-1")}, // Profile: "profile_name", // })) // // // Force enable Shared Config support // sess := session.Must(session.NewSessionWithOptions(session.Options{ // SharedConfigState: session.SharedConfigEnable, // })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig if opts.SharedConfigState == SharedConfigEnable { envCfg = loadSharedEnvConfig() } else { envCfg = loadEnvConfig() } if len(opts.Profile) != 0 { envCfg.Profile = opts.Profile } switch opts.SharedConfigState { case SharedConfigDisable: envCfg.EnableSharedConfig = false case SharedConfigEnable: envCfg.EnableSharedConfig = true } // Only use AWS_CA_BUNDLE if session option is not provided. if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { f, err := os.Open(envCfg.CustomCABundle) if err != nil { return nil, awserr.New("LoadCustomCABundleError", "failed to open custom CA bundle PEM file", err) } defer f.Close() opts.CustomCABundle = f } return newSession(opts, envCfg, &opts.Config) } // Must is a helper function to ensure the Session is valid and there was no // error when calling a NewSession function. // // This helper is intended to be used in variable initialization to load the // Session and configuration at startup. Such as: // // var sess = session.Must(session.NewSession()) func Must(sess *Session, err error) *Session { if err != nil { panic(err) } return sess } func deprecatedNewSession(cfgs ...*aws.Config) *Session { cfg := defaults.Config() handlers := defaults.Handlers() // Apply the passed in configs so the configuration can be applied to the // default credential chain cfg.MergeIn(cfgs...) if cfg.EndpointResolver == nil { // An endpoint resolver is required for a session to be able to provide // endpoints for service client configurations. cfg.EndpointResolver = endpoints.DefaultResolver() } cfg.Credentials = defaults.CredChain(cfg, handlers) // Reapply any passed in configs to override credentials if set cfg.MergeIn(cfgs...) s := &Session{ Config: cfg, Handlers: handlers, } initHandlers(s) return s } func enableCSM(handlers *request.Handlers, clientID, host, port string, logger aws.Logger, ) error { if logger != nil { logger.Log("Enabling CSM") } r, err := csm.Start(clientID, csm.AddressWithDefaults(host, port)) if err != nil { return err } r.InjectHandlers(handlers) return nil } func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() handlers := opts.Handlers if handlers.IsEmpty() { handlers = defaults.Handlers() } // Get a merged version of the user provided config to determine if // credentials were. 
userCfg := &aws.Config{} userCfg.MergeIn(cfgs...) cfg.MergeIn(userCfg) // Ordered config files will be loaded in with later files overwriting // previous config file values. var cfgFiles []string if opts.SharedConfigFiles != nil { cfgFiles = opts.SharedConfigFiles } else { cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} if !envCfg.EnableSharedConfig { // The shared config file (~/.aws/config) is only loaded if instructed // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). cfgFiles = cfgFiles[1:] } } // Load additional config from file(s) sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) if err != nil { if _, ok := err.(SharedConfigProfileNotExistsError); !ok { return nil, err } } if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { return nil, err } s := &Session{ Config: cfg, Handlers: handlers, } initHandlers(s) if envCfg.CSMEnabled { err := enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger) if err != nil { return nil, err } } // Setup HTTP client with custom cert bundle if enabled if opts.CustomCABundle != nil { if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { return nil, err } } return s, nil } func loadCustomCABundle(s *Session, bundle io.Reader) error { var t *http.Transport switch v := s.Config.HTTPClient.Transport.(type) { case *http.Transport: t = v default: if s.Config.HTTPClient.Transport != nil { return awserr.New("LoadCustomCABundleError", "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) } } if t == nil { // Nil transport implies `http.DefaultTransport` should be used. Since // the SDK cannot modify, nor copy the `DefaultTransport` specifying // the values the next closest behavior. t = getCABundleTransport() } p, err := loadCertPool(bundle) if err != nil { return err } if t.TLSClientConfig == nil { t.TLSClientConfig = &tls.Config{} } t.TLSClientConfig.RootCAs = p s.Config.HTTPClient.Transport = t return nil } func loadCertPool(r io.Reader) (*x509.CertPool, error) { b, err := ioutil.ReadAll(r) if err != nil { return nil, awserr.New("LoadCustomCABundleError", "failed to read custom CA bundle PEM file", err) } p := x509.NewCertPool() if !p.AppendCertsFromPEM(b) { return nil, awserr.New("LoadCustomCABundleError", "failed to load custom CA bundle PEM file", err) } return p, nil } func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options, ) error { // Region if not already set by user if len(aws.StringValue(cfg.Region)) == 0 { if len(envCfg.Region) > 0 { cfg.WithRegion(envCfg.Region) } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { cfg.WithRegion(sharedCfg.Region) } } if cfg.EnableEndpointDiscovery == nil { if envCfg.EnableEndpointDiscovery != nil { cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) } } // Configure credentials if not already set by the user when creating the // Session. if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) if err != nil { return err } cfg.Credentials = creds } return nil } func initHandlers(s *Session) { // Add the Validate parameter handler if it is not disabled. 
s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) if !aws.BoolValue(s.Config.DisableParamValidation) { s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) } } // Copy creates and returns a copy of the current Session, coping the config // and handlers. If any additional configs are provided they will be merged // on top of the Session's copied config. // // // Create a copy of the current Session, configured for the us-west-2 region. // sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) func (s *Session) Copy(cfgs ...*aws.Config) *Session { newSession := &Session{ Config: s.Config.Copy(cfgs...), Handlers: s.Handlers.Copy(), } initHandlers(newSession) return newSession } // ClientConfig satisfies the client.ConfigProvider interface and is used to // configure the service client instances. Passing the Session to the service // client's constructor (New) will use this method to configure the client. func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { // Backwards compatibility, the error will be eaten if user calls ClientConfig // directly. All SDK services will use ClientconfigWithError. cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) return cfg } func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { s = s.Copy(cfgs...) var resolved endpoints.ResolvedEndpoint var err error region := aws.StringValue(s.Config.Region) if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) resolved.SigningRegion = region } else { resolved, err = s.Config.EndpointResolver.EndpointFor( serviceName, region, func(opt *endpoints.Options) { opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) // Support the condition where the service is modeled but its // endpoint metadata is not available. opt.ResolveUnknownService = true }, ) } return client.Config{ Config: s.Config, Handlers: s.Handlers, Endpoint: resolved.URL, SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, }, err } // ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception // that the EndpointResolver will not be used to resolve the endpoint. The only // endpoint set must come from the aws.Config.Endpoint field. func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) var resolved endpoints.ResolvedEndpoint region := aws.StringValue(s.Config.Region) if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) resolved.SigningRegion = region } return client.Config{ Config: s.Config, Handlers: s.Handlers, Endpoint: resolved.URL, SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, } }
1
9,805
This probably should look beyond envConfig, and include `aws.Config.Credentials` as well.
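The reviewer's point is that credentials can also arrive through the user-supplied `aws.Config`, not only the environment. A hedged sketch of what that broader check might look like is below; it uses the real `aws`/`credentials` packages, but `canIgnoreSharedConfigLoadErr` and `userProvidedCreds` are hypothetical names, not the code that was merged.

```go
// Hypothetical sketch of the reviewer's suggestion: treat credentials passed
// via aws.Config the same as environment credentials when deciding whether a
// shared-config load error can be ignored. Not actual SDK code.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
)

// userProvidedCreds reports whether the caller handed credentials to the
// session via aws.Config (e.g. credentials.NewStaticCredentials).
func userProvidedCreds(cfg *aws.Config) bool {
	return cfg != nil && cfg.Credentials != nil
}

// canIgnoreSharedConfigLoadErr extends the patch's condition with the
// user-config check; envHasKeys stands in for envCfg.Creds.HasKeys().
func canIgnoreSharedConfigLoadErr(profile string, sharedCfgEnabled, envHasKeys bool, userCfg *aws.Config) bool {
	return len(profile) == 0 && !sharedCfgEnabled && (envHasKeys || userProvidedCreds(userCfg))
}

func main() {
	userCfg := &aws.Config{
		Credentials: credentials.NewStaticCredentials("AKID", "SECRET", ""),
	}
	// No env credentials, but the caller supplied static credentials: still true.
	fmt.Println(canIgnoreSharedConfigLoadErr("", false, false, userCfg))
}
```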
aws-aws-sdk-go
go
@@ -110,6 +110,7 @@ namespace Nethermind.TxPool _filterPipeline.Add(new TooFarNonceFilter(txPoolConfig, _accounts, _transactions, _logger)); _filterPipeline.Add(new TooExpensiveTxFilter(_headInfo, _accounts, _logger)); _filterPipeline.Add(new FeeToLowFilter(_headInfo, _accounts, _transactions, _logger)); + _filterPipeline.Add(new NotEnoughBalanceFilter(_headInfo, _accounts, _logger)); _filterPipeline.Add(new ReusedOwnNonceTxFilter(_accounts, _nonces, _logger)); if (incomingTxFilter is not null) {
1
// Copyright (c) 2021 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using System.Collections.Concurrent; using System.Collections.Generic; using System.Linq; using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; using Nethermind.Core; using Nethermind.Core.Caching; using Nethermind.Core.Crypto; using Nethermind.Core.Specs; using Nethermind.Core.Timers; using Nethermind.Crypto; using Nethermind.Int256; using Nethermind.Logging; using Nethermind.TxPool.Collections; using Nethermind.TxPool.Filters; [assembly: InternalsVisibleTo("Nethermind.Blockchain.Test")] namespace Nethermind.TxPool { /// <summary> /// Stores all pending transactions. These will be used by block producer if this node is a miner / validator /// or simply for broadcasting and tracing in other cases. /// </summary> public partial class TxPool : ITxPool, IDisposable { private readonly object _locker = new(); private readonly ConcurrentDictionary<Address, AddressNonces> _nonces = new(); private readonly List<IIncomingTxFilter> _filterPipeline = new(); private readonly HashCache _hashCache = new(); private readonly TxBroadcaster _broadcaster; private readonly TxDistinctSortedPool _transactions; private readonly IChainHeadSpecProvider _specProvider; private readonly IAccountStateProvider _accounts; private readonly IChainHeadInfoProvider _headInfo; private readonly ILogger _logger; /// <summary> /// Indexes transactions /// </summary> private ulong _txIndex; /// <summary> /// This class stores all known pending transactions that can be used for block production /// (by miners or validators) or simply informing other nodes about known pending transactions (broadcasting). /// </summary> /// <param name="txStorage">Tx storage used to reject known transactions.</param> /// <param name="ecdsa">Used to recover sender addresses from transaction signatures.</param> /// <param name="chainHeadInfoProvider"></param> /// <param name="txPoolConfig"></param> /// <param name="validator"></param> /// <param name="logManager"></param> /// <param name="comparer"></param> /// <param name="incomingTxFilter"></param> public TxPool( IEthereumEcdsa ecdsa, IChainHeadInfoProvider chainHeadInfoProvider, ITxPoolConfig txPoolConfig, ITxValidator validator, ILogManager? logManager, IComparer<Transaction> comparer, IIncomingTxFilter? incomingTxFilter = null) { _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager)); _headInfo = chainHeadInfoProvider ?? 
throw new ArgumentNullException(nameof(chainHeadInfoProvider)); _accounts = _headInfo.AccountStateProvider; _specProvider = _headInfo.SpecProvider; MemoryAllowance.MemPoolSize = txPoolConfig.Size; AddNodeInfoEntryForTxPool(); _transactions = new TxDistinctSortedPool(MemoryAllowance.MemPoolSize, comparer, logManager); _broadcaster = new TxBroadcaster(comparer, TimerFactory.Default, txPoolConfig, logManager); _headInfo.HeadChanged += OnHeadChange; _filterPipeline.Add(new NullHashTxFilter()); _filterPipeline.Add(new AlreadyKnownTxFilter(_hashCache)); _filterPipeline.Add(new MalformedTxFilter(_specProvider, validator, _logger)); _filterPipeline.Add(new GasLimitTxFilter(_headInfo, txPoolConfig, _logger)); _filterPipeline.Add(new UnknownSenderFilter(ecdsa, _logger)); _filterPipeline.Add(new LowNonceFilter(_accounts, _logger)); _filterPipeline.Add(new TooFarNonceFilter(txPoolConfig, _accounts, _transactions, _logger)); _filterPipeline.Add(new TooExpensiveTxFilter(_headInfo, _accounts, _logger)); _filterPipeline.Add(new FeeToLowFilter(_headInfo, _accounts, _transactions, _logger)); _filterPipeline.Add(new ReusedOwnNonceTxFilter(_accounts, _nonces, _logger)); if (incomingTxFilter is not null) { _filterPipeline.Add(incomingTxFilter); } } public Transaction[] GetPendingTransactions() => _transactions.GetSnapshot(); public int GetPendingTransactionsCount() => _transactions.Count; public IDictionary<Address, Transaction[]> GetPendingTransactionsBySender() => _transactions.GetBucketSnapshot(); internal Transaction[] GetOwnPendingTransactions() => _broadcaster.GetSnapshot(); private void OnHeadChange(object? sender, BlockReplacementEventArgs e) { _hashCache.ClearCurrentBlockCache(); // we don't want this to be on main processing thread // TODO: I think this is dangerous if many blocks are processed one after another Task.Run(() => OnHeadChange(e.Block!, e.PreviousBlock)) .ContinueWith(t => { if (t.IsFaulted) { if (_logger.IsError) _logger.Error( $"Couldn't correctly add or remove transactions from txpool after processing block {e.Block!.ToString(Block.Format.FullHashAndNumber)}.", t.Exception); } }); } private void OnHeadChange(Block block, Block? previousBlock) { ReAddReorganisedTransactions(previousBlock); RemoveProcessedTransactions(block.Transactions); UpdateBuckets(); } private void ReAddReorganisedTransactions(Block? previousBlock) { if (previousBlock is not null) { bool isEip155Enabled = _specProvider.GetSpec(previousBlock.Number).IsEip155Enabled; for (int i = 0; i < previousBlock.Transactions.Length; i++) { Transaction tx = previousBlock.Transactions[i]; _hashCache.Delete(tx.Hash!); SubmitTx(tx, isEip155Enabled ? 
TxHandlingOptions.None : TxHandlingOptions.PreEip155Signing); } } } private void RemoveProcessedTransactions(IReadOnlyList<Transaction> blockTransactions) { long transactionsInBlock = blockTransactions.Count; long discoveredForPendingTxs = 0; long discoveredForHashCache = 0; long eip1559Txs = 0; for (int i = 0; i < transactionsInBlock; i++) { Keccak txHash = blockTransactions[i].Hash; if (!IsKnown(txHash!)) { discoveredForHashCache++; } if (!RemoveTransaction(txHash)) { discoveredForPendingTxs++; } if (blockTransactions[i].IsEip1559) { eip1559Txs++; } } if (transactionsInBlock != 0) { Metrics.DarkPoolRatioLevel1 = (float)discoveredForHashCache / transactionsInBlock; Metrics.DarkPoolRatioLevel2 = (float)discoveredForPendingTxs / transactionsInBlock; Metrics.Eip1559TransactionsRatio = (float)eip1559Txs / transactionsInBlock; } } public void AddPeer(ITxPoolPeer peer) { PeerInfo peerInfo = new(peer); if (_broadcaster.AddPeer(peerInfo)) { foreach (Transaction transaction in _transactions.GetSnapshot()) { _broadcaster.BroadcastOnce(peerInfo, transaction); } if (_logger.IsTrace) _logger.Trace($"Added a peer to TX pool: {peer}"); } } public void RemovePeer(PublicKey nodeId) { if (!_broadcaster.RemovePeer(nodeId)) { return; } if (_logger.IsTrace) _logger.Trace($"Removed a peer from TX pool: {nodeId}"); } public AddTxResult SubmitTx(Transaction tx, TxHandlingOptions handlingOptions) { Metrics.PendingTransactionsReceived++; // assign a sequence number to transaction so we can order them by arrival times when // gas prices are exactly the same tx.PoolIndex = Interlocked.Increment(ref _txIndex); NewDiscovered?.Invoke(this, new TxEventArgs(tx)); bool managedNonce = (handlingOptions & TxHandlingOptions.ManagedNonce) == TxHandlingOptions.ManagedNonce; bool startBroadcast = (handlingOptions & TxHandlingOptions.PersistentBroadcast) == TxHandlingOptions.PersistentBroadcast; if (_logger.IsTrace) _logger.Trace( $"Adding transaction {tx.ToString(" ")} - managed nonce: {managedNonce} | persistent broadcast {startBroadcast}"); for (int i = 0; i < _filterPipeline.Count; i++) { IIncomingTxFilter incomingTxFilter = _filterPipeline[i]; (bool accepted, AddTxResult? filteringResult) = incomingTxFilter.Accept(tx, handlingOptions); if (!accepted) { Metrics.PendingTransactionsDiscarded++; return filteringResult.Value; } } return AddCore(tx, startBroadcast); } private AddTxResult AddCore(Transaction tx, bool isPersistentBroadcast) { lock (_locker) { bool eip1559Enabled = _specProvider.GetSpec().IsEip1559Enabled; tx.GasBottleneck = tx.CalculateEffectiveGasPrice(eip1559Enabled, _headInfo.CurrentBaseFee); bool inserted = _transactions.TryInsert(tx.Hash, tx, out Transaction? 
removed); if (inserted) { _transactions.UpdateGroup(tx.SenderAddress!, UpdateBucketWithAddedTransaction); Metrics.PendingTransactionsAdded++; if (tx.IsEip1559) { Metrics.Pending1559TransactionsAdded++; } if (removed != null) { // transaction which was on last position in sorted TxPool and was deleted to give // a place for a newly added tx (with higher priority) is now removed from hashCache // to give it opportunity to come back to TxPool in the future, when fees drops _hashCache.Delete(removed.Hash!); Metrics.PendingTransactionsEvicted++; } } else { return AddTxResult.FeeTooLowToCompete; } } _broadcaster.BroadcastOnce(tx); if (isPersistentBroadcast) { _broadcaster.StartBroadcast(tx); } _hashCache.SetLongTerm(tx.Hash!); NewPending?.Invoke(this, new TxEventArgs(tx)); return AddTxResult.Added; } private IEnumerable<(Transaction Tx, Action<Transaction> Change)> UpdateBucketWithAddedTransaction( Address address, ICollection<Transaction> transactions) { if (transactions.Count != 0) { Account account = _accounts.GetAccount(address); UInt256 balance = account.Balance; long currentNonce = (long)(account.Nonce); foreach (var changedTx in UpdateGasBottleneck(transactions, currentNonce, balance)) { yield return changedTx; } } } private IEnumerable<(Transaction Tx, Action<Transaction> Change)> UpdateGasBottleneck( ICollection<Transaction> transactions, long currentNonce, UInt256 balance) { UInt256 previousTxBottleneck = UInt256.MaxValue; int i = 0; foreach (Transaction tx in transactions) { UInt256 gasBottleneck = 0; if (tx.Nonce < currentNonce) { if (tx.GasBottleneck != gasBottleneck) { yield return (tx, SetGasBottleneckChange(gasBottleneck)); } } else { if (previousTxBottleneck == UInt256.MaxValue) { previousTxBottleneck = tx.CalculateAffordableGasPrice(_specProvider.GetSpec().IsEip1559Enabled, _headInfo.CurrentBaseFee, balance); } if (tx.Nonce == currentNonce + i) { UInt256 effectiveGasPrice = tx.CalculateEffectiveGasPrice(_specProvider.GetSpec().IsEip1559Enabled, _headInfo.CurrentBaseFee); gasBottleneck = UInt256.Min(effectiveGasPrice, previousTxBottleneck); } if (tx.GasBottleneck != gasBottleneck) { yield return (tx, SetGasBottleneckChange(gasBottleneck)); } previousTxBottleneck = gasBottleneck; i++; } } } private static Action<Transaction> SetGasBottleneckChange(UInt256 gasBottleneck) { return t => t.GasBottleneck = gasBottleneck; } private void UpdateBuckets() { lock (_locker) { _transactions.UpdatePool(UpdateBucket); } } private IEnumerable<(Transaction Tx, Action<Transaction> Change)> UpdateBucket(Address address, ICollection<Transaction> transactions) { if (transactions.Count != 0) { Account? account = _accounts.GetAccount(address); UInt256 balance = account.Balance; long currentNonce = (long)(account.Nonce); Transaction tx = transactions.FirstOrDefault(t => t.Nonce == currentNonce); bool shouldBeDumped = false; if (tx is null) { shouldBeDumped = true; } else if (balance < tx.Value) { shouldBeDumped = true; } else if (!tx.IsEip1559) { shouldBeDumped = UInt256.MultiplyOverflow(tx.GasPrice, (UInt256)tx.GasLimit, out UInt256 cost); shouldBeDumped |= UInt256.AddOverflow(cost, tx.Value, out cost); shouldBeDumped |= balance < cost; } if (shouldBeDumped) { foreach (Transaction transaction in transactions) { yield return (transaction, SetGasBottleneckChange(0)); } } else { foreach (var changedTx in UpdateGasBottleneck(transactions, currentNonce, balance)) { yield return changedTx; } } } } public bool RemoveTransaction(Keccak? 
hash) { if (hash is null) { return false; } bool hasBeenRemoved; lock (_locker) { hasBeenRemoved = _transactions.TryRemove(hash, out Transaction transaction); if (hasBeenRemoved) { Address address = transaction.SenderAddress; if (_nonces.TryGetValue(address!, out AddressNonces addressNonces)) { addressNonces.Nonces.TryRemove(transaction.Nonce, out _); if (addressNonces.Nonces.IsEmpty) { _nonces.Remove(address, out _); } } RemovedPending?.Invoke(this, new TxEventArgs(transaction)); } _broadcaster.StopBroadcast(hash); } if (_logger.IsTrace) _logger.Trace($"Removed a transaction: {hash}"); return hasBeenRemoved; } public bool TryGetPendingTransaction(Keccak hash, out Transaction transaction) { lock (_locker) { if (!_transactions.TryGetValue(hash, out transaction)) { // commented out as it puts too much pressure on the database // and it not really required in any scenario // * tx recovery usually will fetch from pending // * get tx via RPC usually will fetch from block or from pending // * internal tx pool scenarios are handled directly elsewhere // transaction = _txStorage.Get(hash); } } return transaction != null; } // TODO: Ensure that nonce is always valid in case of sending own transactions from different nodes. public UInt256 ReserveOwnTransactionNonce(Address address) { UInt256 currentNonce = 0; _nonces.AddOrUpdate(address, a => { currentNonce = _accounts.GetAccount(address).Nonce; return new AddressNonces(currentNonce); }, (a, n) => { currentNonce = n.ReserveNonce().Value; return n; }); return currentNonce; } public bool IsKnown(Keccak hash) => _hashCache.Get(hash); public event EventHandler<TxEventArgs>? NewDiscovered; public event EventHandler<TxEventArgs>? NewPending; public event EventHandler<TxEventArgs>? RemovedPending; public void Dispose() { _broadcaster.Dispose(); _headInfo.HeadChanged -= OnHeadChange; } /// <summary> /// This method is used just for nice logging features in the console. /// </summary> private static void AddNodeInfoEntryForTxPool() { ThisNodeInfo.AddInfo("Mem est tx :", $"{(LruCache<Keccak, object>.CalculateMemorySize(32, MemoryAllowance.TxHashCacheSize) + LruCache<Keccak, Transaction>.CalculateMemorySize(4096, MemoryAllowance.MemPoolSize)) / 1000 / 1000}MB" .PadLeft(8)); } } }
1
25,828
How does this filter differ from TooExpensiveTxFilter?
NethermindEth-nethermind
.cs
@@ -217,6 +217,17 @@ public class Project { return users; } + public List<String> getGroupsWithPermission(final Type type) { + final ArrayList<String> groups = new ArrayList<>(); + for (final Map.Entry<String, Permission> entry : this.groupPermissionMap.entrySet()) { + final Permission perm = entry.getValue(); + if (perm.isPermissionSet(type)) { + groups.add(entry.getKey()); + } + } + return groups; + } + public List<Pair<String, Permission>> getUserPermissions() { final ArrayList<Pair<String, Permission>> permissions = new ArrayList<>();
1
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.project; import azkaban.flow.Flow; import azkaban.user.Permission; import azkaban.user.Permission.Type; import azkaban.user.User; import azkaban.utils.Pair; import com.google.common.collect.ImmutableMap; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; public class Project { private final int id; private final String name; private final LinkedHashMap<String, Permission> userPermissionMap = new LinkedHashMap<>(); private final LinkedHashMap<String, Permission> groupPermissionMap = new LinkedHashMap<>(); private final HashSet<String> proxyUsers = new HashSet<>(); private boolean active = true; private String description; private int version = -1; private long createTimestamp; private long lastModifiedTimestamp; private String lastModifiedUser; private String source; private Map<String, Flow> flows = new HashMap<>(); private Map<String, Object> metadata = new HashMap<>(); public Project(final int id, final String name) { this.id = id; this.name = name; } public static Project projectFromObject(final Object object) { final Map<String, Object> projectObject = (Map<String, Object>) object; final int id = (Integer) projectObject.get("id"); final String name = (String) projectObject.get("name"); final String description = (String) projectObject.get("description"); final String lastModifiedUser = (String) projectObject.get("lastModifiedUser"); final long createTimestamp = coerceToLong(projectObject.get("createTimestamp")); final long lastModifiedTimestamp = coerceToLong(projectObject.get("lastModifiedTimestamp")); final String source = (String) projectObject.get("source"); Boolean active = (Boolean) projectObject.get("active"); active = active == null ? 
true : active; final int version = (Integer) projectObject.get("version"); final Map<String, Object> metadata = (Map<String, Object>) projectObject.get("metadata"); final Project project = new Project(id, name); project.setVersion(version); project.setDescription(description); project.setCreateTimestamp(createTimestamp); project.setLastModifiedTimestamp(lastModifiedTimestamp); project.setLastModifiedUser(lastModifiedUser); project.setActive(active); if (source != null) { project.setSource(source); } if (metadata != null) { project.setMetadata(metadata); } final List<String> proxyUserList = (List<String>) projectObject.get("proxyUsers"); project.addAllProxyUsers(proxyUserList); return project; } private static long coerceToLong(final Object obj) { if (obj == null) { return 0; } else if (obj instanceof Integer) { return (Integer) obj; } return (Long) obj; } public String getName() { return this.name; } public Flow getFlow(final String flowId) { if (this.flows == null) { return null; } return this.flows.get(flowId); } public Map<String, Flow> getFlowMap() { return this.flows; } public List<Flow> getFlows() { List<Flow> retFlow = null; if (this.flows != null) { retFlow = new ArrayList<>(this.flows.values()); } else { retFlow = new ArrayList<>(); } return retFlow; } public void setFlows(final Map<String, Flow> flows) { this.flows = ImmutableMap.copyOf(flows); } public Permission getCollectivePermission(final User user) { final Permission permissions = new Permission(); Permission perm = this.userPermissionMap.get(user.getUserId()); if (perm != null) { permissions.addPermissions(perm); } for (final String group : user.getGroups()) { perm = this.groupPermissionMap.get(group); if (perm != null) { permissions.addPermissions(perm); } } return permissions; } public Set<String> getProxyUsers() { return new HashSet<>(this.proxyUsers); } public void addAllProxyUsers(final Collection<String> proxyUsers) { this.proxyUsers.addAll(proxyUsers); } public boolean hasProxyUser(final String proxy) { return this.proxyUsers.contains(proxy); } public void addProxyUser(final String user) { this.proxyUsers.add(user); } public void removeProxyUser(final String user) { this.proxyUsers.remove(user); } public boolean hasPermission(final User user, final Type type) { final Permission perm = this.userPermissionMap.get(user.getUserId()); if (perm != null && (perm.isPermissionSet(Type.ADMIN) || perm.isPermissionSet(type))) { return true; } return hasGroupPermission(user, type); } public boolean hasUserPermission(final User user, final Type type) { final Permission perm = this.userPermissionMap.get(user.getUserId()); if (perm == null) { // Check group return false; } if (perm.isPermissionSet(Type.ADMIN) || perm.isPermissionSet(type)) { return true; } return false; } public boolean hasGroupPermission(final User user, final Type type) { for (final String group : user.getGroups()) { final Permission perm = this.groupPermissionMap.get(group); if (perm != null) { if (perm.isPermissionSet(Type.ADMIN) || perm.isPermissionSet(type)) { return true; } } } return false; } public List<String> getUsersWithPermission(final Type type) { final ArrayList<String> users = new ArrayList<>(); for (final Map.Entry<String, Permission> entry : this.userPermissionMap.entrySet()) { final Permission perm = entry.getValue(); if (perm.isPermissionSet(type)) { users.add(entry.getKey()); } } return users; } public List<Pair<String, Permission>> getUserPermissions() { final ArrayList<Pair<String, Permission>> permissions = new ArrayList<>(); for (final 
Map.Entry<String, Permission> entry : this.userPermissionMap.entrySet()) { permissions.add(new Pair<>(entry.getKey(), entry .getValue())); } return permissions; } public List<Pair<String, Permission>> getGroupPermissions() { final ArrayList<Pair<String, Permission>> permissions = new ArrayList<>(); for (final Map.Entry<String, Permission> entry : this.groupPermissionMap.entrySet()) { permissions.add(new Pair<>(entry.getKey(), entry .getValue())); } return permissions; } public String getDescription() { return this.description; } public void setDescription(final String description) { this.description = description; } public void setUserPermission(final String userid, final Permission perm) { this.userPermissionMap.put(userid, perm); } public void setGroupPermission(final String group, final Permission perm) { this.groupPermissionMap.put(group, perm); } public Permission getUserPermission(final User user) { return this.userPermissionMap.get(user.getUserId()); } public Permission getGroupPermission(final String group) { return this.groupPermissionMap.get(group); } public Permission getUserPermission(final String userID) { return this.userPermissionMap.get(userID); } public void removeGroupPermission(final String group) { this.groupPermissionMap.remove(group); } public void removeUserPermission(final String userId) { this.userPermissionMap.remove(userId); } public void clearUserPermission() { this.userPermissionMap.clear(); } public long getCreateTimestamp() { return this.createTimestamp; } public void setCreateTimestamp(final long createTimestamp) { this.createTimestamp = createTimestamp; } public long getLastModifiedTimestamp() { return this.lastModifiedTimestamp; } public void setLastModifiedTimestamp(final long lastModifiedTimestamp) { this.lastModifiedTimestamp = lastModifiedTimestamp; } public Object toObject() { final HashMap<String, Object> projectObject = new HashMap<>(); projectObject.put("id", this.id); projectObject.put("name", this.name); projectObject.put("description", this.description); projectObject.put("createTimestamp", this.createTimestamp); projectObject.put("lastModifiedTimestamp", this.lastModifiedTimestamp); projectObject.put("lastModifiedUser", this.lastModifiedUser); projectObject.put("version", this.version); if (!this.active) { projectObject.put("active", false); } if (this.source != null) { projectObject.put("source", this.source); } if (this.metadata != null) { projectObject.put("metadata", this.metadata); } final ArrayList<String> proxyUserList = new ArrayList<>(this.proxyUsers); projectObject.put("proxyUsers", proxyUserList); return projectObject; } public String getLastModifiedUser() { return this.lastModifiedUser; } public void setLastModifiedUser(final String lastModifiedUser) { this.lastModifiedUser = lastModifiedUser; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + (this.active ? 1231 : 1237); result = prime * result + (int) (this.createTimestamp ^ (this.createTimestamp >>> 32)); result = prime * result + ((this.description == null) ? 0 : this.description.hashCode()); result = prime * result + this.id; result = prime * result + (int) (this.lastModifiedTimestamp ^ (this.lastModifiedTimestamp >>> 32)); result = prime * result + ((this.lastModifiedUser == null) ? 0 : this.lastModifiedUser.hashCode()); result = prime * result + ((this.name == null) ? 0 : this.name.hashCode()); result = prime * result + ((this.source == null) ? 
0 : this.source.hashCode()); result = prime * result + this.version; return result; } @Override public boolean equals(final Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } final Project other = (Project) obj; if (this.active != other.active) { return false; } if (this.createTimestamp != other.createTimestamp) { return false; } if (this.description == null) { if (other.description != null) { return false; } } else if (!this.description.equals(other.description)) { return false; } if (this.id != other.id) { return false; } if (this.lastModifiedTimestamp != other.lastModifiedTimestamp) { return false; } if (this.lastModifiedUser == null) { if (other.lastModifiedUser != null) { return false; } } else if (!this.lastModifiedUser.equals(other.lastModifiedUser)) { return false; } if (this.name == null) { if (other.name != null) { return false; } } else if (!this.name.equals(other.name)) { return false; } if (this.source == null) { if (other.source != null) { return false; } } else if (!this.source.equals(other.source)) { return false; } if (this.version != other.version) { return false; } return true; } public String getSource() { return this.source; } public void setSource(final String source) { this.source = source; } public Map<String, Object> getMetadata() { if (this.metadata == null) { this.metadata = new HashMap<>(); } return this.metadata; } protected void setMetadata(final Map<String, Object> metadata) { this.metadata = metadata; } public int getId() { return this.id; } public boolean isActive() { return this.active; } public void setActive(final boolean active) { this.active = active; } public int getVersion() { return this.version; } public void setVersion(final int version) { this.version = version; } }
1
18,357
Use " List<String>" instead of ArrayList<String> in declaration.
azkaban-azkaban
java
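Illustrating the review comment above about declarations: a minimal Java sketch of programming to the List interface. The exact line the reviewer points at is not visible in this excerpt, so the local variable below is a hypothetical stand-in.

import java.util.ArrayList;
import java.util.List;

class DeclarationExample {
    // Before (hypothetical): the declaration ties the code to one concrete class.
    // ArrayList<String> users = new ArrayList<>();

    // After: declare against the interface; only the right-hand side names the
    // implementation, so it can be swapped later without touching callers.
    List<String> users = new ArrayList<>();
}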
@@ -93,8 +93,10 @@ class ObjectStore(six.with_metaclass(ABCMeta)): return self.set_object(obj, context, runtime_type, paths) def get_value(self, context, runtime_type, paths): - if runtime_type in self.TYPE_REGISTRY: - return self.TYPE_REGISTRY[runtime_type].get_object(self, context, runtime_type, paths) + if runtime_type.name is not None and runtime_type.name in self.TYPE_REGISTRY: + return self.TYPE_REGISTRY[runtime_type.name].get_object( + self, context, runtime_type, paths + ) return self.get_object(context, runtime_type, paths)
1
import os import shutil from abc import ABCMeta, abstractmethod from io import BytesIO import six from dagster import check, seven from dagster.utils import mkdir_p from .execution_context import SystemPipelineExecutionContext from .runs import RunStorageMode from .types.runtime import RuntimeType, resolve_to_runtime_type def ensure_boto_requirements(): '''Check that boto3 and botocore are importable -- required for S3ObjectStore.''' try: import boto3 import botocore # pylint: disable=unused-import except ImportError: raise check.CheckError( 'boto3 and botocore must both be available for import in order to make use of ' 'an S3ObjectStore' ) return (boto3, botocore) class TypeStoragePlugin(six.with_metaclass(ABCMeta)): # pylint: disable=no-init '''Base class for storage plugins. Extend this class for (storage_mode, runtime_type) pairs that need special handling. ''' @classmethod @abstractmethod def set_object(cls, object_store, obj, context, runtime_type, paths): check.subclass_param(object_store, 'object_store', ObjectStore) return object_store.set_object(obj, context, runtime_type, paths) @classmethod @abstractmethod def get_object(cls, object_store, context, runtime_type, paths): check.subclass_param(object_store, 'object_store', ObjectStore) return object_store.get_object(context, runtime_type, paths) class ObjectStore(six.with_metaclass(ABCMeta)): def __init__(self, types_to_register=None): types_to_register = check.opt_dict_param( types_to_register, 'types_to_register', key_type=RuntimeType, value_class=TypeStoragePlugin, ) self.TYPE_REGISTRY = {} for type_to_register, type_storage_plugin in types_to_register.items(): self.register_type(type_to_register, type_storage_plugin) def register_type(self, type_to_register, type_storage_plugin): check.inst_param(type_to_register, 'type_to_register', RuntimeType) check.subclass_param(type_storage_plugin, 'type_storage_plugin', TypeStoragePlugin) check.invariant( type_to_register.name is not None, 'Cannot register a type storage plugin for an anonymous type', ) self.TYPE_REGISTRY[type_to_register.name] = type_storage_plugin @abstractmethod def set_object(self, obj, context, runtime_type, paths): pass @abstractmethod def get_object(self, context, runtime_type, paths): pass @abstractmethod def has_object(self, context, paths): pass @abstractmethod def rm_object(self, context, paths): pass def set_value(self, obj, context, runtime_type, paths): if runtime_type.name is not None and runtime_type.name in self.TYPE_REGISTRY: return self.TYPE_REGISTRY[runtime_type.name].set_object( self, obj, context, runtime_type, paths ) return self.set_object(obj, context, runtime_type, paths) def get_value(self, context, runtime_type, paths): if runtime_type in self.TYPE_REGISTRY: return self.TYPE_REGISTRY[runtime_type].get_object(self, context, runtime_type, paths) return self.get_object(context, runtime_type, paths) def get_run_files_directory(run_id): return os.path.join(seven.get_system_temp_directory(), 'dagster', 'runs', run_id, 'files') def get_valid_target_path(base_dir, paths): if len(paths) > 1: target_dir = os.path.join(base_dir, *paths[:-1]) mkdir_p(target_dir) return os.path.join(target_dir, paths[-1]) else: check.invariant(len(paths) == 1) target_dir = base_dir mkdir_p(target_dir) return os.path.join(target_dir, paths[0]) class FileSystemObjectStore(ObjectStore): def __init__(self, run_id, types_to_register=None): self.run_id = check.str_param(run_id, 'run_id') self.storage_mode = RunStorageMode.FILESYSTEM self.root = get_run_files_directory(run_id) 
super(FileSystemObjectStore, self).__init__(types_to_register) def set_object(self, obj, context, runtime_type, paths): # pylint: disable=unused-argument check.inst_param(context, 'context', SystemPipelineExecutionContext) check.inst_param(runtime_type, 'runtime_type', RuntimeType) check.list_param(paths, 'paths', of_type=str) check.param_invariant(len(paths) > 0, 'paths') target_path = get_valid_target_path(self.root, paths) check.invariant(not os.path.exists(target_path)) # This is not going to be right in the general case, e.g. for types like Spark # datasets/dataframes, which naturally serialize to # union(parquet_file, directory([parquet_file])) -- we will need a) to pass the # object store into the serializer and b) to provide sugar for the common case where # we don't need to do anything other than open the target path as a binary file with open(target_path, 'wb') as ff: runtime_type.serialization_strategy.serialize_value(context, obj, ff) return target_path def get_object(self, context, runtime_type, paths): # pylint: disable=unused-argument check.list_param(paths, 'paths', of_type=str) check.inst_param(runtime_type, 'runtime_type', RuntimeType) check.param_invariant(len(paths) > 0, 'paths') target_path = os.path.join(self.root, *paths) with open(target_path, 'rb') as ff: return runtime_type.serialization_strategy.deserialize_value(context, ff) def has_object(self, context, paths): # pylint: disable=unused-argument target_path = os.path.join(self.root, *paths) return os.path.isfile(target_path) def rm_object(self, context, paths): # pylint: disable=unused-argument target_path = os.path.join(self.root, *paths) if not self.has_object(context, paths): return os.unlink(target_path) return def copy_object_from_prev_run( self, context, previous_run_id, paths ): # pylint: disable=unused-argument prev_run_files_dir = get_run_files_directory(previous_run_id) check.invariant(os.path.isdir(prev_run_files_dir)) copy_from_path = os.path.join(prev_run_files_dir, *paths) copy_to_path = get_valid_target_path(self.root, paths) check.invariant( not os.path.exists(copy_to_path), 'Path already exists {}'.format(copy_to_path) ) if os.path.isfile(copy_from_path): shutil.copy(copy_from_path, copy_to_path) elif os.path.isdir(copy_from_path): shutil.copytree(copy_from_path, copy_to_path) else: check.failed('should not get here') class S3ObjectStore(ObjectStore): def __init__(self, s3_bucket, run_id, types_to_register=None): boto3, _ = ensure_boto_requirements() check.str_param(run_id, 'run_id') self.s3 = boto3.client('s3') self.bucket = s3_bucket self.run_id = run_id self.s3.head_bucket(Bucket=self.bucket) self.root = '{bucket}/runs/{run_id}/files'.format(bucket=self.bucket, run_id=self.run_id) self.storage_mode = RunStorageMode.S3 super(S3ObjectStore, self).__init__(types_to_register) def key_for_paths(self, paths): return '/'.join([self.root] + paths) def set_object(self, obj, context, runtime_type, paths): ensure_boto_requirements() check.inst_param(context, 'context', SystemPipelineExecutionContext) check.inst_param(runtime_type, 'runtime_type', RuntimeType) check.list_param(paths, 'paths', of_type=str) check.param_invariant(len(paths) > 0, 'paths') key = self.key_for_paths(paths) check.invariant( not self.has_object(context, paths), 'Key already exists: {key}!'.format(key=key) ) with BytesIO() as bytes_io: runtime_type.serialization_strategy.serialize_value(context, obj, bytes_io) bytes_io.seek(0) self.s3.put_object(Bucket=self.bucket, Key=key, Body=bytes_io) return 
's3://{bucket}/{key}'.format(bucket=self.bucket, key=key) def get_object(self, context, runtime_type, paths): ensure_boto_requirements() check.inst_param(context, 'context', SystemPipelineExecutionContext) check.inst_param(runtime_type, 'runtime_type', RuntimeType) check.list_param(paths, 'paths', of_type=str) check.param_invariant(len(paths) > 0, 'paths') key = self.key_for_paths(paths) return runtime_type.serialization_strategy.deserialize_value( context, BytesIO(self.s3.get_object(Bucket=self.bucket, Key=key)['Body'].read()) ) def has_object(self, context, paths): # pylint: disable=unused-argument _, botocore = ensure_boto_requirements() key = self.key_for_paths(paths) try: self.s3.head_object(Bucket=self.bucket, Key=key) return True except botocore.exceptions.ClientError as exc: # pylint: disable=undefined-variable if exc.response.get('Error', {}).get('Code') == '404': return False raise def rm_object(self, context, paths): ensure_boto_requirements() if not self.has_object(context, paths): return key = self.key_for_paths(paths) self.s3.delete_object(Bucket=self.bucket, Key=key) return def copy_object_from_prev_run( self, context, previous_run_id, paths ): # pylint: disable=unused-argument check.not_implemented('not supported: TODO for max. put issue number here') def get_fs_paths(step_key, output_name): return ['intermediates', step_key, output_name] def get_filesystem_intermediate(run_id, step_key, dagster_type, output_name='result'): object_store = FileSystemObjectStore(run_id) return object_store.get_object( context=None, runtime_type=resolve_to_runtime_type(dagster_type), paths=get_fs_paths(step_key, output_name), ) def has_filesystem_intermediate(run_id, step_key, output_name='result'): object_store = FileSystemObjectStore(run_id) return object_store.has_object(context=None, paths=get_fs_paths(step_key, output_name)) def get_s3_intermediate(context, s3_bucket, run_id, step_key, dagster_type, output_name='result'): object_store = S3ObjectStore(s3_bucket, run_id) return object_store.get_object( context=context, runtime_type=resolve_to_runtime_type(dagster_type), paths=get_fs_paths(step_key, output_name), ) def has_s3_intermediate(context, s3_bucket, run_id, step_key, output_name='result'): object_store = S3ObjectStore(s3_bucket, run_id) return object_store.has_object(context=context, paths=get_fs_paths(step_key, output_name)) def rm_s3_intermediate(context, s3_bucket, run_id, step_key, output_name='result'): object_store = S3ObjectStore(s3_bucket, run_id) return object_store.rm_object(context=context, paths=get_fs_paths(step_key, output_name)) def construct_type_registry(pipeline_def, storage_mode): return { type_obj: type_obj.storage_plugins.get(storage_mode) for type_obj in pipeline_def.all_runtime_types() if type_obj.storage_plugins.get(storage_mode) }
1
12,929
We might consider throwing hard when name is None, since that case explicitly does not work right now, and then linking to the issue in the exception error message.
dagster-io-dagster
py
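One possible reading of the review suggestion above, sketched against the ObjectStore.get_value method shown in this record: fail loudly for anonymous runtime types instead of silently skipping the plugin registry. The check.failed call mirrors the invariant already used in register_type; the issue link is a placeholder, and note that as written this would also reject anonymous types that never needed a plugin, which the reviewer's "we might consider" leaves open.

def get_value(self, context, runtime_type, paths):
    if runtime_type.name is None:
        # Hypothetical hard failure suggested in the review; the link below is a placeholder.
        check.failed(
            'Type storage plugins do not work with anonymous types yet. '
            'See <link to issue>.'
        )
    if runtime_type.name in self.TYPE_REGISTRY:
        return self.TYPE_REGISTRY[runtime_type.name].get_object(
            self, context, runtime_type, paths
        )
    return self.get_object(context, runtime_type, paths)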
@@ -1,13 +1,19 @@ package edu.harvard.iq.dataverse; +import edu.harvard.iq.dataverse.util.BundleUtil; import java.io.Serializable; +import java.util.MissingResourceException; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.ManyToOne; +import javax.persistence.NamedNativeQueries; +import javax.persistence.NamedNativeQuery; +import javax.persistence.NamedQueries; +import javax.persistence.NamedQuery; /** *
1
package edu.harvard.iq.dataverse; import java.io.Serializable; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.ManyToOne; /** * * @author ekraffmiller * Represents a generic file that is associated with a dataFile. * This is a data representation of a physical file in StorageIO */ @Entity public class AuxiliaryFile implements Serializable { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) private Long id; /** * The data file that this AuxiliaryFile belongs to * a data file may have many auxiliaryFiles */ @ManyToOne @JoinColumn(nullable=false) private DataFile dataFile; private String formatTag; private String formatVersion; private String origin; private boolean isPublic; private String contentType; private Long fileSize; private String checksum; public Long getId() { return id; } public void setId(Long id) { this.id = id; } public DataFile getDataFile() { return dataFile; } public void setDataFile(DataFile dataFile) { this.dataFile = dataFile; } public String getFormatTag() { return formatTag; } public void setFormatTag(String formatTag) { this.formatTag = formatTag; } public String getFormatVersion() { return formatVersion; } public void setFormatVersion(String formatVersion) { this.formatVersion = formatVersion; } public String getOrigin() { return origin; } public void setOrigin(String origin) { this.origin = origin; } public boolean getIsPublic() { return isPublic; } public void setIsPublic(boolean isPublic) { this.isPublic = isPublic; } public String getContentType() { return this.contentType; } public void setContentType(String contentType) { this.contentType = contentType; } public Long getFileSize() { return fileSize; } public void setFileSize(long fileSize) { this.fileSize = fileSize; } public String getChecksum() { return checksum; } public void setChecksum(String checksum) { this.checksum = checksum; } }
1
44,206
Just noticed this - why "like" and not a straight "="?
IQSS-dataverse
java
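The query the comment above refers to is not included in this excerpt (the visible hunk only adds the NamedQuery imports). As a purely hypothetical illustration of the reviewer's point, a JPA named query on the AuxiliaryFile entity can use "=" when an exact match is intended; LIKE only adds value when the bound parameter carries wildcards.

// Hypothetical named query; field names are taken from the AuxiliaryFile entity above.
@NamedQuery(name = "AuxiliaryFile.findByDataFileAndFormatTag",
            query = "SELECT af FROM AuxiliaryFile af WHERE af.dataFile = :dataFile AND af.formatTag = :formatTag")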
@@ -1,15 +1,16 @@ require 'spec_helper' describe Travis::Build::Data::Env do - let(:data) { stub('data', pull_request: '100', config: { env: 'FOO=foo' }, build: {}, job: {}, repository: {}) } + let(:data) { stub('data', pull_request: '100', config: { env: 'FOO=foo' }, build: { id: '1', number: '1' }, job: { id: '1', number: '1.1', branch: 'master', commit: '313f61b', commit_range: '313f61b..313f61a' }, repository: { slug: 'travis-ci/travis-ci' }) } let(:env) { described_class.new(data) } it 'vars respond to :key' do env.vars.first.should respond_to(:key) end - it 'includes travis env vars' do - env.vars.first.key.should =~ /^TRAVIS_/ + it 'includes all travis env vars' do + travis_vars = env.vars.select { |v| v.key =~ /^TRAVIS_/ && v.value && v.value.length > 0 } + travis_vars.length.should == 11 end it 'includes config env vars' do
1
require 'spec_helper' describe Travis::Build::Data::Env do let(:data) { stub('data', pull_request: '100', config: { env: 'FOO=foo' }, build: {}, job: {}, repository: {}) } let(:env) { described_class.new(data) } it 'vars respond to :key' do env.vars.first.should respond_to(:key) end it 'includes travis env vars' do env.vars.first.key.should =~ /^TRAVIS_/ end it 'includes config env vars' do env.vars.last.key.should == 'FOO' end it 'does not export secure env vars for pull requests' do data.stubs(:config).returns(env: 'SECURE FOO=foo') env.vars.last.key.should_not == 'FOO' end end
1
10,468
Is this a new test? If so, isn't it better to check that each env var is present, along with its value, instead of checking a count?
travis-ci-travis-build
rb
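A sketch of the alternative the reviewer proposes above: assert each TRAVIS_ variable and its value individually rather than asserting a count. The variable names below are assumptions based on the stubbed build, job and repository data, not a confirmed list of what Travis::Build::Data::Env emits.

it 'includes the travis env vars with their values' do
  vars = Hash[env.vars.map { |v| [v.key, v.value] }]
  vars['TRAVIS_PULL_REQUEST'].should == '100'
  vars['TRAVIS_BRANCH'].should == 'master'
  vars['TRAVIS_COMMIT'].should == '313f61b'
  vars['TRAVIS_REPO_SLUG'].should == 'travis-ci/travis-ci'
end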
@@ -160,10 +160,10 @@ public class MicroserviceVersions { setInstances(pulledInstances, rev); validated = true; } catch (Throwable e) { - LOGGER.error("Failed to setInstances, appId={}, microserviceName={}.", - getAppId(), - getMicroserviceName(), - e); + LOGGER.error(String.format("Failed to setInstances, AppId=%s, MicroserviceName=%s.", + getAppId(), + getMicroserviceName()), + e); } }
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.servicecomb.serviceregistry.consumer; import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import org.apache.servicecomb.foundation.common.concurrent.ConcurrentHashMapEx; import org.apache.servicecomb.foundation.common.utils.SPIServiceUtils; import org.apache.servicecomb.serviceregistry.RegistryUtils; import org.apache.servicecomb.serviceregistry.api.Const; import org.apache.servicecomb.serviceregistry.api.registry.MicroserviceInstance; import org.apache.servicecomb.serviceregistry.api.registry.MicroserviceInstanceStatus; import org.apache.servicecomb.serviceregistry.api.response.MicroserviceInstanceChangedEvent; import org.apache.servicecomb.serviceregistry.client.http.MicroserviceInstances; import org.apache.servicecomb.serviceregistry.config.ServiceRegistryConfig; import org.apache.servicecomb.serviceregistry.definition.DefinitionConst; import org.apache.servicecomb.serviceregistry.task.event.MicroserviceNotExistEvent; import org.apache.servicecomb.serviceregistry.task.event.PullMicroserviceVersionsInstancesEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.eventbus.Subscribe; public class MicroserviceVersions { private static final Logger LOGGER = LoggerFactory.getLogger(MicroserviceVersions.class); AppManager appManager; private String appId; private String microserviceName; // revision and pulledInstances directly equals to SC's response private String revision = null; private List<MicroserviceInstance> pulledInstances; // instances not always equals to pulledInstances // in the future: // pulledInstances means all instance // instances means available instance List<MicroserviceInstance> instances; // key is service id Map<String, MicroserviceVersion> versions = new ConcurrentHashMapEx<>(); // key is version rule Map<String, MicroserviceVersionRule> versionRules = new ConcurrentHashMapEx<>(); // process pulled instances and create versionRule must be protected by lock // otherwise maybe lost instance or version in versionRule private final Object lock = new Object(); // to avoid pull too many time // only pendingPullCount is 0, then do a real pull private AtomicInteger pendingPullCount = new AtomicInteger(); boolean validated = false; public MicroserviceVersions(AppManager appManager, String appId, String microserviceName) { this.appManager = appManager; this.appId = appId; this.microserviceName = microserviceName; LOGGER.info("create MicroserviceVersions, appId={}, microserviceName={}.", appId, microserviceName); appManager.getEventBus().register(this); } public boolean isValidated() { return validated; } public String getAppId() { return appId; } public 
String getMicroserviceName() { return microserviceName; } public Map<String, MicroserviceVersion> getVersions() { return versions; } @SuppressWarnings("unchecked") public <T extends MicroserviceVersion> T getVersion(String serviceId) { return (T) versions.get(serviceId); } public String getRevision() { return revision; } public void setRevision(String revision) { this.revision = revision; } public List<MicroserviceInstance> getPulledInstances() { return pulledInstances; } public void submitPull() { pendingPullCount.incrementAndGet(); pullInstances(); } public void pullInstances() { if (pendingPullCount.decrementAndGet() != 0) { return; } MicroserviceInstances microserviceInstances = RegistryUtils.findServiceInstances(appId, microserviceName, DefinitionConst.VERSION_RULE_ALL, revision); if (microserviceInstances == null) { return; } if (microserviceInstances.isMicroserviceNotExist()) { appManager.getEventBus().post(new MicroserviceNotExistEvent(appId, microserviceName)); return; } if (!microserviceInstances.isNeedRefresh()) { return; } pulledInstances = microserviceInstances.getInstancesResponse().getInstances(); pulledInstances.sort(Comparator.comparing(MicroserviceInstance::getInstanceId)); String rev = microserviceInstances.getRevision(); safeSetInstances(pulledInstances, rev); } protected void safeSetInstances(List<MicroserviceInstance> pulledInstances, String rev) { try { setInstances(pulledInstances, rev); validated = true; } catch (Throwable e) { LOGGER.error("Failed to setInstances, appId={}, microserviceName={}.", getAppId(), getMicroserviceName(), e); } } private void postPullInstanceEvent(long msTime) { pendingPullCount.incrementAndGet(); appManager.getEventBus().post(new PullMicroserviceVersionsInstancesEvent(this, msTime)); } private void setInstances(List<MicroserviceInstance> pulledInstances, String rev) { synchronized (lock) { instances = mergeInstances(pulledInstances, instances); for (MicroserviceInstance instance : instances) { // ensure microserviceVersion exists versions.computeIfAbsent(instance.getServiceId(), microserviceId -> { MicroserviceVersion microserviceVersion = appManager.getMicroserviceVersionFactory().create(microserviceName, microserviceId); for (MicroserviceVersionRule microserviceVersionRule : versionRules.values()) { microserviceVersionRule.addMicroserviceVersion(microserviceVersion); } return microserviceVersion; }); } for (MicroserviceVersionRule microserviceVersionRule : versionRules.values()) { microserviceVersionRule.setInstances(instances); } revision = rev; } } private List<MicroserviceInstance> mergeInstances(List<MicroserviceInstance> pulledInstances, List<MicroserviceInstance> inUseInstances) { List<MicroserviceInstance> upInstances = pulledInstances .stream() .filter(instance -> MicroserviceInstanceStatus.UP.equals(instance.getStatus())) .collect(Collectors.toList()); if (upInstances.isEmpty() && inUseInstances != null && ServiceRegistryConfig.INSTANCE .isEmptyInstanceProtectionEnabled()) { MicroserviceInstancePing ping = SPIServiceUtils.getPriorityHighestService(MicroserviceInstancePing.class); inUseInstances.stream() .forEach(instance -> { if (!upInstances.contains(instance)) { if (ping.ping(instance)) { upInstances.add(instance); } } }); } return upInstances; } public MicroserviceVersionRule getOrCreateMicroserviceVersionRule(String versionRule) { // do not use computeIfAbsent MicroserviceVersionRule microserviceVersionRule = versionRules.get(versionRule); if (microserviceVersionRule == null) { synchronized (lock) { microserviceVersionRule 
= versionRules.computeIfAbsent(versionRule, this::createAndInitMicroserviceVersionRule); } } return microserviceVersionRule; } protected MicroserviceVersionRule createAndInitMicroserviceVersionRule(String strVersionRule) { LOGGER.info("create MicroserviceVersionRule, appId={}, microserviceName={}, versionRule={}.", appId, microserviceName, strVersionRule); MicroserviceVersionRule microserviceVersionRule = new MicroserviceVersionRule(appId, microserviceName, strVersionRule); for (MicroserviceVersion microserviceVersion : versions.values()) { microserviceVersionRule.addMicroserviceVersion(microserviceVersion); } microserviceVersionRule.setInstances(instances); return microserviceVersionRule; } @Subscribe public void onMicroserviceInstanceChanged(MicroserviceInstanceChangedEvent changedEvent) { if (!isEventAccept(changedEvent)) { return; } // pull instances always replace old instances, not append // // pull result and watch event sequence is not defined even inside SC. // it's not safe to trust the event, so we just send a new pull request // // CREATE/UPDATE: // if pull 1/2/3, and then add 4, but "add 4" received before pull result, will lost 4. // DELETE: // if pull 1/2/3, and then delete 3, but "delete 3" received before pull result, will have wrong 3. // EXPIRE:: // black/white config in SC changed, we must refresh all data from sc. postPullInstanceEvent(0); } protected boolean isEventAccept(MicroserviceInstanceChangedEvent changedEvent) { return (appId.equals(changedEvent.getKey().getAppId()) && microserviceName.equals(changedEvent.getKey().getServiceName())) || microserviceName.equals( changedEvent.getKey().getAppId() + Const.APP_SERVICE_SEPARATOR + changedEvent.getKey().getServiceName()); } }
1
10,661
When would the exception be lost? In my tests, it never happened.
apache-servicecomb-java-chassis
java
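Background for the exchange above: since SLF4J 1.6, a Throwable passed as the last argument is logged with its stack trace even when no {} placeholder is left for it, which is why the reviewer could not reproduce a lost exception. A minimal sketch of the two logging forms being compared (same output; the second just builds the message eagerly):

// Vararg form from the original code: {} bind to appId and microserviceName, e is still treated as the throwable.
LOGGER.error("Failed to setInstances, appId={}, microserviceName={}.", getAppId(), getMicroserviceName(), e);

// Pre-formatted form from the patch: message built up front via String.format, throwable passed separately.
LOGGER.error(String.format("Failed to setInstances, AppId=%s, MicroserviceName=%s.", getAppId(), getMicroserviceName()), e);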
@@ -147,7 +147,8 @@ class BrowserPage(QWebPage): title="Open external application for {}-link?".format(scheme), text="URL: <b>{}</b>".format( html.escape(url.toDisplayString())), - yes_action=functools.partial(QDesktopServices.openUrl, url)) + yes_action=functools.partial(QDesktopServices.openUrl, url), + url=urlstr) return True elif (info.domain, info.error) in ignored_errors: log.webview.debug("Ignored error on {}: {} (error domain: {}, "
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """The main browser widgets.""" import html import functools from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt, QUrl, QPoint from PyQt5.QtGui import QDesktopServices from PyQt5.QtNetwork import QNetworkReply, QNetworkRequest from PyQt5.QtWidgets import QFileDialog from PyQt5.QtPrintSupport import QPrintDialog from PyQt5.QtWebKitWidgets import QWebPage, QWebFrame from qutebrowser.config import config from qutebrowser.browser import pdfjs, shared from qutebrowser.browser.webkit import http from qutebrowser.browser.webkit.network import networkmanager from qutebrowser.utils import (message, usertypes, log, jinja, objreg, debug, urlutils) class BrowserPage(QWebPage): """Our own QWebPage with advanced features. Attributes: error_occurred: Whether an error occurred while loading. _extension_handlers: Mapping of QWebPage extensions to their handlers. _networkmanager: The NetworkManager used. _win_id: The window ID this BrowserPage is associated with. _ignore_load_started: Whether to ignore the next loadStarted signal. _is_shutting_down: Whether the page is currently shutting down. _tabdata: The TabData object of the tab this page is in. Signals: shutting_down: Emitted when the page is currently shutting down. reloading: Emitted before a web page reloads. arg: The URL which gets reloaded. 
""" shutting_down = pyqtSignal() reloading = pyqtSignal(QUrl) def __init__(self, win_id, tab_id, tabdata, private, parent=None): super().__init__(parent) self._win_id = win_id self._tabdata = tabdata self._is_shutting_down = False self._extension_handlers = { QWebPage.ErrorPageExtension: self._handle_errorpage, QWebPage.ChooseMultipleFilesExtension: self._handle_multiple_files, } self._ignore_load_started = False self.error_occurred = False self.open_target = usertypes.ClickTarget.normal self._networkmanager = networkmanager.NetworkManager( win_id=win_id, tab_id=tab_id, private=private, parent=self) self.setNetworkAccessManager(self._networkmanager) self.setForwardUnsupportedContent(True) self.reloading.connect(self._networkmanager.clear_rejected_ssl_errors) self.printRequested.connect(self.on_print_requested) self.downloadRequested.connect(self.on_download_requested) self.unsupportedContent.connect(self.on_unsupported_content) self.loadStarted.connect(self.on_load_started) self.featurePermissionRequested.connect( self._on_feature_permission_requested) self.saveFrameStateRequested.connect( self.on_save_frame_state_requested) self.restoreFrameStateRequested.connect( self.on_restore_frame_state_requested) self.loadFinished.connect( functools.partial(self._inject_userjs, self.mainFrame())) self.frameCreated.connect(self._connect_userjs_signals) @pyqtSlot('QWebFrame*') def _connect_userjs_signals(self, frame): """Connect userjs related signals to `frame`. Connect the signals used as triggers for injecting user JavaScripts into the passed QWebFrame. """ log.greasemonkey.debug("Connecting to frame {} ({})" .format(frame, frame.url().toDisplayString())) frame.loadFinished.connect( functools.partial(self._inject_userjs, frame)) def javaScriptPrompt(self, frame, js_msg, default): """Override javaScriptPrompt to use qutebrowser prompts.""" if self._is_shutting_down: return (False, "") try: return shared.javascript_prompt(frame.url(), js_msg, default, abort_on=[self.loadStarted, self.shutting_down]) except shared.CallSuper: return super().javaScriptPrompt(frame, js_msg, default) def _handle_errorpage(self, info, errpage): """Display an error page if needed. Loosely based on Helpviewer/HelpBrowserWV.py from eric5 (line 260 @ 5d937eb378dd) Args: info: The QWebPage.ErrorPageExtensionOption instance. errpage: The QWebPage.ErrorPageExtensionReturn instance, where the error page will get written to. Return: False if no error page should be displayed, True otherwise. """ ignored_errors = [ (QWebPage.QtNetwork, QNetworkReply.OperationCanceledError), # "Loading is handled by the media engine" (QWebPage.WebKit, 203), # "Frame load interrupted by policy change" (QWebPage.WebKit, 102), ] errpage.baseUrl = info.url urlstr = info.url.toDisplayString() if (info.domain, info.error) == (QWebPage.QtNetwork, QNetworkReply.ProtocolUnknownError): # For some reason, we get a segfault when we use # QDesktopServices::openUrl with info.url directly - however it # works when we construct a copy of it. 
url = QUrl(info.url) scheme = url.scheme() message.confirm_async( title="Open external application for {}-link?".format(scheme), text="URL: <b>{}</b>".format( html.escape(url.toDisplayString())), yes_action=functools.partial(QDesktopServices.openUrl, url)) return True elif (info.domain, info.error) in ignored_errors: log.webview.debug("Ignored error on {}: {} (error domain: {}, " "error code: {})".format( urlstr, info.errorString, info.domain, info.error)) return False else: error_str = info.errorString if error_str == networkmanager.HOSTBLOCK_ERROR_STRING: # We don't set error_occurred in this case. error_str = "Request blocked by host blocker." main_frame = info.frame.page().mainFrame() if info.frame != main_frame: # Content in an iframe -> Hide the frame so it doesn't use # any space. We can't hide the frame's documentElement # directly though. for elem in main_frame.documentElement().findAll('iframe'): if QUrl(elem.attribute('src')) == info.url: elem.setAttribute('style', 'display: none') return False else: self._ignore_load_started = True self.error_occurred = True log.webview.error("Error while loading {}: {}".format( urlstr, error_str)) log.webview.debug("Error domain: {}, error code: {}".format( info.domain, info.error)) title = "Error loading page: {}".format(urlstr) error_html = jinja.render( 'error.html', title=title, url=urlstr, error=error_str) errpage.content = error_html.encode('utf-8') errpage.encoding = 'utf-8' return True def _handle_multiple_files(self, info, files): """Handle uploading of multiple files. Loosely based on Helpviewer/HelpBrowserWV.py from eric5. Args: info: The ChooseMultipleFilesExtensionOption instance. files: The ChooseMultipleFilesExtensionReturn instance to write return values to. Return: True on success, the superclass return value on failure. """ suggested_file = "" if info.suggestedFileNames: suggested_file = info.suggestedFileNames[0] files.fileNames, _ = QFileDialog.getOpenFileNames(None, None, suggested_file) return True def _show_pdfjs(self, reply): """Show the reply with pdfjs.""" try: page = pdfjs.generate_pdfjs_page(reply.url()) except pdfjs.PDFJSNotFound: page = jinja.render('no_pdfjs.html', url=reply.url().toDisplayString()) self.mainFrame().setContent(page.encode('utf-8'), 'text/html', reply.url()) reply.deleteLater() def shutdown(self): """Prepare the web page for being deleted.""" self._is_shutting_down = True self.shutting_down.emit() download_manager = objreg.get('qtnetwork-download-manager', scope='window', window=self._win_id) nam = self.networkAccessManager() if download_manager.has_downloads_with_nam(nam): nam.setParent(download_manager) else: nam.shutdown() def display_content(self, reply, mimetype): """Display a QNetworkReply with an explicitly set mimetype.""" self.mainFrame().setContent(reply.readAll(), mimetype, reply.url()) reply.deleteLater() def on_print_requested(self, frame): """Handle printing when requested via javascript.""" printdiag = QPrintDialog() printdiag.setAttribute(Qt.WA_DeleteOnClose) printdiag.open(lambda: frame.print(printdiag.printer())) @pyqtSlot('QNetworkRequest') def on_download_requested(self, request): """Called when the user wants to download a link. We need to construct a copy of the QNetworkRequest here as the download_manager needs it async and we'd get a segfault otherwise as soon as the user has entered the filename, as Qt seems to delete it after this slot returns. 
""" req = QNetworkRequest(request) download_manager = objreg.get('qtnetwork-download-manager', scope='window', window=self._win_id) download_manager.get_request(req, qnam=self.networkAccessManager()) @pyqtSlot('QNetworkReply*') def on_unsupported_content(self, reply): """Handle an unsupportedContent signal. Most likely this will mean we need to download the reply, but we correct for some common errors the server do. At some point we might want to implement the MIME Sniffing standard here: http://mimesniff.spec.whatwg.org/ """ inline, suggested_filename = http.parse_content_disposition(reply) download_manager = objreg.get('qtnetwork-download-manager', scope='window', window=self._win_id) if not inline: # Content-Disposition: attachment -> force download download_manager.fetch(reply, suggested_filename=suggested_filename) return mimetype, _rest = http.parse_content_type(reply) if mimetype == 'image/jpg': # Some servers (e.g. the LinkedIn CDN) send a non-standard # image/jpg (instead of image/jpeg, defined in RFC 1341 section # 7.5). If this is the case, we force displaying with a corrected # mimetype. if reply.isFinished(): self.display_content(reply, 'image/jpeg') else: reply.finished.connect(functools.partial( self.display_content, reply, 'image/jpeg')) elif (mimetype in ['application/pdf', 'application/x-pdf'] and config.val.content.pdfjs): # Use pdf.js to display the page self._show_pdfjs(reply) else: # Unknown mimetype, so download anyways. download_manager.fetch(reply, suggested_filename=suggested_filename) @pyqtSlot() def on_load_started(self): """Reset error_occurred when loading of a new page started.""" if self._ignore_load_started: self._ignore_load_started = False else: self.error_occurred = False def _inject_userjs(self, frame): """Inject user JavaScripts into the page. Args: frame: The QWebFrame to inject the user scripts into. """ url = frame.url() if url.isEmpty(): url = frame.requestedUrl() log.greasemonkey.debug("_inject_userjs called for {} ({})" .format(frame, url.toDisplayString())) greasemonkey = objreg.get('greasemonkey') scripts = greasemonkey.scripts_for(url) # QtWebKit has trouble providing us with signals representing # page load progress at reasonable times, so we just load all # scripts on the same event. toload = scripts.start + scripts.end + scripts.idle if url.isEmpty(): # This happens during normal usage like with view source but may # also indicate a bug. log.greasemonkey.debug("Not running scripts for frame with no " "url: {}".format(frame)) assert not toload, toload for script in toload: if frame is self.mainFrame() or script.runs_on_sub_frames: log.webview.debug('Running GM script: {}'.format(script.name)) frame.evaluateJavaScript(script.code()) @pyqtSlot('QWebFrame*', 'QWebPage::Feature') def _on_feature_permission_requested(self, frame, feature): """Ask the user for approval for geolocation/notifications.""" if not isinstance(frame, QWebFrame): # pragma: no cover # This makes no sense whatsoever, but someone reported this being # called with a QBuffer... 
log.misc.error("on_feature_permission_requested got called with " "{!r}!".format(frame)) return options = { QWebPage.Notifications: 'content.notifications', QWebPage.Geolocation: 'content.geolocation', } messages = { QWebPage.Notifications: 'show notifications', QWebPage.Geolocation: 'access your location', } yes_action = functools.partial( self.setFeaturePermission, frame, feature, QWebPage.PermissionGrantedByUser) no_action = functools.partial( self.setFeaturePermission, frame, feature, QWebPage.PermissionDeniedByUser) question = shared.feature_permission( url=frame.url(), option=options[feature], msg=messages[feature], yes_action=yes_action, no_action=no_action, abort_on=[self.shutting_down, self.loadStarted]) if question is not None: self.featurePermissionRequestCanceled.connect( functools.partial(self._on_feature_permission_cancelled, question, frame, feature)) def _on_feature_permission_cancelled(self, question, frame, feature, cancelled_frame, cancelled_feature): """Slot invoked when a feature permission request was cancelled. To be used with functools.partial. """ if frame is cancelled_frame and feature == cancelled_feature: try: question.abort() except RuntimeError: # The question could already be deleted, e.g. because it was # aborted after a loadStarted signal. pass def on_save_frame_state_requested(self, frame, item): """Save scroll position and zoom in history. Args: frame: The QWebFrame which gets saved. item: The QWebHistoryItem to be saved. """ if frame != self.mainFrame(): return data = { 'zoom': frame.zoomFactor(), 'scroll-pos': frame.scrollPosition(), } item.setUserData(data) def on_restore_frame_state_requested(self, frame): """Restore scroll position and zoom from history. Args: frame: The QWebFrame which gets restored. """ if frame != self.mainFrame(): return data = self.history().currentItem().userData() if data is None: return if 'zoom' in data: frame.page().view().tab.zoom.set_factor(data['zoom']) if 'scroll-pos' in data and frame.scrollPosition() == QPoint(0, 0): frame.setScrollPosition(data['scroll-pos']) def userAgentForUrl(self, url): """Override QWebPage::userAgentForUrl to customize the user agent.""" ua = config.val.content.headers.user_agent if ua is None: return super().userAgentForUrl(url) else: return ua def supportsExtension(self, ext): """Override QWebPage::supportsExtension to provide error pages. Args: ext: The extension to check for. Return: True if the extension can be handled, False otherwise. """ return ext in self._extension_handlers def extension(self, ext, opt, out): """Override QWebPage::extension to provide error pages. Args: ext: The extension. opt: Extension options instance. out: Extension output instance. Return: Handler return value. 
""" try: handler = self._extension_handlers[ext] except KeyError: log.webview.warning("Extension {} not supported!".format(ext)) return super().extension(ext, opt, out) return handler(opt, out) def javaScriptAlert(self, frame, js_msg): """Override javaScriptAlert to use qutebrowser prompts.""" if self._is_shutting_down: return try: shared.javascript_alert(frame.url(), js_msg, abort_on=[self.loadStarted, self.shutting_down]) except shared.CallSuper: super().javaScriptAlert(frame, js_msg) def javaScriptConfirm(self, frame, js_msg): """Override javaScriptConfirm to use the statusbar.""" if self._is_shutting_down: return False try: return shared.javascript_confirm(frame.url(), js_msg, abort_on=[self.loadStarted, self.shutting_down]) except shared.CallSuper: return super().javaScriptConfirm(frame, js_msg) def javaScriptConsoleMessage(self, msg, line, source): """Override javaScriptConsoleMessage to use debug log.""" shared.javascript_log_message(usertypes.JsLogLevel.unknown, source, line, msg) def acceptNavigationRequest(self, _frame: QWebFrame, request: QNetworkRequest, typ: QWebPage.NavigationType): """Override acceptNavigationRequest to handle clicked links. Setting linkDelegationPolicy to DelegateAllLinks and using a slot bound to linkClicked won't work correctly, because when in a frameset, we have no idea in which frame the link should be opened. Checks if it should open it in a tab (middle-click or control) or not, and then conditionally opens the URL here or in another tab/window. """ url = request.url() log.webview.debug("navigation request: url {}, type {}, " "target {} override {}".format( url.toDisplayString(), debug.qenum_key(QWebPage, typ), self.open_target, self._tabdata.override_target)) if self._tabdata.override_target is not None: target = self._tabdata.override_target self._tabdata.override_target = None else: target = self.open_target if typ == QWebPage.NavigationTypeReload: self.reloading.emit(url) return True elif typ != QWebPage.NavigationTypeLinkClicked: return True if not url.isValid(): msg = urlutils.get_errstring(url, "Invalid link clicked") message.error(msg) self.open_target = usertypes.ClickTarget.normal return False if target == usertypes.ClickTarget.normal: return True tab = shared.get_tab(self._win_id, target) tab.openurl(url) self.open_target = usertypes.ClickTarget.normal return False
1
20,593
You should re-stringify it here with `QUrl.FullyEncoded`.
qutebrowser-qutebrowser
py
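What the review comment above asks for, roughly: pass the confirm_async prompt a URL re-serialized with QUrl.FullyEncoded instead of the display string, so the stored URL stays fully percent-encoded. A minimal sketch against the _handle_errorpage branch shown in this record (the url keyword argument is the one the patch introduces):

url = QUrl(info.url)
scheme = url.scheme()
message.confirm_async(
    title="Open external application for {}-link?".format(scheme),
    text="URL: <b>{}</b>".format(html.escape(url.toDisplayString())),
    yes_action=functools.partial(QDesktopServices.openUrl, url),
    # Re-stringified with full encoding, as the review suggests.
    url=url.toString(QUrl.FullyEncoded))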
@@ -128,9 +128,9 @@ func Wrap(ctx context.Context, topic Topic, msg []byte, recipient *ecdsa.PublicK return mine(ctx, odd, f) } -// Unwrap takes a chunk, a topic and a private key, and tries to decrypt the payload +// Unwrap takes a private key, a chunk, an array of possible topics and tries to decrypt the payload // using the private key, the prepended ephemeral public key for el-Gamal using the topic as salt -func Unwrap(ctx context.Context, key *ecdsa.PrivateKey, chunk swarm.Chunk, topics []Topic) (topic Topic, msg []byte, err error) { +func Unwrap(key *ecdsa.PrivateKey, chunk swarm.Chunk, quit chan struct{}, topics []Topic) (topic Topic, msg []byte, err error) { chunkData := chunk.Data() pubkey, err := extractPublicKey(chunkData) if err != nil {
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package pss import ( "bytes" "context" "crypto/ecdsa" "encoding/binary" "encoding/hex" "errors" "fmt" random "math/rand" "github.com/btcsuite/btcd/btcec" "github.com/ethersphere/bee/pkg/bmtpool" "github.com/ethersphere/bee/pkg/crypto" "github.com/ethersphere/bee/pkg/encryption" "github.com/ethersphere/bee/pkg/encryption/elgamal" "github.com/ethersphere/bee/pkg/swarm" ) var ( // ErrPayloadTooBig is returned when a given payload for a Message type is longer than the maximum amount allowed ErrPayloadTooBig = fmt.Errorf("message payload size cannot be greater than %d bytes", MaxPayloadSize) // ErrEmptyTargets is returned when the given target list for a trojan chunk is empty ErrEmptyTargets = errors.New("target list cannot be empty") // ErrVarLenTargets is returned when the given target list for a trojan chunk has addresses of different lengths ErrVarLenTargets = errors.New("target list cannot have targets of different length") ) // Topic is the type that classifies messages, allows client applications to subscribe to type Topic [32]byte // NewTopic creates a new Topic from an input string by taking its hash func NewTopic(text string) Topic { bytes, _ := crypto.LegacyKeccak256([]byte(text)) var topic Topic copy(topic[:], bytes[:32]) return topic } // Target is an alias for a partial address (overlay prefix) serving as potential destination type Target []byte // Targets is an alias for a collection of targets type Targets []Target const ( // MaxPayloadSize is the maximum allowed payload size for the Message type, in bytes MaxPayloadSize = swarm.ChunkSize - 3*swarm.HashSize ) // Wrap creates a new serialised message with the given topic, payload and recipient public key used // for encryption // - span as topic hint (H(key|topic)[0:8]) to match topic // chunk payload: // - nonce is chosen so that the chunk address will have one of the targets as its prefix and thus will be forwarded to the neighbourhood of the recipient overlay address the target is derived from // trojan payload: // - ephemeral public key for el-Gamal encryption // ciphertext - plaintext: // - plaintext length encoding // - integrity protection // message: func Wrap(ctx context.Context, topic Topic, msg []byte, recipient *ecdsa.PublicKey, targets Targets) (swarm.Chunk, error) { if len(msg) > MaxPayloadSize { return nil, ErrPayloadTooBig } // integrity protection and plaintext msg length encoding integrity, err := crypto.LegacyKeccak256(msg) if err != nil { return nil, err } binary.BigEndian.PutUint16(integrity[:2], uint16(len(msg))) // integrity segment prepended to msg plaintext := append(integrity, msg...) // use el-Gamal with ECDH on an ephemeral key, recipient public key and topic as salt enc, ephpub, err := elgamal.NewEncryptor(recipient, topic[:], 4032, swarm.NewHasher) if err != nil { return nil, err } ciphertext, err := enc.Encrypt(plaintext) if err != nil { return nil, err } // prepend serialised ephemeral public key to the ciphertext // NOTE: only the random bytes of the compressed public key are used // in order not to leak anything, the one bit parity info of the magic byte // is encoded in the parity of the 28th byte of the mined nonce ephpubBytes := (*btcec.PublicKey)(ephpub).SerializeCompressed() payload := append(ephpubBytes[1:], ciphertext...) 
odd := ephpubBytes[0]&0x1 != 0 if err := checkTargets(targets); err != nil { return nil, err } targetsLen := len(targets[0]) // topic hash, the first 8 bytes is used as the span of the chunk hash, err := crypto.LegacyKeccak256(append(enc.Key(), topic[:]...)) if err != nil { return nil, err } hint := hash[:8] h := hasher(hint, payload) // f is evaluating the mined nonce // it accepts the nonce if it has the parity required by the ephemeral public key AND // the chunk hashes to an address matching one of the targets f := func(nonce []byte) (swarm.Chunk, error) { hash, err := h(nonce) if err != nil { return nil, err } if !contains(targets, hash[:targetsLen]) { return nil, nil } chunk := swarm.NewChunk(swarm.NewAddress(hash), append(hint, append(nonce, payload...)...)) return chunk, nil } return mine(ctx, odd, f) } // Unwrap takes a chunk, a topic and a private key, and tries to decrypt the payload // using the private key, the prepended ephemeral public key for el-Gamal using the topic as salt func Unwrap(ctx context.Context, key *ecdsa.PrivateKey, chunk swarm.Chunk, topics []Topic) (topic Topic, msg []byte, err error) { chunkData := chunk.Data() pubkey, err := extractPublicKey(chunkData) if err != nil { return Topic{}, nil, err } hint := chunkData[:8] for _, topic = range topics { select { case <-ctx.Done(): return Topic{}, nil, ctx.Err() default: } dec, err := matchTopic(key, pubkey, hint, topic[:]) if err != nil { privk := crypto.Secp256k1PrivateKeyFromBytes(topic[:]) dec, err = matchTopic(privk, pubkey, hint, topic[:]) if err != nil { continue } } ciphertext := chunkData[72:] msg, err = decryptAndCheck(dec, ciphertext) if err != nil { continue } break } return topic, msg, nil } // checkTargets verifies that the list of given targets is non empty and with elements of matching size func checkTargets(targets Targets) error { if len(targets) == 0 { return ErrEmptyTargets } validLen := len(targets[0]) // take first element as allowed length for i := 1; i < len(targets); i++ { if len(targets[i]) != validLen { return ErrVarLenTargets } } return nil } func hasher(span, b []byte) func([]byte) ([]byte, error) { return func(nonce []byte) ([]byte, error) { s := append(nonce, b...) 
hasher := bmtpool.Get() defer bmtpool.Put(hasher) if err := hasher.SetSpanBytes(span); err != nil { return nil, err } if _, err := hasher.Write(s); err != nil { return nil, err } return hasher.Sum(nil), nil } } // contains returns whether the given collection contains the given element func contains(col Targets, elem []byte) bool { for i := range col { if bytes.Equal(elem, col[i]) { return true } } return false } // mine iteratively enumerates different nonces until the address (BMT hash) of the chunkhas one of the targets as its prefix func mine(ctx context.Context, odd bool, f func(nonce []byte) (swarm.Chunk, error)) (swarm.Chunk, error) { seeds := make([]uint32, 8) for i := range seeds { seeds[i] = random.Uint32() } initnonce := make([]byte, 32) for i := 0; i < 8; i++ { binary.LittleEndian.PutUint32(initnonce[i*4:i*4+4], seeds[i]) } if odd { initnonce[28] |= 0x01 } else { initnonce[28] &= 0xfe } seeds[7] = binary.LittleEndian.Uint32(initnonce[28:32]) quit := make(chan struct{}) // make both errs and result channels buffered so they never block result := make(chan swarm.Chunk, 8) errs := make(chan error, 8) for i := 0; i < 8; i++ { go func(j int) { nonce := make([]byte, 32) copy(nonce, initnonce) for seed := seeds[j]; ; seed++ { binary.LittleEndian.PutUint32(nonce[j*4:j*4+4], seed) res, err := f(nonce) if err != nil { errs <- err return } if res != nil { result <- res return } select { case <-quit: return default: } } }(i) } defer close(quit) select { case <-ctx.Done(): return nil, ctx.Err() case err := <-errs: return nil, err case res := <-result: return res, nil } } // extracts ephemeral public key from the chunk data to use with el-Gamal func extractPublicKey(chunkData []byte) (*ecdsa.PublicKey, error) { pubkeyBytes := make([]byte, 33) pubkeyBytes[0] |= 0x2 copy(pubkeyBytes[1:], chunkData[40:72]) if chunkData[36]|0x1 != 0 { pubkeyBytes[0] |= 0x1 } pubkey, err := btcec.ParsePubKey(pubkeyBytes, btcec.S256()) return (*ecdsa.PublicKey)(pubkey), err } // topic is needed to decrypt the trojan payload, but no need to perform decryption with each // instead the hash of the secret key and the topic is matched against a hint (64 bit meta info)q // proper integrity check will disambiguate any potential collisions (false positives) // if the topic matches the hint, it returns the el-Gamal decryptor, otherwise an error func matchTopic(key *ecdsa.PrivateKey, pubkey *ecdsa.PublicKey, hint []byte, topic []byte) (encryption.Decrypter, error) { dec, err := elgamal.NewDecrypter(key, pubkey, topic, swarm.NewHasher) if err != nil { return nil, err } match, err := crypto.LegacyKeccak256(append(dec.Key(), topic...)) if err != nil { return nil, err } if !bytes.Equal(hint, match[:8]) { return nil, errors.New("topic does not match hint") } return dec, nil } // decrypts the ciphertext with an el-Gamal decryptor using a topic that matched the hint // the msg is extracted from the plaintext and its integrity is checked func decryptAndCheck(dec encryption.Decrypter, ciphertext []byte) ([]byte, error) { plaintext, err := dec.Decrypt(ciphertext) if err != nil { return nil, err } length := int(binary.BigEndian.Uint16(plaintext[:2])) if length > MaxPayloadSize { return nil, errors.New("invalid length") } msg := plaintext[32 : 32+length] integrity := plaintext[2:32] hash, err := crypto.LegacyKeccak256(msg) if err != nil { return nil, err } if !bytes.Equal(integrity, hash[2:]) { return nil, errors.New("invalid message") } // bingo return msg, nil } // ParseRecipient extract ephemeral public key from the hexadecimal 
string to use with el-Gamal. func ParseRecipient(recipientHexString string) (*ecdsa.PublicKey, error) { publicKeyBytes, err := hex.DecodeString(recipientHexString) if err != nil { return nil, err } pubkey, err := btcec.ParsePubKey(publicKeyBytes, btcec.S256()) if err != nil { return nil, err } return (*ecdsa.PublicKey)(pubkey), err }
1
13,062
I'm not sure why the quit channel is needed in this context.
ethersphere-bee
go
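For context on the reviewer's question above: closing `quit` is what lets the losing goroutines exit once one worker has mined a matching nonce; without it they would keep enumerating nonces for as long as the process lives. Below is a minimal sketch of the same fan-out/first-result pattern with illustrative names; it is not the bee code itself, only the idea behind the `quit`/buffered-`result` pairing.

```go
package main

import (
	"context"
	"fmt"
)

// mineFirst runs n workers and returns the first result; closing quit
// tells the remaining workers to stop instead of looping forever.
func mineFirst(ctx context.Context, n int, work func(seed int) (int, bool)) (int, error) {
	quit := make(chan struct{})
	result := make(chan int, n) // buffered so a late winner never blocks

	for i := 0; i < n; i++ {
		go func(seed int) {
			for {
				select {
				case <-quit: // released when mineFirst returns
					return
				default:
				}
				if v, ok := work(seed); ok {
					result <- v
					return
				}
				seed += n
			}
		}(i)
	}
	defer close(quit) // signal the losers to stop

	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case v := <-result:
		return v, nil
	}
}

func main() {
	v, err := mineFirst(context.Background(), 8, func(seed int) (int, bool) {
		return seed, seed%1000 == 999 // toy predicate standing in for the target-prefix check
	})
	fmt.Println(v, err)
}
```

In the original code, `result` and `errs` are buffered to 8 for the same reason: a worker that finishes after the winner must still be able to send and return rather than block forever.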
@@ -64,13 +64,16 @@ func MakeIngressDeployment(args IngressArgs) *appsv1.Deployment { } container.Resources = corev1.ResourceRequirements{ Limits: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("1000Mi"), + corev1.ResourceMemory: resource.MustParse(args.MemoryLimit), }, Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("500Mi"), - corev1.ResourceCPU: resource.MustParse("1000m"), + corev1.ResourceMemory: resource.MustParse(args.MemoryRequest), + corev1.ResourceCPU: resource.MustParse(args.CPURequest), }, } + if len(args.CPULimit) > 0 { + container.Resources.Limits[corev1.ResourceCPU] = resource.MustParse(args.CPULimit) + } return deploymentTemplate(args.Args, []corev1.Container{container}) }
1
/* Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package resources import ( "strconv" "github.com/google/knative-gcp/pkg/broker/handler" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "knative.dev/pkg/kmeta" "knative.dev/pkg/system" ) // MakeIngressDeployment creates the ingress Deployment object. func MakeIngressDeployment(args IngressArgs) *appsv1.Deployment { container := containerTemplate(args.Args) // Decorate the container template with ingress port. container.Env = append(container.Env, corev1.EnvVar{Name: "PORT", Value: strconv.Itoa(args.Port)}) container.Ports = append(container.Ports, corev1.ContainerPort{Name: "http", ContainerPort: int32(args.Port)}) container.ReadinessProbe = &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/healthz", Port: intstr.FromInt(args.Port), Scheme: corev1.URISchemeHTTP, }, }, FailureThreshold: 3, PeriodSeconds: 2, SuccessThreshold: 1, TimeoutSeconds: 5, } container.LivenessProbe = &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/healthz", Port: intstr.FromInt(args.Port), Scheme: corev1.URISchemeHTTP, }, }, FailureThreshold: 3, InitialDelaySeconds: 5, PeriodSeconds: 2, SuccessThreshold: 1, TimeoutSeconds: 5, } container.Resources = corev1.ResourceRequirements{ Limits: corev1.ResourceList{ corev1.ResourceMemory: resource.MustParse("1000Mi"), }, Requests: corev1.ResourceList{ corev1.ResourceMemory: resource.MustParse("500Mi"), corev1.ResourceCPU: resource.MustParse("1000m"), }, } return deploymentTemplate(args.Args, []corev1.Container{container}) } // MakeFanoutDeployment creates the fanout Deployment object. func MakeFanoutDeployment(args FanoutArgs) *appsv1.Deployment { container := containerTemplate(args.Args) container.Resources = corev1.ResourceRequirements{ Limits: corev1.ResourceList{ corev1.ResourceMemory: resource.MustParse("3000Mi"), }, Requests: corev1.ResourceList{ corev1.ResourceMemory: resource.MustParse("500Mi"), corev1.ResourceCPU: resource.MustParse("1500m"), }, } container.Ports = append(container.Ports, corev1.ContainerPort{ Name: "http-health", ContainerPort: handler.DefaultHealthCheckPort, }, ) container.Env = append(container.Env, corev1.EnvVar{ Name: "MAX_CONCURRENCY_PER_EVENT", Value: "100", }) container.LivenessProbe = &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/healthz", Port: intstr.FromInt(handler.DefaultHealthCheckPort), Scheme: corev1.URISchemeHTTP, }, }, FailureThreshold: 3, InitialDelaySeconds: 15, PeriodSeconds: 15, SuccessThreshold: 1, TimeoutSeconds: 5, } return deploymentTemplate(args.Args, []corev1.Container{container}) } // MakeRetryDeployment creates the retry Deployment object. 
func MakeRetryDeployment(args RetryArgs) *appsv1.Deployment { container := containerTemplate(args.Args) container.Resources = corev1.ResourceRequirements{ Limits: corev1.ResourceList{ corev1.ResourceMemory: resource.MustParse("3000Mi"), }, Requests: corev1.ResourceList{ corev1.ResourceMemory: resource.MustParse("500Mi"), corev1.ResourceCPU: resource.MustParse("1000m"), }, } container.Ports = append(container.Ports, corev1.ContainerPort{ Name: "http-health", ContainerPort: handler.DefaultHealthCheckPort, }, ) container.LivenessProbe = &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/healthz", Port: intstr.FromInt(handler.DefaultHealthCheckPort), Scheme: corev1.URISchemeHTTP, }, }, FailureThreshold: 3, InitialDelaySeconds: 15, PeriodSeconds: 15, SuccessThreshold: 1, TimeoutSeconds: 5, } return deploymentTemplate(args.Args, []corev1.Container{container}) } // deploymentTemplate creates a template for data plane deployments. func deploymentTemplate(args Args, containers []corev1.Container) *appsv1.Deployment { return &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Namespace: args.BrokerCell.Namespace, Name: Name(args.BrokerCell.Name, args.ComponentName), OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(args.BrokerCell)}, Labels: Labels(args.BrokerCell.Name, args.ComponentName), }, Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{MatchLabels: Labels(args.BrokerCell.Name, args.ComponentName)}, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: Labels(args.BrokerCell.Name, args.ComponentName), Annotations: map[string]string{ "sidecar.istio.io/inject": strconv.FormatBool(args.AllowIstioSidecar), }, }, Spec: corev1.PodSpec{ ServiceAccountName: args.ServiceAccountName, Volumes: []corev1.Volume{ { Name: "broker-config", VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: Name(args.BrokerCell.Name, targetsCMName)}}}, }, { Name: "google-broker-key", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "google-broker-key", Optional: &optionalSecretVolume}}, }, }, Containers: containers, }, }, }, } } // containerTemplate returns a common template for broker data plane containers. func containerTemplate(args Args) corev1.Container { return corev1.Container{ Image: args.Image, Name: args.ComponentName, Env: []corev1.EnvVar{ { Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: "/var/secrets/google/key.json", }, { Name: system.NamespaceEnvKey, Value: system.Namespace(), }, { Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{ FieldRef: &corev1.ObjectFieldSelector{ FieldPath: "metadata.name", }, }, }, { Name: "CONFIG_LOGGING_NAME", Value: "config-logging", }, { Name: "CONFIG_OBSERVABILITY_NAME", Value: "config-observability", }, { // Used for StackDriver only. Name: "METRICS_DOMAIN", Value: "knative.dev/internal/eventing", }, }, Ports: []corev1.ContainerPort{ { Name: "metrics", ContainerPort: int32(args.MetricsPort), }, }, VolumeMounts: []corev1.VolumeMount{ { Name: "broker-config", MountPath: "/var/run/cloud-run-events/broker", }, { Name: "google-broker-key", MountPath: "/var/secrets/google", }, }, } }
1
16,890
When `args.MemoryLimit` is empty, will `MustParse` panic?
google-knative-gcp
go
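On the `MustParse` question above: `resource.MustParse` is documented to panic when the string cannot be parsed, and an empty string does not parse, so an empty `args.MemoryLimit` would panic at deployment-construction time. The diff already guards `CPULimit` with a length check; below is a hedged sketch of applying the same guard generically. The `setIfPresent` helper and the `main` wiring are illustrative, not knative-gcp code.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// setIfPresent only parses non-empty values, so an unset arg cannot
// trip MustParse's panic on an unparseable (e.g. empty) string.
func setIfPresent(list corev1.ResourceList, name corev1.ResourceName, val string) {
	if len(val) == 0 {
		return
	}
	// ParseQuantity reports the error instead of panicking, which is
	// friendlier than MustParse for operator-supplied input.
	q, err := resource.ParseQuantity(val)
	if err != nil {
		fmt.Printf("ignoring invalid quantity %q for %s: %v\n", val, name, err)
		return
	}
	list[name] = q
}

func main() {
	limits := corev1.ResourceList{}
	setIfPresent(limits, corev1.ResourceMemory, "1000Mi")
	setIfPresent(limits, corev1.ResourceCPU, "") // skipped: MustParse("") would panic here
	for name, qty := range limits {
		fmt.Println(name, qty.String())
	}
}
```

Alternatively, defaulting the args to the old literals ("1000Mi", "500Mi", "1000m") before parsing keeps `MustParse` safe without per-field checks.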
@@ -122,6 +122,11 @@ func (r *RouteTable) SetRoutes(ifaceName string, targets []Target) { r.dirtyIfaces.Add(ifaceName) } +func (r *RouteTable) QueueResync() { + r.logCxt.Info("Queueing a resync.") + r.inSync = false +} + func (r *RouteTable) Apply() error { if !r.inSync { links, err := r.dataplane.LinkList()
1
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package routetable import ( "errors" "net" "regexp" "strings" "syscall" log "github.com/Sirupsen/logrus" "github.com/vishvananda/netlink" "github.com/projectcalico/felix/conntrack" "github.com/projectcalico/felix/ifacemonitor" "github.com/projectcalico/felix/ip" "github.com/projectcalico/felix/set" calinet "github.com/projectcalico/libcalico-go/lib/net" ) var ( GetFailed = errors.New("netlink get operation failed") ListFailed = errors.New("netlink list operation failed") UpdateFailed = errors.New("netlink update operation failed") IfaceNotPresent = errors.New("interface not present") IfaceDown = errors.New("interface down") ipV6LinkLocalCIDR = ip.MustParseCIDR("fe80::/64") ) type Target struct { CIDR ip.CIDR DestMAC net.HardwareAddr } type RouteTable struct { logCxt *log.Entry ipVersion uint8 netlinkFamily int dirtyIfaces set.Set ifacePrefixes set.Set ifacePrefixRegexp *regexp.Regexp ifaceNameToTargets map[string][]Target pendingIfaceNameToTargets map[string][]Target inSync bool // dataplane is our shim for the netlink/arp interface. In production, it maps directly // through to calls to the netlink package and the arp command. dataplane dataplaneIface } func New(interfacePrefixes []string, ipVersion uint8) *RouteTable { return NewWithShims(interfacePrefixes, ipVersion, realDataplane{conntrack: conntrack.New()}) } // NewWithShims is a test constructor, which allows netlink to be replaced by a shim. 
func NewWithShims(interfacePrefixes []string, ipVersion uint8, nl dataplaneIface) *RouteTable { prefixSet := set.New() regexpParts := []string{} for _, prefix := range interfacePrefixes { prefixSet.Add(prefix) regexpParts = append(regexpParts, "^"+prefix+".*") } ifaceNamePattern := strings.Join(regexpParts, "|") log.WithField("regex", ifaceNamePattern).Info("Calculated interface name regexp") family := netlink.FAMILY_V4 if ipVersion == 6 { family = netlink.FAMILY_V6 } else if ipVersion != 4 { log.WithField("ipVersion", ipVersion).Panic("Unknown IP version") } return &RouteTable{ logCxt: log.WithFields(log.Fields{ "ipVersion": ipVersion, }), ipVersion: ipVersion, netlinkFamily: family, ifacePrefixes: prefixSet, ifacePrefixRegexp: regexp.MustCompile(ifaceNamePattern), ifaceNameToTargets: map[string][]Target{}, pendingIfaceNameToTargets: map[string][]Target{}, dirtyIfaces: set.New(), dataplane: nl, } } func (r *RouteTable) OnIfaceStateChanged(ifaceName string, state ifacemonitor.State) { logCxt := r.logCxt.WithField("ifaceName", ifaceName) if !r.ifacePrefixRegexp.MatchString(ifaceName) { logCxt.Debug("Ignoring interface state change, not a Calico interface.") return } if state == ifacemonitor.StateUp { logCxt.Debug("Interface up, marking for route sync") r.dirtyIfaces.Add(ifaceName) } } func (r *RouteTable) SetRoutes(ifaceName string, targets []Target) { r.pendingIfaceNameToTargets[ifaceName] = targets r.dirtyIfaces.Add(ifaceName) } func (r *RouteTable) Apply() error { if !r.inSync { links, err := r.dataplane.LinkList() if err != nil { r.logCxt.WithError(err).Error("Failed to list interfaces, retrying...") return ListFailed } // Clear the dirty set; there's no point trying to update non-existent interfaces. r.dirtyIfaces = set.New() for _, link := range links { attrs := link.Attrs() if attrs == nil { continue } ifaceName := attrs.Name if r.ifacePrefixRegexp.MatchString(ifaceName) { r.logCxt.WithField("ifaceName", ifaceName).Debug( "Resync: found calico-owned interface") r.dirtyIfaces.Add(ifaceName) } } r.inSync = true } r.dirtyIfaces.Iter(func(item interface{}) error { retries := 2 ifaceName := item.(string) logCxt := r.logCxt.WithField("ifaceName", ifaceName) for retries > 0 { err := r.syncRoutesForLink(ifaceName) if err == IfaceNotPresent { logCxt.Info("Interface missing, will retry if it appears.") break } else if err == IfaceDown { logCxt.Info("Interface down, will retry if it goes up.") break } else if err != nil { logCxt.WithError(err).Warn("Failed to syncronise routes.") retries-- continue } logCxt.Info("Synchronised routes on interface") break } if retries == 0 { // The interface might be flapping or being deleted. logCxt.Warn("Failed to sync routes to interface even after retries. " + "Leaving it dirty.") return nil } return set.RemoveItem }) if r.dirtyIfaces.Len() > 0 { r.logCxt.Warn("Some interfaces still out-of sync.") r.inSync = false return UpdateFailed } return nil } func (r *RouteTable) syncRoutesForLink(ifaceName string) error { logCxt := r.logCxt.WithField("ifaceName", ifaceName) logCxt.Debug("Syncing interface routes") // If this is a modify or delete, grab a copy of the existing targets so we can clean up // conntrack entries even if the routes have been removed. We'll remove any still-required // CIDRs from this set below. 
oldCIDRs := set.New() if updatedTargets, ok := r.pendingIfaceNameToTargets[ifaceName]; ok { logCxt.Debug("Have updated targets.") oldTargets := r.ifaceNameToTargets[ifaceName] if updatedTargets == nil { delete(r.ifaceNameToTargets, ifaceName) } else { r.ifaceNameToTargets[ifaceName] = updatedTargets } for _, target := range oldTargets { oldCIDRs.Add(target.CIDR) } delete(r.pendingIfaceNameToTargets, ifaceName) } expectedTargets := r.ifaceNameToTargets[ifaceName] expectedCIDRs := set.New() for _, t := range expectedTargets { expectedCIDRs.Add(t.CIDR) oldCIDRs.Discard(t.CIDR) } if r.ipVersion == 6 { expectedCIDRs.Add(ipV6LinkLocalCIDR) oldCIDRs.Discard(ipV6LinkLocalCIDR) } // The code below may add some more CIDRs to clean up before it is done, make sure we // remove conntrack entries in any case. defer oldCIDRs.Iter(func(item interface{}) error { // Remove and conntrack entries that should no longer be there. dest := item.(ip.CIDR) r.dataplane.RemoveConntrackFlows(dest.Version(), dest.Addr().AsNetIP()) return nil }) // Try to get the link. This may fail if it's been deleted out from under us. link, err := r.dataplane.LinkByName(ifaceName) if err != nil { // Filter the error so that we don't spam errors if the interface is being torn // down. filteredErr := r.filterErrorByIfaceState(ifaceName, GetFailed) if filteredErr == GetFailed { logCxt.WithError(err).Error("Failed to get interface.") } else { logCxt.WithError(err).Info("Failed to get interface; it's down/gone.") } return filteredErr } // Got the link; try to sync its routes. Note: We used to check if the interface // was oper down before we tried to do the sync but that prevented us from removing // routes from an interface in some corner cases (such as being admin up but oper // down). linkAttrs := link.Attrs() oldRoutes, err := r.dataplane.RouteList(link, r.netlinkFamily) if err != nil { // Filter the error so that we don't spam errors if the interface is being torn // down. filteredErr := r.filterErrorByIfaceState(ifaceName, ListFailed) if filteredErr == ListFailed { logCxt.WithError(err).Error("Error listing routes") } else { logCxt.WithError(err).Info("Failed to list routes; interface down/gone.") } return filteredErr } seenCIDRs := set.New() updatesFailed := false for _, route := range oldRoutes { var dest ip.CIDR if route.Dst != nil { dest = ip.CIDRFromIPNet(calinet.IPNet{*route.Dst}) } if !expectedCIDRs.Contains(dest) { logCxt := logCxt.WithField("dest", dest) logCxt.Info("Syncing routes: removing old route.") if err := r.dataplane.RouteDel(&route); err != nil { // Probably a race with the interface being deleted. logCxt.WithError(err).Info( "Route deletion failed, assuming someone got there first.") updatesFailed = true } if dest != nil { // Collect any old route CIDRs that we find in the dataplane so we // can remove their conntrack entries later. 
oldCIDRs.Add(dest) } } seenCIDRs.Add(dest) } for _, target := range expectedTargets { cidr := target.CIDR if !seenCIDRs.Contains(cidr) { logCxt := logCxt.WithField("targetCIDR", target.CIDR) logCxt.Info("Syncing routes: adding new route.") ipNet := cidr.ToIPNet() route := netlink.Route{ LinkIndex: linkAttrs.Index, Dst: &ipNet, Type: syscall.RTN_UNICAST, Protocol: syscall.RTPROT_BOOT, Scope: netlink.SCOPE_LINK, } if err := r.dataplane.RouteAdd(&route); err != nil { logCxt.WithError(err).Warn("Failed to add route") updatesFailed = true } } if r.ipVersion == 4 && target.DestMAC != nil { // TODO(smc) clean up/sync old ARP entries err := r.dataplane.AddStaticArpEntry(cidr, target.DestMAC, ifaceName) if err != nil { logCxt.WithError(err).Warn("Failed to set ARP entry") updatesFailed = true } } } if updatesFailed { // Recheck whether the interface exists so we don't produce spammy logs during // interface removal. return r.filterErrorByIfaceState(ifaceName, UpdateFailed) } return nil } // filterErrorByIfaceState checks the current state of the interface; it's down or gone, it returns // IfaceDown or IfaceError, otherwise, it returns the given defaultErr. func (r *RouteTable) filterErrorByIfaceState(ifaceName string, defaultErr error) error { logCxt := r.logCxt.WithField("ifaceName", ifaceName) if link, err := r.dataplane.LinkByName(ifaceName); err == nil { // Link still exists. Check if it's up. if link.Attrs().Flags&net.FlagUp != 0 { // Link exists and it's up, no reason that we expect to fail. return defaultErr } else { // Special case: Link exists and it's down. Assume that's the problem. return IfaceDown } } else if strings.Contains(err.Error(), "not found") { // Special case: Link no longer exists. return IfaceNotPresent } else { // Failed to list routes, then failed to check if interface exists. logCxt.WithError(err).Error("Failed to access interface after a failure") return defaultErr } }
1
15,063
How soon after this will Apply() be called? It would be a shame if there was still a significant delay before a missing or superfluous route was corrected.
projectcalico-felix
go
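Regarding the timing question above: `QueueResync` in the diff only clears `inSync`, so nothing is corrected until the owner of the `RouteTable` next calls `Apply()`, and the latency is whatever that calling loop's cadence is. A minimal sketch of the flag-driven pattern follows, with illustrative names rather than the felix dataplane loop.

```go
package main

import (
	"fmt"
	"time"
)

// routeSyncer mirrors the pattern in RouteTable: QueueResync only flips a
// flag; the expensive re-list happens on the next Apply call, so the
// correction latency is bounded by how often the owning loop calls Apply.
type routeSyncer struct {
	inSync bool
}

func (r *routeSyncer) QueueResync() { r.inSync = false }

func (r *routeSyncer) Apply() {
	if !r.inSync {
		fmt.Println("full resync: re-listing links and routes")
		r.inSync = true
	}
	fmt.Println("apply dirty interfaces")
}

func main() {
	r := &routeSyncer{}
	ticker := time.NewTicker(10 * time.Millisecond) // stand-in for the dataplane loop cadence
	defer ticker.Stop()

	r.QueueResync() // flagged now...
	<-ticker.C
	r.Apply() // ...but nothing is corrected until the loop next calls Apply
	r.Apply() // already in sync: only dirty interfaces are touched
}
```

If that delay matters, the caller can pair `QueueResync()` with an immediate `Apply()` rather than waiting for the next scheduled pass.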
@@ -23,6 +23,7 @@ import ( ) type nonceWithTTL struct { + idx int nonce uint64 deadline time.Time }
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package actpool import ( "container/heap" "math/big" "sort" "time" "github.com/facebookgo/clock" "go.uber.org/zap" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-core/action" accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util" "github.com/iotexproject/iotex-core/pkg/log" ) type nonceWithTTL struct { nonce uint64 deadline time.Time } type noncePriorityQueue []*nonceWithTTL func (h noncePriorityQueue) Len() int { return len(h) } func (h noncePriorityQueue) Less(i, j int) bool { return h[i].nonce < h[j].nonce } func (h noncePriorityQueue) Swap(i, j int) { h[i], h[j] = h[j], h[i] } func (h *noncePriorityQueue) Push(x interface{}) { if in, ok := x.(*nonceWithTTL); ok { *h = append(*h, in) } } func (h *noncePriorityQueue) Pop() interface{} { old := *h n := len(old) if n == 0 { return nil } x := old[n-1] old[n-1] = nil // avoid memory leak *h = old[0 : n-1] return x } // ActQueue is the interface of actQueue type ActQueue interface { Put(action.SealedEnvelope) error FilterNonce(uint64) []action.SealedEnvelope UpdateQueue(uint64) []action.SealedEnvelope SetPendingNonce(uint64) PendingNonce() uint64 SetPendingBalance(*big.Int) PendingBalance() *big.Int Len() int Empty() bool PendingActs() []action.SealedEnvelope AllActs() []action.SealedEnvelope } // actQueue is a queue of actions from an account type actQueue struct { ap *actPool address string // Map that stores all the actions belonging to an account associated with nonces items map[uint64]action.SealedEnvelope // Priority Queue that stores all the nonces belonging to an account. Nonces are used as indices for action map index noncePriorityQueue // Current pending nonce tracking previous actions that can be committed to the next block for the account pendingNonce uint64 // Current pending balance for the account pendingBalance *big.Int clock clock.Clock ttl time.Duration } // ActQueueOption is the option for actQueue. 
type ActQueueOption interface { SetActQueueOption(*actQueue) } // NewActQueue create a new action queue func NewActQueue(ap *actPool, address string, ops ...ActQueueOption) ActQueue { aq := &actQueue{ ap: ap, address: address, items: make(map[uint64]action.SealedEnvelope), index: noncePriorityQueue{}, pendingNonce: uint64(1), // Taking coinbase Action into account, pendingNonce should start with 1 pendingBalance: big.NewInt(0), clock: clock.New(), ttl: 0, } for _, op := range ops { op.SetActQueueOption(aq) } return aq } // Put inserts a new action into the map, also updating the queue's nonce index func (q *actQueue) Put(act action.SealedEnvelope) error { nonce := act.Nonce() if actInPool, exist := q.items[nonce]; exist { // act of higher gas price cut in line if act.GasPrice().Cmp(actInPool.GasPrice()) != 1 { return action.ErrReplaceUnderpriced } // update action in q.items and q.index q.items[nonce] = act for i, x := range q.index { if x.nonce == nonce { q.index[i].deadline = q.clock.Now().Add(q.ttl) break } } return nil } heap.Push(&q.index, &nonceWithTTL{nonce: nonce, deadline: q.clock.Now().Add(q.ttl)}) q.items[nonce] = act return nil } // FilterNonce removes all actions from the map with a nonce lower than the given threshold func (q *actQueue) FilterNonce(threshold uint64) []action.SealedEnvelope { var removed []action.SealedEnvelope // Pop off priority queue and delete corresponding entries from map until the threshold is reached for q.index.Len() > 0 && (q.index)[0].nonce < threshold { nonce := heap.Pop(&q.index).(*nonceWithTTL).nonce removed = append(removed, q.items[nonce]) delete(q.items, nonce) } return removed } func (q *actQueue) cleanTimeout() []action.SealedEnvelope { if q.ttl == 0 { return []action.SealedEnvelope{} } var ( removedFromQueue = make([]action.SealedEnvelope, 0) timeNow = q.clock.Now() size = len(q.index) ) for i := 0; i < size; { if timeNow.After(q.index[i].deadline) { removedFromQueue = append(removedFromQueue, q.items[q.index[i].nonce]) delete(q.items, q.index[i].nonce) q.index[i] = q.index[size-1] size-- continue } i++ } q.index = q.index[:size] heap.Init(&q.index) return removedFromQueue } // UpdateQueue updates the pending nonce and balance of the queue func (q *actQueue) UpdateQueue(nonce uint64) []action.SealedEnvelope { // First remove all timed out actions removedFromQueue := q.cleanTimeout() // Now, starting from the current pending nonce, incrementally find the next pending nonce // while updating pending balance if actions are payable for ; ; nonce++ { _, exist := q.items[nonce] if !exist { break } if !q.enoughBalance(q.items[nonce], true) { break } } q.pendingNonce = nonce // Find the index of new pending nonce within the queue sort.Sort(q.index) i := 0 for ; i < q.index.Len(); i++ { if q.index[i].nonce >= nonce { break } } // Case I: An unpayable action has been found while updating pending nonce/balance // Remove all the subsequent actions in the queue starting from the index of new pending nonce if _, exist := q.items[nonce]; exist { removedFromQueue = append(removedFromQueue, q.removeActs(i)...) 
return removedFromQueue } // Case II: All actions are payable while updating pending nonce/balance // Check all the subsequent actions in the queue starting from the index of new pending nonce // Find the nonce index of the first unpayable action // Remove all the subsequent actions in the queue starting from that index for ; i < q.index.Len(); i++ { nonce = q.index[i].nonce act := q.items[nonce] if !q.enoughBalance(act, false) { break } } removedFromQueue = append(removedFromQueue, q.removeActs(i)...) return removedFromQueue } // SetPendingNonce sets pending nonce for the queue func (q *actQueue) SetPendingNonce(nonce uint64) { q.pendingNonce = nonce } // PendingNonce returns the current pending nonce of the queue func (q *actQueue) PendingNonce() uint64 { return q.pendingNonce } // SetPendingBalance sets pending balance for the queue func (q *actQueue) SetPendingBalance(balance *big.Int) { q.pendingBalance = balance } // PendingBalance returns the current pending balance of the queue func (q *actQueue) PendingBalance() *big.Int { return q.pendingBalance } // Len returns the length of the action map func (q *actQueue) Len() int { return len(q.items) } // Empty returns whether the queue of actions is empty or not func (q *actQueue) Empty() bool { return q.Len() == 0 } // PendingActs creates a consecutive nonce-sorted slice of actions func (q *actQueue) PendingActs() []action.SealedEnvelope { if q.Len() == 0 { return nil } acts := make([]action.SealedEnvelope, 0, len(q.items)) addr, err := address.FromString(q.address) if err != nil { log.L().Error("Error when getting the address", zap.String("address", q.address), zap.Error(err)) return nil } confirmedState, err := accountutil.AccountState(q.ap.sf, addr) if err != nil { log.L().Error("Error when getting the nonce", zap.String("address", q.address), zap.Error(err)) return nil } nonce := confirmedState.Nonce + 1 for ; ; nonce++ { if _, exist := q.items[nonce]; !exist { break } acts = append(acts, q.items[nonce]) } return acts } // AllActs returns all the actions currently in queue func (q *actQueue) AllActs() []action.SealedEnvelope { acts := make([]action.SealedEnvelope, 0, len(q.items)) if q.Len() == 0 { return acts } sort.Sort(q.index) for _, nonce := range q.index { acts = append(acts, q.items[nonce.nonce]) } return acts } // removeActs removes all the actions starting at idx from queue func (q *actQueue) removeActs(idx int) []action.SealedEnvelope { removedFromQueue := make([]action.SealedEnvelope, 0) for i := idx; i < q.index.Len(); i++ { removedFromQueue = append(removedFromQueue, q.items[q.index[i].nonce]) delete(q.items, q.index[i].nonce) } q.index = q.index[:idx] heap.Init(&q.index) return removedFromQueue } // enoughBalance helps check whether queue's pending balance is sufficient for the given action func (q *actQueue) enoughBalance(act action.SealedEnvelope, updateBalance bool) bool { cost, _ := act.Cost() if q.pendingBalance.Cmp(cost) < 0 { return false } if updateBalance { q.pendingBalance.Sub(q.pendingBalance, cost) } return true }
1
24,655
I added an `idx` property, and it is only used to benchmark `heap.Remove`; is that acceptable?
iotexproject-iotex-core
go
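For the `idx` question above: `container/heap` removes arbitrary elements only by position via `heap.Remove(h, i)`, so an O(log n) removal needs each element to carry its current index, kept up to date in `Swap`, `Push`, and `Pop`. The sketch below shows that bookkeeping in isolation; it mirrors the standard library's priority-queue example rather than the `actQueue` code, and the `items` map is just an illustrative way to look an element up by nonce.

```go
package main

import (
	"container/heap"
	"fmt"
)

type item struct {
	nonce uint64
	idx   int // position in the heap, maintained by Swap/Push/Pop
}

type queue []*item

func (q queue) Len() int            { return len(q) }
func (q queue) Less(i, j int) bool  { return q[i].nonce < q[j].nonce }
func (q queue) Swap(i, j int)       { q[i], q[j] = q[j], q[i]; q[i].idx = i; q[j].idx = j }
func (q *queue) Push(x interface{}) { it := x.(*item); it.idx = len(*q); *q = append(*q, it) }
func (q *queue) Pop() interface{} {
	old := *q
	n := len(old)
	it := old[n-1]
	old[n-1] = nil // avoid memory leak
	*q = old[:n-1]
	return it
}

func main() {
	q := &queue{}
	items := make(map[uint64]*item)
	for _, n := range []uint64{5, 1, 9, 3} {
		it := &item{nonce: n}
		items[n] = it
		heap.Push(q, it)
	}
	// O(log n) removal by tracked index instead of a linear scan plus heap.Init.
	heap.Remove(q, items[9].idx)
	for q.Len() > 0 {
		fmt.Println(heap.Pop(q).(*item).nonce)
	}
}
```

Without the tracked index, removing a single nonce means a linear scan plus `heap.Init`, which is presumably what the benchmark comparison measures.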
@@ -1,6 +1,6 @@ # -*- coding: UTF-8 -*- #A part of NonVisual Desktop Access (NVDA) -#Copyright (C) 2006-2018 NV Access Limited, Babbage B.V., Davy Kager +#Copyright (C) 2006-2019tNV Access Limited, Babbage B.V., Davy Kager, Derek Riemer #This file is covered by the GNU General Public License. #See the file COPYING for more details.
1
# -*- coding: UTF-8 -*- #A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2006-2018 NV Access Limited, Babbage B.V., Davy Kager #This file is covered by the GNU General Public License. #See the file COPYING for more details. from cStringIO import StringIO from configobj import ConfigObj #: The version of the schema outlined in this file. Increment this when modifying the schema and #: provide an upgrade step (@see profileUpgradeSteps.py). An upgrade step does not need to be added when #: just adding a new element to (or removing from) the schema, only when old versions of the config #: (conforming to old schema versions) will not work correctly with the new schema. latestSchemaVersion = 2 #: The configuration specification string #: @type: String configSpecString = ("""# NVDA Configuration File schemaVersion = integer(min=0, default={latestSchemaVersion}) [general] language = string(default="Windows") saveConfigurationOnExit = boolean(default=True) askToExit = boolean(default=true) playStartAndExitSounds = boolean(default=true) #possible log levels are DEBUG, IO, DEBUGWARNING, INFO loggingLevel = string(default="INFO") showWelcomeDialogAtStartup = boolean(default=true) # Speech settings [speech] # The synthesiser to use synth = string(default=auto) symbolLevel = integer(default=100) trustVoiceLanguage = boolean(default=true) includeCLDR = boolean(default=True) beepSpeechModePitch = integer(default=10000,min=50,max=11025) outputDevice = string(default=default) autoLanguageSwitching = boolean(default=true) autoDialectSwitching = boolean(default=false) [[__many__]] capPitchChange = integer(default=30,min=-100,max=100) sayCapForCapitals = boolean(default=false) beepForCapitals = boolean(default=false) useSpellingFunctionality = boolean(default=true) # Audio settings [audio] audioDuckingMode = integer(default=0) # Braille settings [braille] display = string(default=auto) translationTable = string(default=en-ueb-g1.ctb) inputTable = string(default=en-ueb-g1.ctb) expandAtCursor = boolean(default=true) showCursor = boolean(default=true) cursorBlink = boolean(default=true) cursorBlinkRate = integer(default=500,min=200,max=2000) cursorShapeFocus = integer(default=192,min=1,max=255) cursorShapeReview = integer(default=128,min=1,max=255) noMessageTimeout = boolean(default=false) messageTimeout = integer(default=4,min=0,max=20) tetherTo = string(default="focus") autoTether = boolean(default=true) readByParagraph = boolean(default=false) wordWrap = boolean(default=true) focusContextPresentation = option("changedContext", "fill", "scroll", default="changedContext") # Braille display driver settings [[__many__]] port = string(default="") # Presentation settings [presentation] reportKeyboardShortcuts = boolean(default=true) reportObjectPositionInformation = boolean(default=true) guessObjectPositionInformationWhenUnavailable = boolean(default=false) reportTooltips = boolean(default=false) reportHelpBalloons = boolean(default=true) reportObjectDescriptions = boolean(default=True) reportDynamicContentChanges = boolean(default=True) reportAutoSuggestionsWithSound = boolean(default=True) [[progressBarUpdates]] reportBackgroundProgressBars = boolean(default=false) #output modes are beep, speak, both, or off progressBarOutputMode = string(default="beep") speechPercentageInterval = integer(default=10) beepPercentageInterval = integer(default=1) beepMinHZ = integer(default=110) [mouse] enableMouseTracking = boolean(default=True) #must be true for any of the other settings to work mouseTextUnit = 
string(default="paragraph") reportObjectRoleOnMouseEnter = boolean(default=False) audioCoordinatesOnMouseMove = boolean(default=False) audioCoordinates_detectBrightness = boolean(default=False) audioCoordinates_blurFactor = integer(default=3) audioCoordinates_minVolume = float(default=0.1) audioCoordinates_maxVolume = float(default=1.0) audioCoordinates_minPitch = integer(default=220) audioCoordinates_maxPitch = integer(default=880) reportMouseShapeChanges = boolean(default=false) ignoreInjectedMouseInput = boolean(default=false) [speechViewer] showSpeechViewerAtStartup = boolean(default=false) autoPositionWindow = boolean(default=True) # values for positioning the window. Defaults are not used. They should not be read if autoPositionWindow is True x = integer() y = integer() width = integer() height = integer() displays = string_list() #Keyboard settings [keyboard] useCapsLockAsNVDAModifierKey = boolean(default=false) useNumpadInsertAsNVDAModifierKey = boolean(default=true) useExtendedInsertAsNVDAModifierKey = boolean(default=true) keyboardLayout = string(default="desktop") speakTypedCharacters = boolean(default=true) speakTypedWords = boolean(default=false) beepForLowercaseWithCapslock = boolean(default=true) speakCommandKeys = boolean(default=false) speechInterruptForCharacters = boolean(default=true) speechInterruptForEnter = boolean(default=true) allowSkimReadingInSayAll = boolean(default=False) alertForSpellingErrors = boolean(default=True) handleInjectedKeys= boolean(default=true) [virtualBuffers] maxLineLength = integer(default=100) linesPerPage = integer(default=25) useScreenLayout = boolean(default=True) autoPassThroughOnFocusChange = boolean(default=true) autoPassThroughOnCaretMove = boolean(default=false) passThroughAudioIndication = boolean(default=true) autoSayAllOnPageLoad = boolean(default=true) trapNonCommandGestures = boolean(default=true) focusFollowsBrowse = boolean(default=True) [touch] touchTyping = boolean(default=False) #Settings for document reading (such as MS Word and wordpad) [documentFormatting] #These settings affect what information is reported when you navigate to text where the formatting or placement has changed detectFormatAfterCursor = boolean(default=false) reportFontName = boolean(default=false) reportFontSize = boolean(default=false) reportFontAttributes = boolean(default=false) reportRevisions = boolean(default=true) reportEmphasis = boolean(default=false) reportColor = boolean(default=False) reportAlignment = boolean(default=false) reportLineSpacing = boolean(default=false) reportStyle = boolean(default=false) reportSpellingErrors = boolean(default=true) reportPage = boolean(default=true) reportLineNumber = boolean(default=False) reportLineIndentation = boolean(default=False) reportLineIndentationWithTones = boolean(default=False) reportParagraphIndentation = boolean(default=False) reportTables = boolean(default=true) includeLayoutTables = boolean(default=False) reportTableHeaders = boolean(default=True) reportTableCellCoords = boolean(default=True) reportBorderStyle = boolean(default=False) reportBorderColor = boolean(default=False) reportLinks = boolean(default=true) reportComments = boolean(default=true) reportLists = boolean(default=true) reportHeadings = boolean(default=true) reportBlockQuotes = boolean(default=true) reportLandmarks = boolean(default=true) reportFrames = boolean(default=true) reportClickable = boolean(default=true) [reviewCursor] simpleReviewMode = boolean(default=True) followFocus = boolean(default=True) followCaret = 
boolean(default=True) followMouse = boolean(default=False) [UIA] enabled = boolean(default=true) useInMSWordWhenAvailable = boolean(default=false) [update] autoCheck = boolean(default=true) startupNotification = boolean(default=true) allowUsageStats = boolean(default=false) askedAllowUsageStats = boolean(default=false) [inputComposition] autoReportAllCandidates = boolean(default=True) announceSelectedCandidate = boolean(default=True) alwaysIncludeShortCharacterDescriptionInCandidateName = boolean(default=True) reportReadingStringChanges = boolean(default=True) reportCompositionStringChanges = boolean(default=True) [debugLog] hwIo = boolean(default=false) audioDucking = boolean(default=false) gui = boolean(default=false) louis = boolean(default=false) timeSinceInput = boolean(default=false) [uwpOcr] language = string(default="") [upgrade] newLaptopKeyboardLayout = boolean(default=false) [editableText] caretMoveTimeoutMs = integer(min=0, max=2000, default=100) [development] enableScratchpadDir = boolean(default=false) """).format(latestSchemaVersion=latestSchemaVersion) #: The configuration specification #: @type: ConfigObj confspec = ConfigObj(StringIO( configSpecString ), list_values=False, encoding="UTF-8") confspec.newlines = "\r\n"
1
24,684
A daemonic `t` got inserted into the copyright line ("2006-2019tNV Access Limited").
nvaccess-nvda
py
@@ -532,7 +532,8 @@ public class DefaultBlockchain implements MutableBlockchain { if (!genesisHash.get().equals(genesisBlock.getHash())) { throw new InvalidConfigurationException( "Supplied genesis block does not match stored chain data.\n" - + "Please specify a different data directory with --data-path or specify the original genesis file with --genesis-file."); + + "Please specify a different data directory with --data-path or specify the" + + " original genesis file with --genesis-file."); } } }
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.ethereum.chain; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; import static java.util.Collections.emptyList; import static java.util.stream.Collectors.joining; import static java.util.stream.Collectors.toList; import org.hyperledger.besu.ethereum.core.Block; import org.hyperledger.besu.ethereum.core.BlockBody; import org.hyperledger.besu.ethereum.core.BlockHeader; import org.hyperledger.besu.ethereum.core.BlockWithReceipts; import org.hyperledger.besu.ethereum.core.Difficulty; import org.hyperledger.besu.ethereum.core.Hash; import org.hyperledger.besu.ethereum.core.LogWithMetadata; import org.hyperledger.besu.ethereum.core.Transaction; import org.hyperledger.besu.ethereum.core.TransactionReceipt; import org.hyperledger.besu.metrics.BesuMetricCategory; import org.hyperledger.besu.plugin.services.MetricsSystem; import org.hyperledger.besu.util.InvalidConfigurationException; import org.hyperledger.besu.util.Subscribers; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import com.google.common.collect.Streams; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; public class DefaultBlockchain implements MutableBlockchain { private static final Logger LOG = LogManager.getLogger(); protected final BlockchainStorage blockchainStorage; private final Subscribers<BlockAddedObserver> blockAddedObservers = Subscribers.create(); private final Subscribers<ChainReorgObserver> blockReorgObservers = Subscribers.create(); private final long reorgLoggingThreshold; private volatile BlockHeader chainHeader; private volatile Difficulty totalDifficulty; private volatile int chainHeadTransactionCount; private volatile int chainHeadOmmerCount; private DefaultBlockchain( final Optional<Block> genesisBlock, final BlockchainStorage blockchainStorage, final MetricsSystem metricsSystem, final long reorgLoggingThreshold) { checkNotNull(genesisBlock); checkNotNull(blockchainStorage); checkNotNull(metricsSystem); this.blockchainStorage = blockchainStorage; genesisBlock.ifPresent(this::setGenesis); final Hash chainHead = blockchainStorage.getChainHead().get(); chainHeader = blockchainStorage.getBlockHeader(chainHead).get(); totalDifficulty = blockchainStorage.getTotalDifficulty(chainHead).get(); final BlockBody chainHeadBody = blockchainStorage.getBlockBody(chainHead).get(); chainHeadTransactionCount = chainHeadBody.getTransactions().size(); chainHeadOmmerCount = chainHeadBody.getOmmers().size(); metricsSystem.createLongGauge( 
BesuMetricCategory.ETHEREUM, "blockchain_height", "The current height of the canonical chain", this::getChainHeadBlockNumber); metricsSystem.createGauge( BesuMetricCategory.BLOCKCHAIN, "difficulty_total", "Total difficulty of the chainhead", () -> this.getChainHead().getTotalDifficulty().toBigInteger().doubleValue()); metricsSystem.createLongGauge( BesuMetricCategory.BLOCKCHAIN, "chain_head_timestamp", "Timestamp from the current chain head", () -> getChainHeadHeader().getTimestamp()); metricsSystem.createLongGauge( BesuMetricCategory.BLOCKCHAIN, "chain_head_gas_used", "Gas used by the current chain head block", () -> getChainHeadHeader().getGasUsed()); metricsSystem.createLongGauge( BesuMetricCategory.BLOCKCHAIN, "chain_head_gas_limit", "Block gas limit of the current chain head block", () -> getChainHeadHeader().getGasLimit()); metricsSystem.createIntegerGauge( BesuMetricCategory.BLOCKCHAIN, "chain_head_transaction_count", "Number of transactions in the current chain head block", () -> chainHeadTransactionCount); metricsSystem.createIntegerGauge( BesuMetricCategory.BLOCKCHAIN, "chain_head_ommer_count", "Number of ommers in the current chain head block", () -> chainHeadOmmerCount); this.reorgLoggingThreshold = reorgLoggingThreshold; } public static MutableBlockchain createMutable( final Block genesisBlock, final BlockchainStorage blockchainStorage, final MetricsSystem metricsSystem, final long reorgLoggingThreshold) { checkNotNull(genesisBlock); return new DefaultBlockchain( Optional.of(genesisBlock), blockchainStorage, metricsSystem, reorgLoggingThreshold); } public static Blockchain create( final BlockchainStorage blockchainStorage, final MetricsSystem metricsSystem, final long reorgLoggingThreshold) { checkArgument( validateStorageNonEmpty(blockchainStorage), "Cannot create Blockchain from empty storage"); return new DefaultBlockchain( Optional.empty(), blockchainStorage, metricsSystem, reorgLoggingThreshold); } private static boolean validateStorageNonEmpty(final BlockchainStorage blockchainStorage) { // Run a few basic checks to make sure data looks available and consistent return blockchainStorage .getChainHead() .flatMap(blockchainStorage::getTotalDifficulty) .isPresent() && blockchainStorage.getBlockHash(BlockHeader.GENESIS_BLOCK_NUMBER).isPresent(); } @Override public ChainHead getChainHead() { return new ChainHead(chainHeader.getHash(), totalDifficulty, chainHeader.getNumber()); } @Override public Hash getChainHeadHash() { return chainHeader.getHash(); } @Override public long getChainHeadBlockNumber() { return chainHeader.getNumber(); } @Override public BlockHeader getChainHeadHeader() { return chainHeader; } @Override public Block getChainHeadBlock() { return new Block(chainHeader, blockchainStorage.getBlockBody(chainHeader.getHash()).get()); } @Override public Optional<BlockHeader> getBlockHeader(final long blockNumber) { return blockchainStorage.getBlockHash(blockNumber).flatMap(blockchainStorage::getBlockHeader); } @Override public Optional<BlockHeader> getBlockHeader(final Hash blockHeaderHash) { return blockchainStorage.getBlockHeader(blockHeaderHash); } @Override public Optional<BlockBody> getBlockBody(final Hash blockHeaderHash) { return blockchainStorage.getBlockBody(blockHeaderHash); } @Override public Optional<List<TransactionReceipt>> getTxReceipts(final Hash blockHeaderHash) { return blockchainStorage.getTransactionReceipts(blockHeaderHash); } @Override public Optional<Hash> getBlockHashByNumber(final long number) { return blockchainStorage.getBlockHash(number); } 
@Override public Optional<Difficulty> getTotalDifficultyByHash(final Hash blockHeaderHash) { return blockchainStorage.getTotalDifficulty(blockHeaderHash); } @Override public Optional<Transaction> getTransactionByHash(final Hash transactionHash) { return blockchainStorage .getTransactionLocation(transactionHash) .flatMap( l -> blockchainStorage .getBlockBody(l.getBlockHash()) .map(b -> b.getTransactions().get(l.getTransactionIndex()))); } @Override public Optional<TransactionLocation> getTransactionLocation(final Hash transactionHash) { return blockchainStorage.getTransactionLocation(transactionHash); } @Override public synchronized void appendBlock(final Block block, final List<TransactionReceipt> receipts) { checkArgument( block.getBody().getTransactions().size() == receipts.size(), "Supplied receipts do not match block transactions."); if (blockIsAlreadyTracked(block)) { return; } checkArgument(blockIsConnected(block), "Attempt to append non-connected block."); final BlockAddedEvent blockAddedEvent = appendBlockHelper(new BlockWithReceipts(block, receipts)); blockAddedObservers.forEach(observer -> observer.onBlockAdded(blockAddedEvent)); } private BlockAddedEvent appendBlockHelper(final BlockWithReceipts blockWithReceipts) { final Block block = blockWithReceipts.getBlock(); final List<TransactionReceipt> receipts = blockWithReceipts.getReceipts(); final Hash hash = block.getHash(); final Difficulty td = calculateTotalDifficulty(block); final BlockchainStorage.Updater updater = blockchainStorage.updater(); updater.putBlockHeader(hash, block.getHeader()); updater.putBlockBody(hash, block.getBody()); updater.putTransactionReceipts(hash, receipts); updater.putTotalDifficulty(hash, td); // Update canonical chain data final BlockAddedEvent blockAddedEvent = updateCanonicalChainData(updater, blockWithReceipts, td); updater.commit(); if (blockAddedEvent.isNewCanonicalHead()) { updateCacheForNewCanonicalHead(block, td); } return blockAddedEvent; } private Difficulty calculateTotalDifficulty(final Block block) { if (block.getHeader().getNumber() == BlockHeader.GENESIS_BLOCK_NUMBER) { return block.getHeader().getDifficulty(); } final Difficulty parentTotalDifficulty = blockchainStorage .getTotalDifficulty(block.getHeader().getParentHash()) .orElseThrow( () -> new IllegalStateException("Blockchain is missing total difficulty data.")); return block.getHeader().getDifficulty().add(parentTotalDifficulty); } private BlockAddedEvent updateCanonicalChainData( final BlockchainStorage.Updater updater, final BlockWithReceipts blockWithReceipts, final Difficulty totalDifficulty) { final Block newBlock = blockWithReceipts.getBlock(); final Hash chainHead = blockchainStorage.getChainHead().orElse(null); if (newBlock.getHeader().getNumber() != BlockHeader.GENESIS_BLOCK_NUMBER && chainHead == null) { throw new IllegalStateException("Blockchain is missing chain head."); } final Hash newBlockHash = newBlock.getHash(); try { if (chainHead == null || newBlock.getHeader().getParentHash().equals(chainHead)) { // This block advances the chain, update the chain head updater.putBlockHash(newBlock.getHeader().getNumber(), newBlockHash); updater.setChainHead(newBlockHash); indexTransactionForBlock(updater, newBlockHash, newBlock.getBody().getTransactions()); return BlockAddedEvent.createForHeadAdvancement( newBlock, LogWithMetadata.generate( blockWithReceipts.getBlock(), blockWithReceipts.getReceipts(), false), blockWithReceipts.getReceipts()); } else if 
(totalDifficulty.compareTo(blockchainStorage.getTotalDifficulty(chainHead).get()) > 0) { // New block represents a chain reorganization return handleChainReorg(updater, blockWithReceipts); } else { // New block represents a fork return handleFork(updater, newBlock); } } catch (final NoSuchElementException e) { // Any Optional.get() calls in this block should be present, missing data means data // corruption or a bug. updater.rollback(); throw new IllegalStateException("Blockchain is missing data that should be present.", e); } } private BlockAddedEvent handleFork(final BlockchainStorage.Updater updater, final Block fork) { final Collection<Hash> forkHeads = blockchainStorage.getForkHeads(); // Check to see if this block advances any existing fork. // This block will replace its parent forkHeads.stream() .filter(head -> head.equals(fork.getHeader().getParentHash())) .findAny() .ifPresent(forkHeads::remove); forkHeads.add(fork.getHash()); updater.setForkHeads(forkHeads); return BlockAddedEvent.createForFork(fork); } private BlockAddedEvent handleChainReorg( final BlockchainStorage.Updater updater, final BlockWithReceipts newChainHeadWithReceipts) { BlockWithReceipts oldChainWithReceipts = getBlockWithReceipts(chainHeader).get(); BlockWithReceipts currentOldChainWithReceipts = oldChainWithReceipts; BlockWithReceipts currentNewChainWithReceipts = newChainHeadWithReceipts; // Update chain head updater.setChainHead(currentNewChainWithReceipts.getHeader().getHash()); // Track transactions and logs to be added and removed final Map<Hash, List<Transaction>> newTransactions = new HashMap<>(); final List<Transaction> removedTransactions = new ArrayList<>(); final List<LogWithMetadata> addedLogsWithMetadata = new ArrayList<>(); final List<LogWithMetadata> removedLogsWithMetadata = new ArrayList<>(); while (currentNewChainWithReceipts.getNumber() > currentOldChainWithReceipts.getNumber()) { // If new chain is longer than old chain, walk back until we meet the old chain by number // adding indexing for new chain along the way. final Hash blockHash = currentNewChainWithReceipts.getHash(); updater.putBlockHash(currentNewChainWithReceipts.getNumber(), blockHash); newTransactions.put( blockHash, currentNewChainWithReceipts.getBlock().getBody().getTransactions()); addAddedLogsWithMetadata(addedLogsWithMetadata, currentNewChainWithReceipts); notifyChainReorgBlockAdded(currentNewChainWithReceipts); currentNewChainWithReceipts = getParentBlockWithReceipts(currentNewChainWithReceipts); } while (currentOldChainWithReceipts.getNumber() > currentNewChainWithReceipts.getNumber()) { // If oldChain is longer than new chain, walk back until we meet the new chain by number, // updating as we go. updater.removeBlockHash(currentOldChainWithReceipts.getNumber()); removedTransactions.addAll( currentOldChainWithReceipts.getBlock().getBody().getTransactions()); addRemovedLogsWithMetadata(removedLogsWithMetadata, currentOldChainWithReceipts); currentOldChainWithReceipts = getParentBlockWithReceipts(currentOldChainWithReceipts); } while (!currentOldChainWithReceipts.getHash().equals(currentNewChainWithReceipts.getHash())) { // Walk back until we meet the common ancestor between the two chains, updating as we go. 
final Hash newBlockHash = currentNewChainWithReceipts.getHash(); updater.putBlockHash(currentNewChainWithReceipts.getNumber(), newBlockHash); newTransactions.put( newBlockHash, currentNewChainWithReceipts.getBlock().getBody().getTransactions()); removedTransactions.addAll( currentOldChainWithReceipts.getBlock().getBody().getTransactions()); addAddedLogsWithMetadata(addedLogsWithMetadata, currentNewChainWithReceipts); addRemovedLogsWithMetadata(removedLogsWithMetadata, currentOldChainWithReceipts); currentNewChainWithReceipts = getParentBlockWithReceipts(currentNewChainWithReceipts); currentOldChainWithReceipts = getParentBlockWithReceipts(currentOldChainWithReceipts); } final BlockWithReceipts commonAncestorWithReceipts = currentNewChainWithReceipts; // Update indexed transactions newTransactions.forEach( (blockHash, transactionsInBlock) -> { indexTransactionForBlock(updater, blockHash, transactionsInBlock); // Don't remove transactions that are being re-indexed. removedTransactions.removeAll(transactionsInBlock); }); clearIndexedTransactionsForBlock(updater, removedTransactions); // Update tracked forks final Collection<Hash> forks = blockchainStorage.getForkHeads(); // Old head is now a fork forks.add(oldChainWithReceipts.getHash()); // Remove new chain head's parent if it was tracked as a fork final Optional<Hash> parentFork = forks.stream() .filter(f -> f.equals(newChainHeadWithReceipts.getHeader().getParentHash())) .findAny(); parentFork.ifPresent(forks::remove); updater.setForkHeads(forks); maybeLogReorg(newChainHeadWithReceipts, oldChainWithReceipts, commonAncestorWithReceipts); return BlockAddedEvent.createForChainReorg( newChainHeadWithReceipts.getBlock(), newTransactions.values().stream().flatMap(Collection::stream).collect(toList()), removedTransactions, newChainHeadWithReceipts.getReceipts(), Stream.concat(removedLogsWithMetadata.stream(), addedLogsWithMetadata.stream()) .collect(Collectors.toUnmodifiableList()), currentNewChainWithReceipts.getBlock().getHash()); } private void maybeLogReorg( final BlockWithReceipts newChainHeadWithReceipts, final BlockWithReceipts oldChainWithReceipts, final BlockWithReceipts commonAncestorWithReceipts) { if (newChainHeadWithReceipts.getNumber() - commonAncestorWithReceipts.getNumber() > reorgLoggingThreshold || oldChainWithReceipts.getNumber() - commonAncestorWithReceipts.getNumber() > reorgLoggingThreshold) { LOG.warn( "Chain Reorganization +{} new / -{} old\n{}", () -> newChainHeadWithReceipts.getNumber() - commonAncestorWithReceipts.getNumber(), () -> oldChainWithReceipts.getNumber() - commonAncestorWithReceipts.getNumber(), () -> Streams.zip( Stream.of("Old", "New", "Ancestor"), Stream.of( oldChainWithReceipts, newChainHeadWithReceipts, commonAncestorWithReceipts) .map( blockWithReceipts -> String.format( "hash: %s, height: %s", blockWithReceipts.getHash(), blockWithReceipts.getNumber())), (label, values) -> String.format("%10s - %s", label, values)) .collect(joining("\n"))); } } @Override public boolean rewindToBlock(final long blockNumber) { final Optional<Hash> blockHash = blockchainStorage.getBlockHash(blockNumber); if (blockHash.isEmpty()) { return false; } final BlockchainStorage.Updater updater = blockchainStorage.updater(); try { final BlockHeader oldBlockHeader = blockchainStorage.getBlockHeader(blockHash.get()).get(); final BlockWithReceipts blockWithReceipts = getBlockWithReceipts(oldBlockHeader).get(); final Block block = blockWithReceipts.getBlock(); handleChainReorg(updater, blockWithReceipts); updater.commit(); 
updateCacheForNewCanonicalHead(block, calculateTotalDifficulty(block)); return true; } catch (final NoSuchElementException e) { // Any Optional.get() calls in this block should be present, missing data means data // corruption or a bug. updater.rollback(); throw new IllegalStateException("Blockchain is missing data that should be present.", e); } } void updateCacheForNewCanonicalHead(final Block block, final Difficulty uInt256) { chainHeader = block.getHeader(); totalDifficulty = uInt256; chainHeadTransactionCount = block.getBody().getTransactions().size(); chainHeadOmmerCount = block.getBody().getOmmers().size(); } private static void indexTransactionForBlock( final BlockchainStorage.Updater updater, final Hash hash, final List<Transaction> txs) { for (int i = 0; i < txs.size(); i++) { final Hash txHash = txs.get(i).getHash(); final TransactionLocation loc = new TransactionLocation(hash, i); updater.putTransactionLocation(txHash, loc); } } private static void clearIndexedTransactionsForBlock( final BlockchainStorage.Updater updater, final List<Transaction> txs) { for (final Transaction tx : txs) { updater.removeTransactionLocation(tx.getHash()); } } @VisibleForTesting Set<Hash> getForks() { return new HashSet<>(blockchainStorage.getForkHeads()); } private void setGenesis(final Block genesisBlock) { checkArgument( genesisBlock.getHeader().getNumber() == BlockHeader.GENESIS_BLOCK_NUMBER, "Invalid genesis block."); final Optional<Hash> maybeHead = blockchainStorage.getChainHead(); if (maybeHead.isEmpty()) { // Initialize blockchain store with genesis block. final BlockchainStorage.Updater updater = blockchainStorage.updater(); final Hash hash = genesisBlock.getHash(); updater.putBlockHeader(hash, genesisBlock.getHeader()); updater.putBlockBody(hash, genesisBlock.getBody()); updater.putTransactionReceipts(hash, emptyList()); updater.putTotalDifficulty(hash, calculateTotalDifficulty(genesisBlock)); updater.putBlockHash(genesisBlock.getHeader().getNumber(), hash); updater.setChainHead(hash); updater.commit(); } else { // Verify genesis block is consistent with stored blockchain. final Optional<Hash> genesisHash = getBlockHashByNumber(BlockHeader.GENESIS_BLOCK_NUMBER); if (genesisHash.isEmpty()) { throw new IllegalStateException("Blockchain is missing genesis block data."); } if (!genesisHash.get().equals(genesisBlock.getHash())) { throw new InvalidConfigurationException( "Supplied genesis block does not match stored chain data.\n" + "Please specify a different data directory with --data-path or specify the original genesis file with --genesis-file."); } } } private boolean blockIsAlreadyTracked(final Block block) { if (block.getHeader().getParentHash().equals(chainHeader.getHash())) { // If this block builds on our chain head it would have a higher TD and be the chain head // but since it isn't we mustn't have imported it yet. 
// Saves a db read for the most common case return false; } return blockchainStorage.getBlockHeader(block.getHash()).isPresent(); } private boolean blockIsConnected(final Block block) { return blockchainStorage.getBlockHeader(block.getHeader().getParentHash()).isPresent(); } private void addAddedLogsWithMetadata( final List<LogWithMetadata> logsWithMetadata, final BlockWithReceipts blockWithReceipts) { logsWithMetadata.addAll( 0, LogWithMetadata.generate( blockWithReceipts.getBlock(), blockWithReceipts.getReceipts(), false)); } private void addRemovedLogsWithMetadata( final List<LogWithMetadata> logsWithMetadata, final BlockWithReceipts blockWithReceipts) { logsWithMetadata.addAll( Lists.reverse( LogWithMetadata.generate( blockWithReceipts.getBlock(), blockWithReceipts.getReceipts(), true))); } private Optional<BlockWithReceipts> getBlockWithReceipts(final BlockHeader blockHeader) { return blockchainStorage .getBlockBody(blockHeader.getHash()) .map(body -> new Block(blockHeader, body)) .flatMap( block -> blockchainStorage .getTransactionReceipts(blockHeader.getHash()) .map(receipts -> new BlockWithReceipts(block, receipts))); } private BlockWithReceipts getParentBlockWithReceipts(final BlockWithReceipts blockWithReceipts) { return blockchainStorage .getBlockHeader(blockWithReceipts.getHeader().getParentHash()) .flatMap(this::getBlockWithReceipts) .get(); } @Override public long observeBlockAdded(final BlockAddedObserver observer) { checkNotNull(observer); return blockAddedObservers.subscribe(observer); } @Override public boolean removeObserver(final long observerId) { return blockAddedObservers.unsubscribe(observerId); } @Override public long observeChainReorg(final ChainReorgObserver observer) { checkNotNull(observer); return blockReorgObservers.subscribe(observer); } @Override public boolean removeChainReorgObserver(final long observerId) { return blockReorgObservers.unsubscribe(observerId); } @VisibleForTesting int observerCount() { return blockAddedObservers.getSubscriberCount(); } private void notifyChainReorgBlockAdded(final BlockWithReceipts blockWithReceipts) { blockReorgObservers.forEach(observer -> observer.onBlockAdded(blockWithReceipts, this)); } }
1
24,072
This edit is non-essential.
hyperledger-besu
java
@@ -14,6 +14,14 @@ import { const isWeakMapSupported = typeof WeakMap == 'function'; +Object.setPrototypeOf = + Object.setPrototypeOf || + function(obj, proto) { + // eslint-disable-next-line + obj.__proto__ = proto; + return obj; + }; + function getClosestDomNodeParent(parent) { if (!parent) return {}; if (typeof parent.type == 'function') {
1
import { checkPropTypes } from './check-props'; import { options, Component } from 'preact'; import { ELEMENT_NODE, DOCUMENT_NODE, DOCUMENT_FRAGMENT_NODE } from './constants'; import { getOwnerStack, setupComponentStack, getCurrentVNode, getDisplayName } from './component-stack'; const isWeakMapSupported = typeof WeakMap == 'function'; function getClosestDomNodeParent(parent) { if (!parent) return {}; if (typeof parent.type == 'function') { return getClosestDomNodeParent(parent._parent); } return parent; } export function initDebug() { setupComponentStack(); /* eslint-disable no-console */ let oldBeforeDiff = options._diff; let oldDiffed = options.diffed; let oldVnode = options.vnode; let oldCatchError = options._catchError; let oldRoot = options._root; let oldHook = options._hook; const warnedComponents = !isWeakMapSupported ? null : { useEffect: new WeakMap(), useLayoutEffect: new WeakMap(), lazyPropTypes: new WeakMap() }; options._catchError = (error, vnode, oldVNode) => { let component = vnode && vnode._component; if (component && typeof error.then == 'function') { const promise = error; error = new Error( `Missing Suspense. The throwing component was: ${getDisplayName(vnode)}` ); let parent = vnode; for (; parent; parent = parent._parent) { if (parent._component && parent._component._childDidSuspend) { error = promise; break; } } // We haven't recovered and we know at this point that there is no // Suspense component higher up in the tree if (error instanceof Error) { throw error; } } oldCatchError(error, vnode, oldVNode); }; options._root = (vnode, parentNode) => { if (!parentNode) { throw new Error( 'Undefined parent passed to render(), this is the second argument.\n' + 'Check if the element is available in the DOM/has the correct id.' ); } let isValid; switch (parentNode.nodeType) { case ELEMENT_NODE: case DOCUMENT_FRAGMENT_NODE: case DOCUMENT_NODE: isValid = true; break; default: isValid = false; } if (!isValid) { let componentName = getDisplayName(vnode); throw new Error( `Expected a valid HTML node as a second argument to render. Received ${parentNode} instead: render(<${componentName} />, ${parentNode});` ); } if (oldRoot) oldRoot(vnode, parentNode); }; options._diff = vnode => { let { type, _parent: parent } = vnode; let parentVNode = getClosestDomNodeParent(parent); if (type === undefined) { throw new Error( 'Undefined component passed to createElement()\n\n' + 'You likely forgot to export your component or might have mixed up default and named imports' + serializeVNode(vnode) + `\n\n${getOwnerStack(vnode)}` ); } else if (type != null && typeof type == 'object') { if (type._children !== undefined && type._dom !== undefined) { throw new Error( `Invalid type passed to createElement(): ${type}\n\n` + 'Did you accidentally pass a JSX literal as JSX twice?\n\n' + ` let My${getDisplayName(vnode)} = ${serializeVNode(type)};\n` + ` let vnode = <My${getDisplayName(vnode)} />;\n\n` + 'This usually happens when you export a JSX literal and not the component.' + `\n\n${getOwnerStack(vnode)}` ); } throw new Error( 'Invalid type passed to createElement(): ' + (Array.isArray(type) ? 'array' : type) ); } if ( (type === 'thead' || type === 'tfoot' || type === 'tbody') && parentVNode.type !== 'table' ) { console.error( 'Improper nesting of table. Your <thead/tbody/tfoot> should have a <table> parent.' 
+ serializeVNode(vnode) + `\n\n${getOwnerStack(vnode)}` ); } else if ( type === 'tr' && parentVNode.type !== 'thead' && parentVNode.type !== 'tfoot' && parentVNode.type !== 'tbody' && parentVNode.type !== 'table' ) { console.error( 'Improper nesting of table. Your <tr> should have a <thead/tbody/tfoot/table> parent.' + serializeVNode(vnode) + `\n\n${getOwnerStack(vnode)}` ); } else if (type === 'td' && parentVNode.type !== 'tr') { console.error( 'Improper nesting of table. Your <td> should have a <tr> parent.' + serializeVNode(vnode) + `\n\n${getOwnerStack(vnode)}` ); } else if (type === 'th' && parentVNode.type !== 'tr') { console.error( 'Improper nesting of table. Your <th> should have a <tr>.' + serializeVNode(vnode) + `\n\n${getOwnerStack(vnode)}` ); } if ( vnode.ref !== undefined && typeof vnode.ref != 'function' && typeof vnode.ref != 'object' && !('$$typeof' in vnode) // allow string refs when preact-compat is installed ) { throw new Error( `Component's "ref" property should be a function, or an object created ` + `by createRef(), but got [${typeof vnode.ref}] instead\n` + serializeVNode(vnode) + `\n\n${getOwnerStack(vnode)}` ); } if (typeof vnode.type == 'string') { for (const key in vnode.props) { if ( key[0] === 'o' && key[1] === 'n' && typeof vnode.props[key] != 'function' && vnode.props[key] != null ) { throw new Error( `Component's "${key}" property should be a function, ` + `but got [${typeof vnode.props[key]}] instead\n` + serializeVNode(vnode) + `\n\n${getOwnerStack(vnode)}` ); } } } // Check prop-types if available if (typeof vnode.type == 'function' && vnode.type.propTypes) { if ( vnode.type.displayName === 'Lazy' && warnedComponents && !warnedComponents.lazyPropTypes.has(vnode.type) ) { const m = 'PropTypes are not supported on lazy(). Use propTypes on the wrapped component itself. '; try { const lazyVNode = vnode.type(); warnedComponents.lazyPropTypes.set(vnode.type, true); console.warn( m + `Component wrapped in lazy() is ${getDisplayName(lazyVNode)}` ); } catch (promise) { console.warn( m + "We will log the wrapped component's name once it is loaded." ); } } checkPropTypes( vnode.type.propTypes, vnode.props, 'prop', getDisplayName(vnode) ); } if (oldBeforeDiff) oldBeforeDiff(vnode); }; options._hook = (comp, index, type) => { if (!comp) { throw new Error('Hook can only be invoked from render methods.'); } if (oldHook) oldHook(comp, index, type); }; const warn = (property, err) => ({ get() { console.warn(`getting vnode.${property} is deprecated, ${err}`); }, set() { console.warn(`setting vnode.${property} is not allowed, ${err}`); } }); const deprecatedAttributes = { nodeName: warn('nodeName', 'use vnode.type'), attributes: warn('attributes', 'use vnode.props'), children: warn('children', 'use vnode.props.children') }; const deprecatedProto = Object.create({}, deprecatedAttributes); options.vnode = vnode => { const props = vnode.props; if ( vnode.type !== null && props != null && ('__source' in props || '__self' in props) ) { const newProps = (vnode.props = {}); for (let i in props) { const v = props[i]; if (i === '__source') vnode.__source = v; else if (i === '__self') vnode.__self = v; else newProps[i] = v; } } Object.setPrototypeOf(vnode, deprecatedProto); if (oldVnode) oldVnode(vnode); }; options.diffed = vnode => { // Check if the user passed plain objects as children. Note that we cannot // move this check into `options.vnode` because components can receive // children in any shape they want (e.g. 
// `<MyJSONFormatter>{{ foo: 123, bar: "abc" }}</MyJSONFormatter>`). // Putting this check in `options.diffed` ensures that // `vnode._children` is set and that we only validate the children // that were actually rendered. if (vnode._children) { vnode._children.forEach(child => { if (child && child.type === undefined) { // Remove internal vnode keys that will always be patched delete child._parent; delete child._depth; const keys = Object.keys(child).join(','); throw new Error( `Objects are not valid as a child. Encountered an object with the keys {${keys}}.` + `\n\n${getOwnerStack(vnode)}` ); } }); } /** @type {import('./internal').Component} */ const component = vnode._component; if (component && component.__hooks) { let hooks = component.__hooks; if (Array.isArray(hooks._list)) { hooks._list.forEach(hook => { if (hook._factory && (!hook._args || !Array.isArray(hook._args))) { let componentName = getDisplayName(vnode); console.warn( `In ${componentName} you are calling useMemo/useCallback without passing arguments.\n` + `This is a noop since it will not be able to memoize, it will execute it every render.` + `\n\n${getOwnerStack(vnode)}` ); } }); } } if (oldDiffed) oldDiffed(vnode); if (vnode._children != null) { const keys = []; for (let i = 0; i < vnode._children.length; i++) { const child = vnode._children[i]; if (!child || child.key == null) continue; const key = child.key; if (keys.indexOf(key) !== -1) { console.error( 'Following component has two or more children with the ' + `same key attribute: "${key}". This may cause glitches and misbehavior ` + 'in rendering process. Component: \n\n' + serializeVNode(vnode) + `\n\n${getOwnerStack(vnode)}` ); // Break early to not spam the console break; } keys.push(key); } } }; } const setState = Component.prototype.setState; Component.prototype.setState = function(update, callback) { if (this._vnode == null) { // `this._vnode` will be `null` during componentWillMount. But it // is perfectly valid to call `setState` during cWM. So we // need an additional check to verify that we are dealing with a // call inside constructor. if (this.state == null) { console.warn( `Calling "this.setState" inside the constructor of a component is a ` + `no-op and might be a bug in your application. Instead, set ` + `"this.state = {}" directly.\n\n${getOwnerStack(getCurrentVNode())}` ); } } else if (this._parentDom == null) { console.warn( `Can't call "this.setState" on an unmounted component. This is a no-op, ` + `but it indicates a memory leak in your application. To fix, cancel all ` + `subscriptions and asynchronous tasks in the componentWillUnmount method.` + `\n\n${getOwnerStack(this._vnode)}` ); } return setState.call(this, update, callback); }; const forceUpdate = Component.prototype.forceUpdate; Component.prototype.forceUpdate = function(callback) { if (this._vnode == null) { console.warn( `Calling "this.forceUpdate" inside the constructor of a component is a ` + `no-op and might be a bug in your application.\n\n${getOwnerStack( getCurrentVNode() )}` ); } else if (this._parentDom == null) { console.warn( `Can't call "this.forceUpdate" on an unmounted component. This is a no-op, ` + `but it indicates a memory leak in your application. 
To fix, cancel all ` + `subscriptions and asynchronous tasks in the componentWillUnmount method.` + `\n\n${getOwnerStack(this._vnode)}` ); } return forceUpdate.call(this, callback); }; /** * Serialize a vnode tree to a string * @param {import('./internal').VNode} vnode * @returns {string} */ export function serializeVNode(vnode) { let { props } = vnode; let name = getDisplayName(vnode); let attrs = ''; for (let prop in props) { if (props.hasOwnProperty(prop) && prop !== 'children') { let value = props[prop]; // If it is an object but doesn't have toString(), use Object.toString if (typeof value == 'function') { value = `function ${value.displayName || value.name}() {}`; } value = Object(value) === value && !value.toString ? Object.prototype.toString.call(value) : value + ''; attrs += ` ${prop}=${JSON.stringify(value)}`; } } let children = props.children; return `<${name}${attrs}${ children && children.length ? '>..</' + name + '>' : ' />' }`; }
1
15,717
Might be safer to ponyfill this?
preactjs-preact
js
@@ -57,18 +57,11 @@ func Create(dir string) (*Local, error) { } // test if config file already exists - _, err := os.Lstat(backend.Paths.Config) + _, err := os.Lstat(filepath.Join(dir, backend.Paths.Config)) if err == nil { return nil, errors.New("config file already exists") } - // test if directories already exist - for _, d := range dirs[1:] { - if _, err := os.Stat(d); err == nil { - return nil, fmt.Errorf("dir %s already exists", d) - } - } - // create paths for data, refs and temp for _, d := range dirs { err := os.MkdirAll(d, backend.Modes.Dir)
1
package local import ( "errors" "fmt" "io" "io/ioutil" "os" "path/filepath" "sort" "sync" "github.com/restic/restic/backend" ) var ErrWrongData = errors.New("wrong data returned by backend, checksum does not match") type Local struct { p string mu sync.Mutex open map[string][]*os.File // Contains open files. Guarded by 'mu'. } // Open opens the local backend at dir. func Open(dir string) (*Local, error) { items := []string{ dir, filepath.Join(dir, backend.Paths.Data), filepath.Join(dir, backend.Paths.Snapshots), filepath.Join(dir, backend.Paths.Index), filepath.Join(dir, backend.Paths.Locks), filepath.Join(dir, backend.Paths.Keys), filepath.Join(dir, backend.Paths.Temp), } // test if all necessary dirs are there for _, d := range items { if _, err := os.Stat(d); err != nil { return nil, fmt.Errorf("%s does not exist", d) } } return &Local{p: dir, open: make(map[string][]*os.File)}, nil } // Create creates all the necessary files and directories for a new local // backend at dir. Afterwards a new config blob should be created. func Create(dir string) (*Local, error) { dirs := []string{ dir, filepath.Join(dir, backend.Paths.Data), filepath.Join(dir, backend.Paths.Snapshots), filepath.Join(dir, backend.Paths.Index), filepath.Join(dir, backend.Paths.Locks), filepath.Join(dir, backend.Paths.Keys), filepath.Join(dir, backend.Paths.Temp), } // test if config file already exists _, err := os.Lstat(backend.Paths.Config) if err == nil { return nil, errors.New("config file already exists") } // test if directories already exist for _, d := range dirs[1:] { if _, err := os.Stat(d); err == nil { return nil, fmt.Errorf("dir %s already exists", d) } } // create paths for data, refs and temp for _, d := range dirs { err := os.MkdirAll(d, backend.Modes.Dir) if err != nil { return nil, err } } // open backend return Open(dir) } // Location returns this backend's location (the directory name). func (b *Local) Location() string { return b.p } // Return temp directory in correct directory for this backend. func (b *Local) tempFile() (*os.File, error) { return ioutil.TempFile(filepath.Join(b.p, backend.Paths.Temp), "temp-") } type localBlob struct { f *os.File size uint final bool basedir string } func (lb *localBlob) Write(p []byte) (int, error) { if lb.final { return 0, errors.New("blob already closed") } n, err := lb.f.Write(p) lb.size += uint(n) return n, err } func (lb *localBlob) Size() uint { return lb.size } func (lb *localBlob) Finalize(t backend.Type, name string) error { if lb.final { return errors.New("Already finalized") } lb.final = true err := lb.f.Close() if err != nil { return fmt.Errorf("local: file.Close: %v", err) } f := filename(lb.basedir, t, name) // create directories if necessary, ignore errors if t == backend.Data { os.MkdirAll(filepath.Dir(f), backend.Modes.Dir) } // test if new path already exists if _, err := os.Stat(f); err == nil { return fmt.Errorf("Close(): file %v already exists", f) } if err := os.Rename(lb.f.Name(), f); err != nil { return err } // set mode to read-only fi, err := os.Stat(f) if err != nil { return err } return setNewFileMode(f, fi) } // Create creates a new Blob. The data is available only after Finalize() // has been called on the returned Blob. 
func (b *Local) Create() (backend.Blob, error) { // TODO: make sure that tempfile is removed upon error // create tempfile in backend file, err := b.tempFile() if err != nil { return nil, err } blob := localBlob{ f: file, basedir: b.p, } b.mu.Lock() open, _ := b.open["blobs"] b.open["blobs"] = append(open, file) b.mu.Unlock() return &blob, nil } // Construct path for given Type and name. func filename(base string, t backend.Type, name string) string { if t == backend.Config { return filepath.Join(base, "config") } return filepath.Join(dirname(base, t, name), name) } // Construct directory for given Type. func dirname(base string, t backend.Type, name string) string { var n string switch t { case backend.Data: n = backend.Paths.Data if len(name) > 2 { n = filepath.Join(n, name[:2]) } case backend.Snapshot: n = backend.Paths.Snapshots case backend.Index: n = backend.Paths.Index case backend.Lock: n = backend.Paths.Locks case backend.Key: n = backend.Paths.Keys } return filepath.Join(base, n) } // Get returns a reader that yields the content stored under the given // name. The reader should be closed after draining it. func (b *Local) Get(t backend.Type, name string) (io.ReadCloser, error) { file, err := os.Open(filename(b.p, t, name)) if err != nil { return nil, err } b.mu.Lock() open, _ := b.open[filename(b.p, t, name)] b.open[filename(b.p, t, name)] = append(open, file) b.mu.Unlock() return file, nil } // GetReader returns an io.ReadCloser for the Blob with the given name of // type t at offset and length. If length is 0, the reader reads until EOF. func (b *Local) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) { f, err := os.Open(filename(b.p, t, name)) if err != nil { return nil, err } b.mu.Lock() open, _ := b.open[filename(b.p, t, name)] b.open[filename(b.p, t, name)] = append(open, f) b.mu.Unlock() _, err = f.Seek(int64(offset), 0) if err != nil { return nil, err } if length == 0 { return f, nil } return backend.LimitReadCloser(f, int64(length)), nil } // Test returns true if a blob of the given type and name exists in the backend. func (b *Local) Test(t backend.Type, name string) (bool, error) { _, err := os.Stat(filename(b.p, t, name)) if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } return true, nil } // Remove removes the blob with the given name and type. func (b *Local) Remove(t backend.Type, name string) error { // close all open files we may have. fn := filename(b.p, t, name) b.mu.Lock() open, _ := b.open[fn] for _, file := range open { file.Close() } b.open[fn] = nil b.mu.Unlock() // reset read-only flag err := os.Chmod(fn, 0666) if err != nil { return err } return os.Remove(fn) } // List returns a channel that yields all names of blobs of type t. A // goroutine is started for this. If the channel done is closed, sending // stops. func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string { // TODO: use os.Open() and d.Readdirnames() instead of Glob() var pattern string if t == backend.Data { pattern = filepath.Join(dirname(b.p, t, ""), "*", "*") } else { pattern = filepath.Join(dirname(b.p, t, ""), "*") } ch := make(chan string) matches, err := filepath.Glob(pattern) if err != nil { close(ch) return ch } for i := range matches { matches[i] = filepath.Base(matches[i]) } sort.Strings(matches) go func() { defer close(ch) for _, m := range matches { if m == "" { continue } select { case ch <- m: case <-done: return } } }() return ch } // Delete removes the repository and all files. 
func (b *Local) Delete() error { b.Close() return os.RemoveAll(b.p) } // Close closes all open files. // They may have been closed already, // so we ignore all errors. func (b *Local) Close() error { b.mu.Lock() for _, open := range b.open { for _, file := range open { file.Close() } } b.open = make(map[string][]*os.File) b.mu.Unlock() return nil }
1
6,898
Why was this join not necessary before?
restic-restic
go
@@ -163,6 +163,17 @@ public class TestExitableDirectoryReader extends LuceneTestCase { searcher.search(query, 10); reader.close(); + // Set a fairly high timeout value (infinite) and expect the query to complete in that time frame. + // Not checking the validity of the result, but checking the sampling kicks in to reduce the number of timeout check + CountingQueryTimeout queryTimeout = new CountingQueryTimeout(); + directoryReader = DirectoryReader.open(directory); + exitableDirectoryReader = new ExitableDirectoryReader(directoryReader, queryTimeout); + reader = new TestReader(getOnlyLeafReader(exitableDirectoryReader)); + searcher = new IndexSearcher(reader); + searcher.search(query, 10); + reader.close(); + assertEquals(3, queryTimeout.getShouldExitCallCount()); + directory.close(); }
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.index; import java.io.IOException; import java.util.Arrays; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.ExitableDirectoryReader.ExitingReaderException; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; /** * Test that uses a default/lucene Implementation of {@link QueryTimeout} * to exit out long running queries that take too long to iterate over Terms. */ public class TestExitableDirectoryReader extends LuceneTestCase { private static class TestReader extends FilterLeafReader { private static class TestTerms extends FilterTerms { TestTerms(Terms in) { super(in); } @Override public TermsEnum iterator() throws IOException { return new TestTermsEnum(super.iterator()); } } private static class TestTermsEnum extends FilterTermsEnum { public TestTermsEnum(TermsEnum in) { super(in); } /** * Sleep between iterations to timeout things. */ @Override public BytesRef next() throws IOException { try { // Sleep for 100ms before each .next() call. Thread.sleep(100); } catch (InterruptedException e) { } return in.next(); } } public TestReader(LeafReader reader) throws IOException { super(reader); } @Override public Terms terms(String field) throws IOException { Terms terms = super.terms(field); return terms==null ? 
null : new TestTerms(terms); } @Override public CacheHelper getCoreCacheHelper() { return in.getCoreCacheHelper(); } @Override public CacheHelper getReaderCacheHelper() { return in.getReaderCacheHelper(); } } /** * Tests timing out of TermsEnum iterations * @throws Exception on error */ public void testExitableFilterTermsIndexReader() throws Exception { Directory directory = newDirectory(); IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random()))); Document d1 = new Document(); d1.add(newTextField("default", "one two", Field.Store.YES)); writer.addDocument(d1); Document d2 = new Document(); d2.add(newTextField("default", "one three", Field.Store.YES)); writer.addDocument(d2); Document d3 = new Document(); d3.add(newTextField("default", "ones two four", Field.Store.YES)); writer.addDocument(d3); writer.forceMerge(1); writer.commit(); writer.close(); DirectoryReader directoryReader; DirectoryReader exitableDirectoryReader; IndexReader reader; IndexSearcher searcher; Query query = new PrefixQuery(new Term("default", "o")); // Set a fairly high timeout value (infinite) and expect the query to complete in that time frame. // Not checking the validity of the result, all we are bothered about in this test is the timing out. directoryReader = DirectoryReader.open(directory); exitableDirectoryReader = new ExitableDirectoryReader(directoryReader, infiniteQueryTimeout()); reader = new TestReader(getOnlyLeafReader(exitableDirectoryReader)); searcher = new IndexSearcher(reader); searcher.search(query, 10); reader.close(); // Set a really low timeout value (immediate) and expect an Exception directoryReader = DirectoryReader.open(directory); exitableDirectoryReader = new ExitableDirectoryReader(directoryReader, immediateQueryTimeout()); reader = new TestReader(getOnlyLeafReader(exitableDirectoryReader)); IndexSearcher slowSearcher = new IndexSearcher(reader); expectThrows(ExitingReaderException.class, () -> { slowSearcher.search(query, 10); }); reader.close(); // Set maximum time out and expect the query to complete. // Not checking the validity of the result, all we are bothered about in this test is the timing out. directoryReader = DirectoryReader.open(directory); exitableDirectoryReader = new ExitableDirectoryReader(directoryReader, infiniteQueryTimeout()); reader = new TestReader(getOnlyLeafReader(exitableDirectoryReader)); searcher = new IndexSearcher(reader); searcher.search(query, 10); reader.close(); // Set a negative time allowed and expect the query to complete (should disable timeouts) // Not checking the validity of the result, all we are bothered about in this test is the timing out. 
directoryReader = DirectoryReader.open(directory); exitableDirectoryReader = new ExitableDirectoryReader(directoryReader, disabledQueryTimeout()); reader = new TestReader(getOnlyLeafReader(exitableDirectoryReader)); searcher = new IndexSearcher(reader); searcher.search(query, 10); reader.close(); directory.close(); } /** * Tests timing out of PointValues queries * * @throws Exception on error */ public void testExitablePointValuesIndexReader() throws Exception { Directory directory = newDirectory(); IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random()))); Document d1 = new Document(); d1.add(new IntPoint("default", 10)); writer.addDocument(d1); Document d2 = new Document(); d2.add(new IntPoint("default", 100)); writer.addDocument(d2); Document d3 = new Document(); d3.add(new IntPoint("default", 1000)); writer.addDocument(d3); writer.forceMerge(1); writer.commit(); writer.close(); DirectoryReader directoryReader; DirectoryReader exitableDirectoryReader; IndexReader reader; IndexSearcher searcher; Query query = IntPoint.newRangeQuery("default", 10, 20); // Set a fairly high timeout value (infinite) and expect the query to complete in that time frame. // Not checking the validity of the result, all we are bothered about in this test is the timing out. directoryReader = DirectoryReader.open(directory); exitableDirectoryReader = new ExitableDirectoryReader(directoryReader, infiniteQueryTimeout()); reader = new TestReader(getOnlyLeafReader(exitableDirectoryReader)); searcher = new IndexSearcher(reader); searcher.search(query, 10); reader.close(); // Set a really low timeout value (immediate) and expect an Exception directoryReader = DirectoryReader.open(directory); exitableDirectoryReader = new ExitableDirectoryReader(directoryReader, immediateQueryTimeout()); reader = new TestReader(getOnlyLeafReader(exitableDirectoryReader)); IndexSearcher slowSearcher = new IndexSearcher(reader); expectThrows(ExitingReaderException.class, () -> { slowSearcher.search(query, 10); }); reader.close(); // Set maximum time out and expect the query to complete. // Not checking the validity of the result, all we are bothered about in this test is the timing out. directoryReader = DirectoryReader.open(directory); exitableDirectoryReader = new ExitableDirectoryReader(directoryReader, infiniteQueryTimeout()); reader = new TestReader(getOnlyLeafReader(exitableDirectoryReader)); searcher = new IndexSearcher(reader); searcher.search(query, 10); reader.close(); // Set a negative time allowed and expect the query to complete (should disable timeouts) // Not checking the validity of the result, all we are bothered about in this test is the timing out. 
directoryReader = DirectoryReader.open(directory); exitableDirectoryReader = new ExitableDirectoryReader(directoryReader, disabledQueryTimeout()); reader = new TestReader(getOnlyLeafReader(exitableDirectoryReader)); searcher = new IndexSearcher(reader); searcher.search(query, 10); reader.close(); directory.close(); } private static QueryTimeout disabledQueryTimeout() { return new QueryTimeout() { @Override public boolean shouldExit() { return false; } @Override public boolean isTimeoutEnabled() { return false; } }; } private static QueryTimeout infiniteQueryTimeout() { return new QueryTimeout() { @Override public boolean shouldExit() { return false; } @Override public boolean isTimeoutEnabled() { return true; } }; } private static QueryTimeout immediateQueryTimeout() { return new QueryTimeout() { @Override public boolean shouldExit() { return true; } @Override public boolean isTimeoutEnabled() { return true; } }; } @FunctionalInterface interface DvFactory { DocValuesIterator create(LeafReader leaf) throws IOException; } public void testDocValues() throws IOException { Directory directory = newDirectory(); IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random()))); Document d1 = new Document(); addDVs(d1, 10); writer.addDocument(d1); Document d2 = new Document(); addDVs(d2, 100); writer.addDocument(d2); Document d3 = new Document(); addDVs(d3, 1000); writer.addDocument(d3); writer.forceMerge(1); writer.commit(); writer.close(); DirectoryReader directoryReader; DirectoryReader exitableDirectoryReader; for (DvFactory dvFactory : Arrays.<DvFactory>asList( (r) -> r.getSortedDocValues("sorted"), (r) -> r.getSortedSetDocValues("sortedset"), (r) -> r.getSortedNumericDocValues("sortednumeric"), (r) -> r.getNumericDocValues("numeric"), (r) -> r.getBinaryDocValues("binary") )) { directoryReader = DirectoryReader.open(directory); exitableDirectoryReader = new ExitableDirectoryReader(directoryReader, immediateQueryTimeout()); { IndexReader reader = new TestReader(getOnlyLeafReader(exitableDirectoryReader)); expectThrows(ExitingReaderException.class, () -> { LeafReader leaf = reader.leaves().get(0).reader(); DocValuesIterator iter = dvFactory.create(leaf); scan(leaf, iter); }); reader.close(); } directoryReader = DirectoryReader.open(directory); exitableDirectoryReader = new ExitableDirectoryReader(directoryReader, random().nextBoolean()? 
infiniteQueryTimeout() : disabledQueryTimeout()); { IndexReader reader = new TestReader(getOnlyLeafReader(exitableDirectoryReader)); final LeafReader leaf = reader.leaves().get(0).reader(); scan(leaf, dvFactory.create(leaf)); assertNull(leaf.getNumericDocValues("absent")); assertNull(leaf.getBinaryDocValues("absent")); assertNull(leaf.getSortedDocValues("absent")); assertNull(leaf.getSortedNumericDocValues("absent")); assertNull(leaf.getSortedSetDocValues("absent")); reader.close(); } } directory.close(); } static private void scan(LeafReader leaf, DocValuesIterator iter ) throws IOException { for (iter.nextDoc(); iter.docID()!=DocIdSetIterator.NO_MORE_DOCS && iter.docID()<leaf.maxDoc();) { final int nextDocId = iter.docID()+1; if (random().nextBoolean() && nextDocId<leaf.maxDoc()) { if(random().nextBoolean()) { iter.advance(nextDocId); } else { iter.advanceExact(nextDocId); } } else { iter.nextDoc(); } } } private void addDVs(Document d1, int i) { d1.add(new NumericDocValuesField("numeric", i)); d1.add(new BinaryDocValuesField("binary", new BytesRef(""+i))); d1.add(new SortedDocValuesField("sorted", new BytesRef(""+i))); d1.add(new SortedNumericDocValuesField("sortednumeric", i)); d1.add(new SortedSetDocValuesField("sortedset", new BytesRef(""+i))); } }
1
37,830
Here we compare against an expected call count of 3, because 3 TermsEnum instances are created: this is a PrefixQuery, so one TermsEnum is created for AutomatonQuery.intersect() (its next-call timeout check is skipped once), and then 2 TermsEnum are created for the 2 matching terms "one" and "ones". Would it be clearer to have a separate test method? We could index more docs, for example 50 with a prefix-generated term (e.g. "term" + increment). There would be a PrefixQuery for "term", the same test code, and we would exercise TIMEOUT_CHECK_SAMPLING:
- 1 TermsEnum for Automaton.intersect(), with the subsequent calls sampled (50 / TIMEOUT_CHECK_SAMPLING = 3) => call count +4
- 1 TermsEnum for each enumerated term => call count +50
That way we could verify a call count of 54 (instead of 101 without sampling). See the sketch after this entry.
apache-lucene-solr
java
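A rough sketch of the separate test method suggested in the comment above, assuming the CountingQueryTimeout helper added by the patch and the TIMEOUT_CHECK_SAMPLING constant the reviewer refers to; the method name and the 50-document setup are illustrative, the sleeping TestReader wrapper used by the existing tests is omitted for speed, and only the reduction versus the unsampled count is asserted because the exact figure depends on the sampling constant:

  public void testTimeoutCheckSampling() throws Exception {
    Directory directory = newDirectory();
    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random())));
    // Index 50 documents whose single term shares the prefix "term".
    for (int i = 0; i < 50; i++) {
      Document doc = new Document();
      doc.add(newTextField("default", "term" + i, Field.Store.NO));
      writer.addDocument(doc);
    }
    writer.forceMerge(1);
    writer.commit();
    writer.close();

    CountingQueryTimeout queryTimeout = new CountingQueryTimeout();
    DirectoryReader exitableDirectoryReader =
        new ExitableDirectoryReader(DirectoryReader.open(directory), queryTimeout);
    IndexSearcher searcher = new IndexSearcher(exitableDirectoryReader);
    searcher.search(new PrefixQuery(new Term("default", "term")), 10);

    // Per the reviewer's arithmetic: roughly 1 + 50 / TIMEOUT_CHECK_SAMPLING checks for the
    // intersect() enum plus 50 checks for the per-term enums, i.e. about 54 instead of 101
    // without sampling. Assert only the reduction, since the exact figure depends on the constant.
    assertTrue(queryTimeout.getShouldExitCallCount() < 101);

    exitableDirectoryReader.close();
    directory.close();
  }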
@@ -28,6 +28,9 @@ module Bolt when 'file' { flags: ACTION_OPTS + %w[tmpdir], banner: FILE_HELP } + when 'inventory' + { flags: OPTIONS[:inventory] + OPTIONS[:global] + %w[format inventoryfile boltdir configfile], + banner: INVENTORY_HELP } when 'plan' case action when 'convert'
1
# frozen_string_literal: true # Note this file includes very few 'requires' because it expects to be used from the CLI. require 'optparse' module Bolt class BoltOptionParser < OptionParser OPTIONS = { inventory: %w[nodes targets query rerun description], authentication: %w[user password private-key host-key-check ssl ssl-verify], escalation: %w[run-as sudo-password], run_context: %w[concurrency inventoryfile save-rerun], global_config_setters: %w[modulepath boltdir configfile], transports: %w[transport connect-timeout tty], display: %w[format color verbose trace], global: %w[help version debug] }.freeze ACTION_OPTS = OPTIONS.values.flatten.freeze def get_help_text(subcommand, action = nil) case subcommand when 'apply' { flags: ACTION_OPTS + %w[noop execute compile-concurrency], banner: APPLY_HELP } when 'command' { flags: ACTION_OPTS, banner: COMMAND_HELP } when 'file' { flags: ACTION_OPTS + %w[tmpdir], banner: FILE_HELP } when 'plan' case action when 'convert' { flags: OPTIONS[:global] + OPTIONS[:global_config_setters], banner: PLAN_CONVERT_HELP } when 'show' { flags: OPTIONS[:global] + OPTIONS[:global_config_setters], banner: PLAN_SHOW_HELP } when 'run' { flags: ACTION_OPTS + %w[params compile-concurrency tmpdir], banner: PLAN_RUN_HELP } else { flags: ACTION_OPTS + %w[params compile-concurrency tmpdir], banner: PLAN_HELP } end when 'puppetfile' case action when 'install' { flags: OPTIONS[:global] + OPTIONS[:global_config_setters], banner: PUPPETFILE_INSTALL_HELP } when 'show-modules' { flags: OPTIONS[:global] + OPTIONS[:global_config_setters], banner: PUPPETFILE_SHOWMODULES_HELP } else { flags: OPTIONS[:global] + OPTIONS[:global_config_setters], banner: PUPPETFILE_HELP } end when 'script' { flags: ACTION_OPTS + %w[tmpdir], banner: SCRIPT_HELP } when 'secret' { flags: OPTIONS[:global] + OPTIONS[:global_config_setters], banner: SECRET_HELP } when 'task' case action when 'show' { flags: OPTIONS[:global] + OPTIONS[:global_config_setters], banner: TASK_SHOW_HELP } when 'run' { flags: ACTION_OPTS + %w[params tmpdir], banner: TASK_RUN_HELP } else { flags: ACTION_OPTS + %w[params tmpdir], banner: TASK_HELP } end else { flags: OPTIONS[:global], banner: BANNER } end end def self.examples(cmd, desc) <<-EXAMP #{desc} a Windows host via WinRM, providing for the password bolt #{cmd} -n winrm://winhost -u Administrator -p #{desc} the local machine, a Linux host via SSH, and hosts from a group specified in an inventory file bolt #{cmd} -n localhost,nixhost,node_group #{desc} Windows hosts queried from PuppetDB via WinRM as a domain user, prompting for the password bolt #{cmd} -q 'inventory[certname] { facts.os.family = "windows" }' --transport winrm -u 'domain\\Administrator' -p EXAMP end BANNER = <<-HELP Usage: bolt <subcommand> <action> Available subcommands: bolt command run <command> Run a command remotely bolt file upload <src> <dest> Upload a local file or directory bolt script run <script> Upload a local script and run it remotely bolt task show Show list of available tasks bolt task show <task> Show documentation for task bolt task run <task> [params] Run a Puppet task bolt plan convert <plan_path> Convert a YAML plan to a Puppet plan bolt plan show Show list of available plans bolt plan show <plan> Show details for plan bolt plan run <plan> [params] Run a Puppet task plan bolt apply <manifest> Apply Puppet manifest code bolt puppetfile install Install modules from a Puppetfile into a Boltdir bolt puppetfile show-modules List modules available to Bolt bolt secret createkeys Create new encryption 
keys bolt secret encrypt <plaintext> Encrypt a value bolt secret decrypt <encrypted> Decrypt a value Run `bolt <subcommand> --help` to view specific examples. Available options are: HELP TASK_HELP = <<-HELP Usage: bolt task <action> <task> [parameters] Available actions are: show Show list of available tasks show <task> Show documentation for task run <task> Run a Puppet task Parameters are of the form <parameter>=<value>. #{examples('task run facts', 'run facter on')} Available options are: HELP TASK_SHOW_HELP = <<-HELP Usage: bolt task show <task> Available actions are: show Show list of available tasks show <task> Show documentation for task Available options are: HELP TASK_RUN_HELP = <<-HELP Usage: bolt task run <task> [parameters] Parameters are of the form <parameter>=<value>. #{examples('task run facts', 'run facter on')} Available options are: HELP COMMAND_HELP = <<-HELP Usage: bolt command <action> <command> Available actions are: run Run a command remotely #{examples('command run hostname', 'run hostname on')} Available options are: HELP SCRIPT_HELP = <<-HELP Usage: bolt script <action> <script> [[arg1] ... [argN]] Available actions are: run Upload a local script and run it remotely #{examples('script run my_script.ps1 some args', 'run a script on')} Available options are: HELP PLAN_HELP = <<-HELP Usage: bolt plan <action> <plan> [parameters] Available actions are: convert <plan_path> Convert a YAML plan to a Puppet plan show Show list of available plans show <plan> Show details for plan run Run a Puppet task plan Parameters are of the form <parameter>=<value>. #{examples('plan run canary command=hostname', 'run the canary plan on')} Available options are: HELP PLAN_CONVERT_HELP = <<-HELP Usage: bolt plan convert <plan_path> Available options are: HELP PLAN_SHOW_HELP = <<-HELP Usage: bolt plan show <plan> Available actions are: show Show list of available plans show <plan> Show details for plan Available options are: HELP PLAN_RUN_HELP = <<-HELP Usage: bolt plan run <plan> [parameters] Parameters are of the form <parameter>=<value>. #{examples('plan run canary command=hostname', 'run the canary plan on')} Available options are: HELP FILE_HELP = <<-HELP Usage: bolt file <action> Available actions are: upload <src> <dest> Upload local file or directory <src> to <dest> on each node #{examples('file upload /tmp/source /etc/profile.d/login.sh', 'upload a file to')} Available options are: HELP PUPPETFILE_HELP = <<-HELP Usage: bolt puppetfile <action> Available actions are: install Install modules from a Puppetfile into a Boltdir show-modules List modules available to Bolt Install modules into the local Boltdir bolt puppetfile install Available options are: HELP PUPPETFILE_INSTALL_HELP = <<-HELP Usage: bolt puppetfile install Install modules into the local Boltdir bolt puppetfile install Available options are: HELP PUPPETFILE_SHOWMODULES_HELP = <<-HELP Usage: bolt puppetfile show-modules Available options are: HELP APPLY_HELP = <<-HELP Usage: bolt apply <manifest.pp> #{examples('apply site.pp', 'apply a manifest on')} bolt apply site.pp --nodes foo.example.com,bar.example.com Available options are: HELP SECRET_HELP = <<~SECRET_HELP Manage secrets for inventory and hiera data. 
Available actions are: createkeys Create new encryption keys encrypt Encrypt a value decrypt Decrypt a value Available options are: SECRET_HELP def initialize(options) super() @options = options define('-n', '--nodes NODES', 'Alias for --targets') do |nodes| @options [:nodes] ||= [] @options[:nodes] << get_arg_input(nodes) end define('-t', '--targets TARGETS', 'Identifies the targets of command.', 'Enter a comma-separated list of target URIs or group names.', "Or read a target list from an input file '@<file>' or stdin '-'.", 'Example: --targets localhost,node_group,ssh://nix.com:23,winrm://windows.puppet.com', 'URI format is [protocol://]host[:port]', "SSH is the default protocol; may be #{TRANSPORTS.keys.join(', ')}", 'For Windows targets, specify the winrm:// protocol if it has not be configured', 'For SSH, port defaults to `22`', 'For WinRM, port defaults to `5985` or `5986` based on the --[no-]ssl setting') do |targets| @options[:targets] ||= [] @options[:targets] << get_arg_input(targets) end define('-q', '--query QUERY', 'Query PuppetDB to determine the targets') do |query| @options[:query] = query end define('--rerun FILTER', 'Retry on nodes from the last run', "'all' all nodes that were part of the last run.", "'failure' nodes that failed in the last run.", "'success' nodes that succeeded in the last run.") do |rerun| @options[:rerun] = rerun end define('--noop', 'Execute a task that supports it in noop mode') do |_| @options[:noop] = true end define('--description DESCRIPTION', 'Description to use for the job') do |description| @options[:description] = description end define('--params PARAMETERS', "Parameters to a task or plan as json, a json file '@<file>', or on stdin '-'") do |params| @options[:task_options] = parse_params(params) end define('-e', '--execute CODE', "Puppet manifest code to apply to the targets") do |code| @options[:code] = code end separator "\nAuthentication:" define('-u', '--user USER', 'User to authenticate as') do |user| @options[:user] = user end define('-p', '--password [PASSWORD]', 'Password to authenticate with. Omit the value to prompt for the password.') do |password| if password.nil? STDOUT.print "Please enter your password: " @options[:password] = STDIN.noecho(&:gets).chomp STDOUT.puts else @options[:password] = password end end define('--private-key KEY', 'Private ssh key to authenticate with') do |key| @options[:'private-key'] = key end define('--[no-]host-key-check', 'Check host keys with SSH') do |host_key_check| @options[:'host-key-check'] = host_key_check end define('--[no-]ssl', 'Use SSL with WinRM') do |ssl| @options[:ssl] = ssl end define('--[no-]ssl-verify', 'Verify remote host SSL certificate with WinRM') do |ssl_verify| @options[:'ssl-verify'] = ssl_verify end separator "\nEscalation:" define('--run-as USER', 'User to run as using privilege escalation') do |user| @options[:'run-as'] = user end define('--sudo-password [PASSWORD]', 'Password for privilege escalation. Omit the value to prompt for the password.') do |password| if password.nil? 
STDOUT.print "Please enter your privilege escalation password: " @options[:'sudo-password'] = STDIN.noecho(&:gets).chomp STDOUT.puts else @options[:'sudo-password'] = password end end separator "\nRun context:" define('-c', '--concurrency CONCURRENCY', Integer, 'Maximum number of simultaneous connections (default: 100)') do |concurrency| @options[:concurrency] = concurrency end define('--compile-concurrency CONCURRENCY', Integer, 'Maximum number of simultaneous manifest block compiles (default: number of cores)') do |concurrency| @options[:'compile-concurrency'] = concurrency end define('-m', '--modulepath MODULES', "List of directories containing modules, separated by '#{File::PATH_SEPARATOR}'") do |modulepath| # When specified from the CLI, modulepath entries are relative to pwd @options[:modulepath] = modulepath.split(File::PATH_SEPARATOR).map do |moduledir| File.expand_path(moduledir) end end define('--boltdir FILEPATH', 'Specify what Boltdir to load config from (default: autodiscovered from current working dir)') do |path| @options[:boltdir] = path end define('--configfile FILEPATH', 'Specify where to load config from (default: ~/.puppetlabs/bolt/bolt.yaml)') do |path| @options[:configfile] = path end define('-i', '--inventoryfile FILEPATH', 'Specify where to load inventory from (default: ~/.puppetlabs/bolt/inventory.yaml)') do |path| if ENV.include?(Bolt::Inventory::ENVIRONMENT_VAR) raise Bolt::CLIError, "Cannot pass inventory file when #{Bolt::Inventory::ENVIRONMENT_VAR} is set" end @options[:inventoryfile] = File.expand_path(path) end define('--[no-]save-rerun', 'Whether to update the rerun file after this command.') do |save| @options[:'save-rerun'] = save end separator "\nTransports:" define('--transport TRANSPORT', TRANSPORTS.keys.map(&:to_s), "Specify a default transport: #{TRANSPORTS.keys.join(', ')}") do |t| @options[:transport] = t end define('--connect-timeout TIMEOUT', Integer, 'Connection timeout (defaults vary)') do |timeout| @options[:'connect-timeout'] = timeout end define('--[no-]tty', 'Request a pseudo TTY on nodes that support it') do |tty| @options[:tty] = tty end define('--tmpdir DIR', 'The directory to upload and execute temporary files on the target') do |tmpdir| @options[:tmpdir] = tmpdir end separator "\nDisplay:" define('--format FORMAT', 'Output format to use: human or json') do |format| @options[:format] = format end define('--[no-]color', 'Whether to show output in color') do |color| @options[:color] = color end define('-v', '--[no-]verbose', 'Display verbose logging') do |value| @options[:verbose] = value end define('--trace', 'Display error stack traces') do |_| @options[:trace] = true end separator "\nGlobal:" define('-h', '--help', 'Display help') do |_| @options[:help] = true end define('--version', 'Display the version') do |_| puts Bolt::VERSION raise Bolt::CLIExit end define('--debug', 'Display debug logging') do |_| @options[:debug] = true end end def remove_excluded_opts(option_list) # Remove any options that are not available for the specified subcommand top.list.delete_if do |opt| opt.respond_to?(:switch_name) && !option_list.include?(opt.switch_name) end # Remove any separators if all options of that type have been removed top.list.delete_if do |opt| i = top.list.index(opt) opt.is_a?(String) && top.list[i + 1].is_a?(String) end end def update help_text = get_help_text(@options[:subcommand], @options[:action]) # Update the banner according to the subcommand self.banner = help_text[:banner] # Builds the option list for the specified subcommand 
and removes all excluded # options from the help text remove_excluded_opts(help_text[:flags]) end def parse_params(params) json = get_arg_input(params) JSON.parse(json) rescue JSON::ParserError => e raise Bolt::CLIError, "Unable to parse --params value as JSON: #{e}" end def get_arg_input(value) if value.start_with?('@') file = value.sub(/^@/, '') read_arg_file(file) elsif value == '-' STDIN.read else value end end def read_arg_file(file) File.read(File.expand_path(file)) rescue StandardError => e raise Bolt::FileError.new("Error attempting to read #{file}: #{e}", file) end end end
1
11,839
Add `--format` as an option. Does it make sense to have all of the display options available? We also need to add `--inventoryfile` and possibly the global_config_options.
puppetlabs-bolt
rb
@@ -53,5 +53,10 @@ namespace OpenTelemetry.Instrumentation.AspNetCore /// The type of this object depends on the event, which is given by the above parameter.</para> /// </remarks> public Action<Activity, string, object> Enrich { get; set; } + + /// <summary> + /// Gets or sets a value indicating whether the exception will be recorded or not. + /// </summary> + public bool RecordException { get; set; } } }
1
// <copyright file="AspNetCoreInstrumentationOptions.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Diagnostics; using Microsoft.AspNetCore.Http; using OpenTelemetry.Context.Propagation; namespace OpenTelemetry.Instrumentation.AspNetCore { /// <summary> /// Options for requests instrumentation. /// </summary> public class AspNetCoreInstrumentationOptions { /// <summary> /// Gets or sets <see cref="IPropagator"/> for context propagation. Default value: <see cref="CompositePropagator"/> with <see cref="TextMapPropagator"/> &amp; <see cref="BaggagePropagator"/>. /// </summary> public IPropagator Propagator { get; set; } = new CompositePropagator(new IPropagator[] { new TextMapPropagator(), new BaggagePropagator(), }); /// <summary> /// Gets or sets a Filter function to filter instrumentation for requests on a per request basis. /// The Filter gets the HttpContext, and should return a boolean. /// If Filter returns true, the request is collected. /// If Filter returns false or throw exception, the request is filtered out. /// </summary> public Func<HttpContext, bool> Filter { get; set; } /// <summary> /// Gets or sets an action to enrich an Activity. /// </summary> /// <remarks> /// <para><see cref="Activity"/>: the activity being enriched.</para> /// <para>string: the name of the event.</para> /// <para>object: the raw object from which additional information can be extracted to enrich the activity. /// The type of this object depends on the event, which is given by the above parameter.</para> /// </remarks> public Action<Activity, string, object> Enrich { get; set; } } }
1
17,659
Any thoughts on making this false by default? My suggestion is to make this opt-in. Storing exceptions is somewhat expensive, so let's do this only if users opt in.
open-telemetry-opentelemetry-dotnet
.cs
@@ -88,7 +88,7 @@ public final class HttpCall<V> extends Call.Base<V> { } else { try { callback.onSuccess(parseResponse(response, bodyConverter)); - } catch (IOException e) { + } catch (Throwable e) { callback.onError(e); } }
1
/* * Copyright 2015-2019 The OpenZipkin Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package zipkin2.elasticsearch.internal.client; import com.linecorp.armeria.client.HttpClient; import com.linecorp.armeria.common.AggregatedHttpRequest; import com.linecorp.armeria.common.AggregatedHttpResponse; import com.linecorp.armeria.common.HttpData; import com.linecorp.armeria.common.HttpResponse; import com.linecorp.armeria.common.HttpStatusClass; import com.linecorp.armeria.common.RequestContext; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufHolder; import io.netty.util.ReferenceCountUtil; import java.io.IOException; import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Semaphore; import zipkin2.Call; import zipkin2.Callback; public final class HttpCall<V> extends Call.Base<V> { public interface BodyConverter<V> { V convert(ByteBuffer content) throws IOException; } public static class Factory { final HttpClient httpClient; public Factory(HttpClient httpClient) { this.httpClient = httpClient; } public <V> HttpCall<V> newCall(AggregatedHttpRequest request, BodyConverter<V> bodyConverter) { return new HttpCall<>(httpClient, request, bodyConverter); } } public final AggregatedHttpRequest request; public final BodyConverter<V> bodyConverter; final HttpClient httpClient; volatile CompletableFuture<AggregatedHttpResponse> responseFuture; HttpCall(HttpClient httpClient, AggregatedHttpRequest request, BodyConverter<V> bodyConverter) { this.httpClient = httpClient; if (request.content() instanceof ByteBufHolder) { // Unfortunately it's not possible to use pooled objects in requests and support clone() after // sending the request. 
ByteBuf buf = ((ByteBufHolder) request.content()).content(); try { this.request = AggregatedHttpRequest.of( request.headers(), HttpData.copyOf(buf), request.trailers()); } finally { buf.release(); } } else { this.request = request; } this.bodyConverter = bodyConverter; } @Override protected V doExecute() throws IOException { AggregatedHttpResponse response = sendRequest().join(); return parseResponse(response, bodyConverter); } @Override protected void doEnqueue(Callback<V> callback) { sendRequest().handle((response, t) -> { if (t != null) { callback.onError(t); } else { try { callback.onSuccess(parseResponse(response, bodyConverter)); } catch (IOException e) { callback.onError(e); } } return null; }); } @Override protected void doCancel() { CompletableFuture<AggregatedHttpResponse> responseFuture = this.responseFuture; if (responseFuture != null) { responseFuture.cancel(false); } } @Override public HttpCall<V> clone() { return new HttpCall<>(httpClient, request, bodyConverter); } @Override public String toString() { return "HttpCall(" + request + ")"; } CompletableFuture<AggregatedHttpResponse> sendRequest() { HttpResponse response = httpClient.execute(request); CompletableFuture<AggregatedHttpResponse> responseFuture = RequestContext.mapCurrent( ctx -> response.aggregateWithPooledObjects(ctx.eventLoop(), ctx.alloc()), // This should never be used in practice since the module runs in an Armeria server. response::aggregate); this.responseFuture = responseFuture; return responseFuture; } <V> V parseResponse(AggregatedHttpResponse response, BodyConverter<V> bodyConverter) throws IOException { if (response.content().isEmpty()) { if (response.status().codeClass().equals(HttpStatusClass.SUCCESS)) { return null; } else { throw new IllegalStateException("response failed: " + response); } } HttpData content = response.content(); try { if (response.status().codeClass().equals(HttpStatusClass.SUCCESS)) { final ByteBuffer buf; if (content instanceof ByteBufHolder) { buf = ((ByteBufHolder) content).content().nioBuffer(); } else { buf = ByteBuffer.wrap(content.array()); } return bodyConverter.convert(buf); } else { throw new IllegalStateException( "response for " + request.path() + " failed: " + response.contentUtf8()); } } finally { ReferenceCountUtil.safeRelease(content); } } }
1
15,310
Usually I do either `IOException | RuntimeException`, or if catching Throwable, use propagateIfFatal (see the sketch after this entry).
openzipkin-zipkin
java
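To make the two alternatives in the comment above concrete, here is a minimal, self-contained Java sketch; Callback and ParsedSupplier are stand-ins for HttpCall's Callback<V> and its parseResponse(...) call, and propagateIfFatal is written out locally as an assumption of what such a helper typically does, not a reference to an existing zipkin utility:

  import java.io.IOException;

  // Illustrative only: these types model the doEnqueue() completion path, not zipkin APIs.
  final class FatalErrorHandlingSketch {
    interface Callback<V> {
      void onSuccess(V value);
      void onError(Throwable t);
    }

    interface ParsedSupplier<V> {
      V get() throws IOException; // models parseResponse(response, bodyConverter)
    }

    // Option 1: narrow multi-catch, mirroring `IOException | RuntimeException`.
    static <V> void completeNarrow(Callback<V> callback, ParsedSupplier<V> parse) {
      try {
        callback.onSuccess(parse.get());
      } catch (IOException | RuntimeException e) {
        callback.onError(e);
      }
    }

    // Option 2: catch Throwable, but rethrow JVM-fatal errors before notifying the callback.
    static <V> void completeBroad(Callback<V> callback, ParsedSupplier<V> parse) {
      try {
        callback.onSuccess(parse.get());
      } catch (Throwable t) {
        propagateIfFatal(t);
        callback.onError(t);
      }
    }

    // A sketch of what a typical propagateIfFatal helper does: never swallow fatal errors.
    static void propagateIfFatal(Throwable t) {
      if (t instanceof VirtualMachineError) throw (VirtualMachineError) t;
      if (t instanceof ThreadDeath) throw (ThreadDeath) t;
      if (t instanceof LinkageError) throw (LinkageError) t;
    }
  }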
@@ -24,10 +24,14 @@ class Overlayable(object): def __mul__(self, other): if type(other).__name__ == 'DynamicMap': - from ..util import Dynamic - def dynamic_mul(element): + from .spaces import Callable + def dynamic_mul(*args, **kwargs): + element = other[args] return self * element - return Dynamic(other, operation=dynamic_mul) + callback = Callable(callable_function=dynamic_mul, + inputs=[self, other]) + return other.clone(shared_data=False, callback=callback, + streams=[]) if isinstance(other, UniformNdMapping) and not isinstance(other, CompositeOverlay): items = [(k, self * v) for (k, v) in other.items()] return other.clone(items)
1
""" Supplies Layer and related classes that allow overlaying of Views, including Overlay. A Layer is the final extension of View base class that allows Views to be overlayed on top of each other. Also supplies ViewMap which is the primary multi-dimensional Map type for indexing, slicing and animating collections of Views. """ from functools import reduce import numpy as np import param from .dimension import Dimension, Dimensioned, ViewableElement from .ndmapping import UniformNdMapping from .layout import Composable, Layout from .util import sanitize_identifier, unique_array class Overlayable(object): """ Overlayable provides a mix-in class to support the mul operation for overlaying multiple elements. """ def __mul__(self, other): if type(other).__name__ == 'DynamicMap': from ..util import Dynamic def dynamic_mul(element): return self * element return Dynamic(other, operation=dynamic_mul) if isinstance(other, UniformNdMapping) and not isinstance(other, CompositeOverlay): items = [(k, self * v) for (k, v) in other.items()] return other.clone(items) self_item = [((self.group, self.label if self.label else 'I'), self)] other_items = (other.items() if isinstance(other, Overlay) else [((other.group, other.label if other.label else 'I'), other)]) return Overlay(items=Overlay.relabel_item_paths(list(self_item) + list(other_items))) class CompositeOverlay(ViewableElement, Composable): """ CompositeOverlay provides a common baseclass for Overlay classes. """ _deep_indexable = True def hist(self, index=0, adjoin=True, dimension=None, **kwargs): valid_ind = isinstance(index, int) and (0 <= index < len(self)) valid_label = index in [el.label for el in self] if not any([valid_ind, valid_label]): raise TypeError("Please supply a suitable index or label for the histogram data") hists = self.get(index).hist(adjoin=False, dimension=dimension, **kwargs) if not isinstance(hists, Layout): hists = [hists] if not isinstance(dimension, list): dimension = ['Default'] if adjoin: layout = self for hist in hists: layout = layout << hist layout.main_layer = index elif len(dimension) > 1: layout = hists else: layout = hists[0] return layout def dimension_values(self, dimension, expanded=True, flat=True): values = [] found = False for el in self: if dimension in el.dimensions(label=True): values.append(el.dimension_values(dimension)) found = True if not found: return super(CompositeOverlay, self).dimension_values(dimension, expanded, flat) values = [v for v in values if v is not None and len(v)] if not values: return np.array() vals = np.concatenate(values) return vals if expanded else unique_array(vals) class Overlay(Layout, CompositeOverlay): """ An Overlay consists of multiple Views (potentially of heterogeneous type) presented one on top each other with a particular z-ordering. Overlays along with Views constitute the only valid leaf types of a Layout and in fact extend the Layout structure. Overlays are constructed using the * operator (building an identical structure to the + operator) and are the only objects that inherit both from Layout and CompositeOverlay. 
""" @classmethod def _from_values(cls, val): return reduce(lambda x,y: x*y, val).map(lambda x: x.display('auto'), [Overlay]) def __init__(self, items=None, group=None, label=None, **params): view_params = ViewableElement.params().keys() self.__dict__['_fixed'] = False self.__dict__['_group'] = group self.__dict__['_label'] = label Layout.__init__(self, items, **{k:v for k,v in params.items() if k not in view_params}) ViewableElement.__init__(self, self.data, **{k:v for k,v in params.items() if k in view_params}) def __getitem__(self, key): """ Allows transparently slicing the Elements in the Overlay to select specific layers in an Overlay use the .get method. """ return Overlay([(k, v[key]) for k, v in self.items()]) def get(self, identifier, default=None): if isinstance(identifier, int): values = list(self.data.values()) if 0 <= identifier < len(values): return values[identifier] else: return default return super(Overlay, self).get(identifier, default) def __add__(self, other): return Layout.from_values(self) + Layout.from_values(other) def __mul__(self, other): if isinstance(other, Overlay): items = list(self.data.items()) + list(other.data.items()) elif isinstance(other, ViewableElement): label = other.label if other.label else 'I' items = list(self.data.items()) + [((other.group, label), other)] elif isinstance(other, UniformNdMapping): raise NotImplementedError return Overlay(items=self.relabel_item_paths(items)).display('all') def collate(self): """ Collates any objects in the Overlay resolving any issues the recommended nesting structure. """ return reduce(lambda x,y: x*y, self.values()) def collapse(self, function): """ Collapses all the Elements in the Overlay using the supplied function if they share a common type and group. """ elements = list(self) types = [type(el) for el in elements] values = [el.group for el in elements] if not len(set(types)) == 1 and len(set(values)) == 1: raise Exception("Overlay is not homogenous in type or group " "and cannot be collapsed.") else: return elements[0].clone(types[0].collapse_data([el.data for el in elements], function, self.kdims)) @property def group(self): if self._group: return self._group elements = [el for el in self if not el._auxiliary_component] values = {el.group for el in elements} types = {type(el) for el in elements} if values: group = list(values)[0] vtype = list(types)[0].__name__ else: group, vtype = [], '' if len(values) == 1 and group != vtype: return group else: return type(self).__name__ @group.setter def group(self, group): if not sanitize_identifier.allowable(group): raise ValueError("Supplied group %s contains invalid characters." % group) else: self._group = group @property def label(self): if self._label: return self._label labels = {el.label for el in self if not el._auxiliary_component} if len(labels) == 1: return list(labels)[0] else: return '' @label.setter def label(self, label): if not sanitize_identifier.allowable(label): raise ValueError("Supplied group %s contains invalid characters." % label) self._label = label @property def ddims(self): dimensions = [] dimension_names = [] for el in self: for dim in el.dimensions(): if dim.name not in dimension_names: dimensions.append(dim) dimension_names.append(dim.name) return dimensions @property def shape(self): raise NotImplementedError class NdOverlay(UniformNdMapping, CompositeOverlay, Overlayable): """ An NdOverlay allows a group of NdOverlay to be overlaid together. 
NdOverlay can be indexed out of an overlay and an overlay is an iterable that iterates over the contained layers. """ kdims = param.List(default=[Dimension('Element')], constant=True, doc=""" List of dimensions the NdOverlay can be indexed by.""") _deep_indexable = True def __init__(self, overlays=None, **params): super(NdOverlay, self).__init__(overlays, **params) __all__ = list(set([_k for _k, _v in locals().items() if isinstance(_v, type) and issubclass(_v, Dimensioned)])) + ['Overlayable']
1
15761
Wondering whether it should be `self.clone`, `other.clone`, or maybe a new `DynamicMap` declaration entirely. I see this is in the condition where `other` is a `DynamicMap`, but is this definitely right in terms of `kdims`? I need to think about it more...
holoviz-holoviews
py
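A minimal sketch of the call path questioned in the review comment above, assuming holoviews and numpy are installed. The element names (`static`, `sine`) are illustrative only, and the expected kdims noted in the final comment are an assumption based on the patch cloning `other` (the `DynamicMap` operand), not something stated in the row.

# Sketch (assumes holoviews + numpy): Element * DynamicMap takes the
# `type(other).__name__ == 'DynamicMap'` branch of Overlayable.__mul__.
import numpy as np
import holoviews as hv

static = hv.Curve([(0, 0), (1, 1)], label='static')

def sine(phase):
    xs = np.linspace(0, 2 * np.pi, 100)
    return hv.Curve((xs, np.sin(xs + phase)), label='dynamic')

# The key dimensions live on the DynamicMap operand ('other' in the patch),
# not on the static element being overlaid.
dmap = hv.DynamicMap(sine, kdims=[hv.Dimension('phase', range=(0, 2 * np.pi))])

# With the patched code the result is other.clone(...), so it should keep
# dmap's kdims while each frame evaluates `static * sine(phase)`.
overlay = static * dmap
print(overlay.kdims)  # assumption: [Dimension('phase')]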
@@ -34,7 +34,7 @@ public interface SolrInfoBean {
    * Category of Solr component.
    */
   enum Category { CONTAINER, ADMIN, CORE, QUERY, UPDATE, CACHE, HIGHLIGHTER, QUERYPARSER, SPELLCHECKER,
-                  SEARCHER, REPLICATION, TLOG, INDEX, DIRECTORY, HTTP, OTHER }
+                  SEARCHER, REPLICATION, TLOG, INDEX, DIRECTORY, HTTP, SECURITY, OTHER }
 
   /**
    * Top-level group of beans or metrics for a subsystem.
1
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.solr.core;

import java.util.Map;
import java.util.Set;

import com.codahale.metrics.MetricRegistry;
import org.apache.solr.metrics.SolrMetricManager;
import org.apache.solr.util.stats.MetricUtils;

/**
 * Interface for getting various ui friendly strings
 * for use by objects which are 'pluggable' to make server administration
 * easier.
 */
public interface SolrInfoBean {

  /**
   * Category of Solr component.
   */
  enum Category { CONTAINER, ADMIN, CORE, QUERY, UPDATE, CACHE, HIGHLIGHTER, QUERYPARSER, SPELLCHECKER,
                  SEARCHER, REPLICATION, TLOG, INDEX, DIRECTORY, HTTP, OTHER }

  /**
   * Top-level group of beans or metrics for a subsystem.
   */
  enum Group { jvm, jetty, node, core, collection, shard, cluster, overseer }

  /**
   * Simple common usage name, e.g. BasicQueryHandler,
   * or fully qualified class name.
   */
  String getName();

  /** Simple one or two line description */
  String getDescription();

  /** Category of this component */
  Category getCategory();

  /** Optionally return a snapshot of metrics that this component reports, or null.
   * Default implementation requires that both {@link #getMetricNames()} and
   * {@link #getMetricRegistry()} return non-null values.
   */
  default Map<String, Object> getMetricsSnapshot() {
    if (getMetricRegistry() == null || getMetricNames() == null) {
      return null;
    }
    return MetricUtils.convertMetrics(getMetricRegistry(), getMetricNames());
  }

  /**
   * Modifiable set of metric names that this component reports (default is null,
   * which means none). If not null then this set is used by {@link #registerMetricName(String)}
   * to capture what metrics names are reported from this component.
   * <p><b>NOTE: this set has to allow iteration under modifications.</b></p>
   */
  default Set<String> getMetricNames() {
    return null;
  }

  /**
   * An instance of {@link MetricRegistry} that this component uses for metrics reporting
   * (default is null, which means no registry).
   */
  default MetricRegistry getMetricRegistry() {
    return null;
  }

  /** Register a metric name that this component reports. This method is called by various
   * metric registration methods in {@link org.apache.solr.metrics.SolrMetricManager} in order
   * to capture what metric names are reported from this component (which in turn is called
   * from {@link org.apache.solr.metrics.SolrMetricProducer#initializeMetrics(SolrMetricManager, String, String, String)}).
   * <p>Default implementation registers all metrics added by a component. Implementations may
   * override this to avoid reporting some or all metrics returned by {@link #getMetricsSnapshot()}</p>
   */
  default void registerMetricName(String name) {
    Set<String> names = getMetricNames();
    if (names != null) {
      names.add(name);
    }
  }
}
1
27773
I chose a new category because, over time, I'd like to also add metrics for Authorization plugins and Auditlog plugins (all components registered in security.json). An alternative could have been `CONTAINER`, I guess?
apache-lucene-solr
java