Columns (name, dtype, min to max):
- patch: string, lengths 17 to 31.2k
- y: int64, values 1 to 1
- oldf: string, lengths 0 to 2.21M
- idx: int64, values 1 to 1
- id: int64, values 4.29k to 68.4k
- msg: string, lengths 8 to 843
- proj: string, 212 distinct values
- lang: string, 9 distinct values
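Taken together, the columns describe one commit record per row. Below is a minimal Go sketch of that row shape; the field names and dtypes come from the column list above, while the comments on y and idx are assumptions, since their meaning is not documented in this extract.

```go
package main

import "fmt"

// CommitRecord mirrors the column list above. Only the names and dtypes are
// taken from the schema; what y and idx encode is not stated here, so they
// are carried through uninterpreted.
type CommitRecord struct {
	Patch string // unified diff against OldF
	Y     int64
	OldF  string // full contents of the file before the patch
	Idx   int64
	ID    int64
	Msg   string // commit message
	Proj  string // project label, one of 212 classes
	Lang  string // language label, one of 9 classes
}

func main() {
	// Values taken from the first row shown below.
	r := CommitRecord{
		Y:    1,
		Idx:  1,
		ID:   19320,
		Msg:  "I'll replace this with the actual outbox object soon.",
		Proj: "filecoin-project-venus",
		Lang: "go",
	}
	fmt.Printf("%d %s/%s: %s\n", r.ID, r.Proj, r.Lang, r.Msg)
}
```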
Row 1

patch:
@@ -119,7 +119,7 @@ type Node struct {
 	// Incoming messages for block mining.
 	MsgPool *core.MessagePool
 	// Messages sent and not yet mined.
-	Outbox *core.MessageQueue
+	MsgQueue *core.MessageQueue
 	Wallet *wallet.Wallet
y: 1

oldf:
package node import ( "context" "encoding/json" "fmt" "os" "sync" "time" ps "github.com/cskr/pubsub" "github.com/ipfs/go-bitswap" bsnet "github.com/ipfs/go-bitswap/network" bserv "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-hamt-ipld" bstore "github.com/ipfs/go-ipfs-blockstore" "github.com/ipfs/go-ipfs-exchange-interface" "github.com/ipfs/go-ipfs-exchange-offline" offroute "github.com/ipfs/go-ipfs-routing/offline" logging "github.com/ipfs/go-log" "github.com/ipfs/go-merkledag" "github.com/libp2p/go-libp2p" autonatsvc "github.com/libp2p/go-libp2p-autonat-svc" circuit "github.com/libp2p/go-libp2p-circuit" "github.com/libp2p/go-libp2p-host" "github.com/libp2p/go-libp2p-kad-dht" "github.com/libp2p/go-libp2p-kad-dht/opts" p2pmetrics "github.com/libp2p/go-libp2p-metrics" libp2ppeer "github.com/libp2p/go-libp2p-peer" dhtprotocol "github.com/libp2p/go-libp2p-protocol" libp2pps "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p-routing" rhost "github.com/libp2p/go-libp2p/p2p/host/routed" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" "github.com/filecoin-project/go-filecoin/actor/builtin" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/chain" "github.com/filecoin-project/go-filecoin/config" "github.com/filecoin-project/go-filecoin/consensus" "github.com/filecoin-project/go-filecoin/core" "github.com/filecoin-project/go-filecoin/flags" "github.com/filecoin-project/go-filecoin/metrics" "github.com/filecoin-project/go-filecoin/mining" "github.com/filecoin-project/go-filecoin/net" "github.com/filecoin-project/go-filecoin/net/pubsub" "github.com/filecoin-project/go-filecoin/paths" "github.com/filecoin-project/go-filecoin/plumbing" "github.com/filecoin-project/go-filecoin/plumbing/bcf" "github.com/filecoin-project/go-filecoin/plumbing/cfg" "github.com/filecoin-project/go-filecoin/plumbing/dag" "github.com/filecoin-project/go-filecoin/plumbing/msg" "github.com/filecoin-project/go-filecoin/plumbing/strgdls" "github.com/filecoin-project/go-filecoin/porcelain" "github.com/filecoin-project/go-filecoin/proofs" "github.com/filecoin-project/go-filecoin/proofs/sectorbuilder" "github.com/filecoin-project/go-filecoin/protocol/block" "github.com/filecoin-project/go-filecoin/protocol/hello" "github.com/filecoin-project/go-filecoin/protocol/retrieval" "github.com/filecoin-project/go-filecoin/protocol/storage" "github.com/filecoin-project/go-filecoin/repo" "github.com/filecoin-project/go-filecoin/sampling" "github.com/filecoin-project/go-filecoin/state" "github.com/filecoin-project/go-filecoin/types" "github.com/filecoin-project/go-filecoin/wallet" ) const ( filecoinDHTProtocol dhtprotocol.ID = "/fil/kad/1.0.0" ) var log = logging.Logger("node") // nolint: deadcode var ( // ErrNoMinerAddress is returned when the node is not configured to have any miner addresses. ErrNoMinerAddress = errors.New("no miner addresses configured") ) type pubSubProcessorFunc func(ctx context.Context, msg pubsub.Message) error type nodeChainReader interface { GenesisCid() cid.Cid GetBlock(context.Context, cid.Cid) (*types.Block, error) GetHead() types.SortedCidSet GetTipSet(types.SortedCidSet) (*types.TipSet, error) GetTipSetStateRoot(tsKey types.SortedCidSet) (cid.Cid, error) HeadEvents() *ps.PubSub Load(context.Context) error Stop() } // Node represents a full Filecoin node. 
type Node struct { host host.Host PeerHost host.Host Consensus consensus.Protocol ChainReader nodeChainReader Syncer chain.Syncer PowerTable consensus.PowerTableView BlockMiningAPI *block.MiningAPI PorcelainAPI *porcelain.API RetrievalAPI *retrieval.API StorageAPI *storage.API // HeavyTipSetCh is a subscription to the heaviest tipset topic on the chain. HeaviestTipSetCh chan interface{} // HeavyTipSetHandled is a hook for tests because pubsub notifications // arrive async. It's called after handling a new heaviest tipset. // Remove this after replacing the tipset "pubsub" with a synchronous event bus: // https://github.com/filecoin-project/go-filecoin/issues/2309 HeaviestTipSetHandled func() // Incoming messages for block mining. MsgPool *core.MessagePool // Messages sent and not yet mined. Outbox *core.MessageQueue Wallet *wallet.Wallet // Mining stuff. AddNewlyMinedBlock newBlockFunc blockTime time.Duration cancelMining context.CancelFunc MiningWorker mining.Worker MiningScheduler mining.Scheduler mining struct { sync.Mutex isMining bool } miningCtx context.Context miningDoneWg *sync.WaitGroup // Storage Market Interfaces StorageMiner *storage.Miner // Retrieval Interfaces RetrievalMiner *retrieval.Miner // Network Fields BlockSub pubsub.Subscription MessageSub pubsub.Subscription HelloSvc *hello.Handler Bootstrapper *net.Bootstrapper // Data Storage Fields // Repo is the repo this node was created with // it contains all persistent artifacts of the filecoin node Repo repo.Repo // SectorBuilder is used by the miner to fill and seal sectors. sectorBuilder sectorbuilder.SectorBuilder // Fetcher is the interface for fetching data from nodes. Fetcher *net.Fetcher // Exchange is the interface for fetching data from other nodes. Exchange exchange.Interface // Blockstore is the un-networked blocks interface Blockstore bstore.Blockstore // Blockservice is a higher level interface for fetching data blockservice bserv.BlockService // CborStore is a temporary interface for interacting with IPLD objects. cborStore *hamt.CborIpldStore // cancelSubscriptionsCtx is a handle to cancel the block and message subscriptions. cancelSubscriptionsCtx context.CancelFunc // OfflineMode, when true, disables libp2p OfflineMode bool // Router is a router from IPFS Router routing.IpfsRouting } // Config is a helper to aid in the construction of a filecoin node. type Config struct { BlockTime time.Duration Libp2pOpts []libp2p.Option OfflineMode bool Verifier proofs.Verifier Rewarder consensus.BlockRewarder Repo repo.Repo IsRelay bool } // ConfigOpt is a configuration option for a filecoin node. type ConfigOpt func(*Config) error // OfflineMode enables or disables offline mode. func OfflineMode(offlineMode bool) ConfigOpt { return func(c *Config) error { c.OfflineMode = offlineMode return nil } } // IsRelay configures node to act as a libp2p relay. func IsRelay() ConfigOpt { return func(c *Config) error { c.IsRelay = true return nil } } // BlockTime sets the blockTime. 
func BlockTime(blockTime time.Duration) ConfigOpt { return func(c *Config) error { c.BlockTime = blockTime return nil } } // Libp2pOptions returns a node config option that sets up the libp2p node func Libp2pOptions(opts ...libp2p.Option) ConfigOpt { return func(nc *Config) error { // Quietly having your options overridden leads to hair loss if len(nc.Libp2pOpts) > 0 { panic("Libp2pOptions can only be called once") } nc.Libp2pOpts = opts return nil } } // VerifierConfigOption returns a function that sets the verifier to use in the node consensus func VerifierConfigOption(verifier proofs.Verifier) ConfigOpt { return func(c *Config) error { c.Verifier = verifier return nil } } // RewarderConfigOption returns a function that sets the rewarder to use in the node consensus func RewarderConfigOption(rewarder consensus.BlockRewarder) ConfigOpt { return func(c *Config) error { c.Rewarder = rewarder return nil } } // New creates a new node. func New(ctx context.Context, opts ...ConfigOpt) (*Node, error) { n := &Config{} for _, o := range opts { if err := o(n); err != nil { return nil, err } } return n.Build(ctx) } type blankValidator struct{} func (blankValidator) Validate(_ string, _ []byte) error { return nil } func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil } // readGenesisCid is a helper function that queries the provided datastore for // an entry with the genesisKey cid, returning if found. func readGenesisCid(ds datastore.Datastore) (cid.Cid, error) { bb, err := ds.Get(chain.GenesisKey) if err != nil { return cid.Undef, errors.Wrap(err, "failed to read genesisKey") } var c cid.Cid err = json.Unmarshal(bb, &c) if err != nil { return cid.Undef, errors.Wrap(err, "failed to cast genesisCid") } return c, nil } // buildHost determines if we are publically dialable. If so use public // Address, if not configure node to announce relay address. func (nc *Config) buildHost(ctx context.Context, makeDHT func(host host.Host) (routing.IpfsRouting, error)) (host.Host, error) { // Node must build a host acting as a libp2p relay. Additionally it // runs the autoNAT service which allows other nodes to check for their // own dialability by having this node attempt to dial them. makeDHTRightType := func(h host.Host) (routing.PeerRouting, error) { return makeDHT(h) } if nc.IsRelay { cfg := nc.Repo.Config() publicAddr, err := ma.NewMultiaddr(cfg.Swarm.PublicRelayAddress) if err != nil { return nil, err } publicAddrFactory := func(lc *libp2p.Config) error { lc.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr { if cfg.Swarm.PublicRelayAddress == "" { return addrs } return append(addrs, publicAddr) } return nil } relayHost, err := libp2p.New( ctx, libp2p.EnableRelay(circuit.OptHop), libp2p.EnableAutoRelay(), libp2p.Routing(makeDHTRightType), publicAddrFactory, libp2p.ChainOptions(nc.Libp2pOpts...), ) if err != nil { return nil, err } // Set up autoNATService as a streamhandler on the host. _, err = autonatsvc.NewAutoNATService(ctx, relayHost) if err != nil { return nil, err } return relayHost, nil } return libp2p.New( ctx, libp2p.EnableAutoRelay(), libp2p.Routing(makeDHTRightType), libp2p.ChainOptions(nc.Libp2pOpts...), ) } // Build instantiates a filecoin Node from the settings specified in the config. 
func (nc *Config) Build(ctx context.Context) (*Node, error) { if nc.Repo == nil { nc.Repo = repo.NewInMemoryRepo() } bs := bstore.NewBlockstore(nc.Repo.Datastore()) validator := blankValidator{} var peerHost host.Host var router routing.IpfsRouting bandwidthTracker := p2pmetrics.NewBandwidthCounter() nc.Libp2pOpts = append(nc.Libp2pOpts, libp2p.BandwidthReporter(bandwidthTracker)) if !nc.OfflineMode { makeDHT := func(h host.Host) (routing.IpfsRouting, error) { r, err := dht.New( ctx, h, dhtopts.Datastore(nc.Repo.Datastore()), dhtopts.NamespacedValidator("v", validator), dhtopts.Protocols(filecoinDHTProtocol), ) if err != nil { return nil, errors.Wrap(err, "failed to setup routing") } router = r return r, err } var err error peerHost, err = nc.buildHost(ctx, makeDHT) if err != nil { return nil, err } } else { router = offroute.NewOfflineRouter(nc.Repo.Datastore(), validator) peerHost = rhost.Wrap(noopLibP2PHost{}, router) } // set up pinger pingService := ping.NewPingService(peerHost) // set up bitswap nwork := bsnet.NewFromIpfsHost(peerHost, router) //nwork := bsnet.NewFromIpfsHost(innerHost, router) bswap := bitswap.New(ctx, nwork, bs) bservice := bserv.New(bs, bswap) fetcher := net.NewFetcher(ctx, bservice) cstOffline := hamt.CborIpldStore{Blocks: bserv.New(bs, offline.Exchange(bs))} genCid, err := readGenesisCid(nc.Repo.Datastore()) if err != nil { return nil, err } // set up chainstore chainStore := chain.NewDefaultStore(nc.Repo.ChainDatastore(), genCid) powerTable := &consensus.MarketView{} // set up processor var processor consensus.Processor if nc.Rewarder == nil { processor = consensus.NewDefaultProcessor() } else { processor = consensus.NewConfiguredProcessor(consensus.NewDefaultMessageValidator(), nc.Rewarder) } // set up consensus var nodeConsensus consensus.Protocol if nc.Verifier == nil { nodeConsensus = consensus.NewExpected(&cstOffline, bs, processor, powerTable, genCid, &proofs.RustVerifier{}) } else { nodeConsensus = consensus.NewExpected(&cstOffline, bs, processor, powerTable, genCid, nc.Verifier) } chainFacade := bcf.NewBlockChainFacade(chainStore, &cstOffline) // only the syncer gets the storage which is online connected chainSyncer := chain.NewDefaultSyncer(&cstOffline, nodeConsensus, chainStore, fetcher) msgPool := core.NewMessagePool(chainStore, nc.Repo.Config().Mpool, consensus.NewIngestionValidator(chainFacade, nc.Repo.Config().Mpool)) outbox := core.NewMessageQueue() // Set up libp2p pubsub fsub, err := libp2pps.NewFloodSub(ctx, peerHost) if err != nil { return nil, errors.Wrap(err, "failed to set up pubsub") } backend, err := wallet.NewDSBackend(nc.Repo.WalletDatastore()) if err != nil { return nil, errors.Wrap(err, "failed to set up wallet backend") } fcWallet := wallet.New(backend) PorcelainAPI := porcelain.New(plumbing.New(&plumbing.APIDeps{ Bitswap: bswap, Chain: chainFacade, Config: cfg.NewConfig(nc.Repo), DAG: dag.NewDAG(merkledag.NewDAGService(bservice)), Deals: strgdls.New(nc.Repo.DealsDatastore()), MsgPool: msgPool, MsgPreviewer: msg.NewPreviewer(fcWallet, chainStore, &cstOffline, bs), MsgQueryer: msg.NewQueryer(nc.Repo, fcWallet, chainStore, &cstOffline, bs), MsgSender: msg.NewSender(fcWallet, chainStore, &cstOffline, chainStore, outbox, msgPool, consensus.NewOutboundMessageValidator(), fsub.Publish), MsgWaiter: msg.NewWaiter(chainStore, bs, &cstOffline), Network: net.New(peerHost, pubsub.NewPublisher(fsub), pubsub.NewSubscriber(fsub), net.NewRouter(router), bandwidthTracker, net.NewPinger(peerHost, pingService)), Outbox: outbox, Wallet: fcWallet, })) 
nd := &Node{ blockservice: bservice, Blockstore: bs, cborStore: &cstOffline, Consensus: nodeConsensus, ChainReader: chainStore, Syncer: chainSyncer, PowerTable: powerTable, PorcelainAPI: PorcelainAPI, Fetcher: fetcher, Exchange: bswap, host: peerHost, MsgPool: msgPool, Outbox: outbox, OfflineMode: nc.OfflineMode, PeerHost: peerHost, Repo: nc.Repo, Wallet: fcWallet, blockTime: nc.BlockTime, Router: router, } // Bootstrapping network peers. periodStr := nd.Repo.Config().Bootstrap.Period period, err := time.ParseDuration(periodStr) if err != nil { return nil, errors.Wrapf(err, "couldn't parse bootstrap period %s", periodStr) } // Bootstrapper maintains connections to some subset of addresses ba := nd.Repo.Config().Bootstrap.Addresses bpi, err := net.PeerAddrsToPeerInfos(ba) if err != nil { return nil, errors.Wrapf(err, "couldn't parse bootstrap addresses [%s]", ba) } minPeerThreshold := nd.Repo.Config().Bootstrap.MinPeerThreshold nd.Bootstrapper = net.NewBootstrapper(bpi, nd.Host(), nd.Host().Network(), nd.Router, minPeerThreshold, period) return nd, nil } // Start boots up the node. func (node *Node) Start(ctx context.Context) error { if err := metrics.RegisterPrometheusEndpoint(node.Repo.Config().Observability.Metrics); err != nil { return errors.Wrap(err, "failed to setup metrics") } if err := metrics.RegisterJaeger(node.host.ID().Pretty(), node.Repo.Config().Observability.Tracing); err != nil { return errors.Wrap(err, "failed to setup tracing") } var err error if err = node.ChainReader.Load(ctx); err != nil { return err } // Only set these up if there is a miner configured. if _, err := node.miningAddress(); err == nil { if err := node.setupMining(ctx); err != nil { log.Errorf("setup mining failed: %v", err) return err } } // Start up 'hello' handshake service syncCallBack := func(pid libp2ppeer.ID, cids []cid.Cid, height uint64) { cidSet := types.NewSortedCidSet(cids...) 
err := node.Syncer.HandleNewTipset(context.Background(), cidSet) if err != nil { log.Infof("error handling blocks: %s", cidSet.String()) } } node.HelloSvc = hello.New(node.Host(), node.ChainReader.GenesisCid(), syncCallBack, node.PorcelainAPI.ChainHead, node.Repo.Config().Net, flags.Commit) err = node.setupProtocols() if err != nil { return errors.Wrap(err, "failed to set up protocols:") } node.RetrievalMiner = retrieval.NewMiner(node) // subscribe to block notifications blkSub, err := node.PorcelainAPI.PubSubSubscribe(BlockTopic) if err != nil { return errors.Wrap(err, "failed to subscribe to blocks topic") } node.BlockSub = blkSub // subscribe to message notifications msgSub, err := node.PorcelainAPI.PubSubSubscribe(msg.Topic) if err != nil { return errors.Wrap(err, "failed to subscribe to message topic") } node.MessageSub = msgSub cctx, cancel := context.WithCancel(context.Background()) node.cancelSubscriptionsCtx = cancel go node.handleSubscription(cctx, node.processBlock, "processBlock", node.BlockSub, "BlockSub") go node.handleSubscription(cctx, node.processMessage, "processMessage", node.MessageSub, "MessageSub") outboxPolicy := core.NewMessageQueuePolicy(node.Outbox, node.ChainReader, core.OutboxMaxAgeRounds) node.HeaviestTipSetHandled = func() {} node.HeaviestTipSetCh = node.ChainReader.HeadEvents().Sub(chain.NewHeadTopic) head, err := node.PorcelainAPI.ChainHead() if err != nil { return errors.Wrap(err, "failed to get chain head") } go node.handleNewHeaviestTipSet(cctx, *head, outboxPolicy) if !node.OfflineMode { node.Bootstrapper.Start(context.Background()) } if err := node.setupHeartbeatServices(ctx); err != nil { return errors.Wrap(err, "failed to start heartbeat services") } return nil } func (node *Node) setupHeartbeatServices(ctx context.Context) error { mag := func() address.Address { addr, err := node.miningAddress() // the only error miningAddress() returns is ErrNoMinerAddress. // if there is no configured miner address, simply send a zero // address across the wire. if err != nil { return address.Undef } return addr } // start the primary heartbeat service if len(node.Repo.Config().Heartbeat.BeatTarget) > 0 { hbs := metrics.NewHeartbeatService(node.Host(), node.Repo.Config().Heartbeat, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag)) go hbs.Start(ctx) } // check if we want to connect to an alert service. An alerting service is a heartbeat // service that can trigger alerts based on the contents of heatbeats. if alertTarget := os.Getenv("FIL_HEARTBEAT_ALERTS"); len(alertTarget) > 0 { ahbs := metrics.NewHeartbeatService(node.Host(), &config.HeartbeatConfig{ BeatTarget: alertTarget, BeatPeriod: "10s", ReconnectPeriod: "10s", Nickname: node.Repo.Config().Heartbeat.Nickname, }, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag)) go ahbs.Start(ctx) } return nil } func (node *Node) setupMining(ctx context.Context) error { // initialize a sector builder sectorBuilder, err := initSectorBuilderForNode(ctx, node) if err != nil { return errors.Wrap(err, "failed to initialize sector builder") } node.sectorBuilder = sectorBuilder return nil } func (node *Node) setIsMining(isMining bool) { node.mining.Lock() defer node.mining.Unlock() node.mining.isMining = isMining } func (node *Node) handleNewMiningOutput(miningOutCh <-chan mining.Output) { defer func() { node.miningDoneWg.Done() }() for { select { case <-node.miningCtx.Done(): return case output, ok := <-miningOutCh: if !ok { return } if output.Err != nil { log.Errorf("stopping mining. 
error: %s", output.Err.Error()) node.StopMining(context.Background()) } else { node.miningDoneWg.Add(1) go func() { if node.IsMining() { node.AddNewlyMinedBlock(node.miningCtx, output.NewBlock) } node.miningDoneWg.Done() }() } } } } func (node *Node) handleNewHeaviestTipSet(ctx context.Context, head types.TipSet, outboxPolicy *core.MessageQueuePolicy) { for { select { case ts, ok := <-node.HeaviestTipSetCh: if !ok { return } newHead, ok := ts.(types.TipSet) if !ok { log.Error("non-tipset published on heaviest tipset channel") continue } if len(newHead) == 0 { log.Error("tipset of size 0 published on heaviest tipset channel. ignoring and waiting for a new heaviest tipset.") continue } if err := outboxPolicy.OnNewHeadTipset(ctx, head, newHead); err != nil { log.Error("updating outbound message queue for new tipset", err) } if err := node.MsgPool.UpdateMessagePool(ctx, node.ChainReader, head, newHead); err != nil { log.Error("updating message pool for new tipset", err) } head = newHead if node.StorageMiner != nil { node.StorageMiner.OnNewHeaviestTipSet(newHead) } node.HeaviestTipSetHandled() case <-ctx.Done(): return } } } func (node *Node) cancelSubscriptions() { if node.BlockSub != nil || node.MessageSub != nil { node.cancelSubscriptionsCtx() } if node.BlockSub != nil { node.BlockSub.Cancel() node.BlockSub = nil } if node.MessageSub != nil { node.MessageSub.Cancel() node.MessageSub = nil } } // Stop initiates the shutdown of the node. func (node *Node) Stop(ctx context.Context) { node.ChainReader.HeadEvents().Unsub(node.HeaviestTipSetCh) node.StopMining(ctx) node.cancelSubscriptions() node.ChainReader.Stop() if node.SectorBuilder() != nil { if err := node.SectorBuilder().Close(); err != nil { fmt.Printf("error closing sector builder: %s\n", err) } node.sectorBuilder = nil } if err := node.Host().Close(); err != nil { fmt.Printf("error closing host: %s\n", err) } if err := node.Repo.Close(); err != nil { fmt.Printf("error closing repo: %s\n", err) } node.Bootstrapper.Stop() fmt.Println("stopping filecoin :(") } type newBlockFunc func(context.Context, *types.Block) func (node *Node) addNewlyMinedBlock(ctx context.Context, b *types.Block) { log.Debugf("Got a newly mined block from the mining worker: %s", b) if err := node.AddNewBlock(ctx, b); err != nil { log.Warningf("error adding new mined block: %s. err: %s", b.Cid().String(), err.Error()) } } // miningAddress returns the address of the mining actor mining on behalf of // the node. func (node *Node) miningAddress() (address.Address, error) { addr := node.Repo.Config().Mining.MinerAddress if addr.Empty() { return address.Undef, ErrNoMinerAddress } return addr, nil } // MiningTimes returns the configured time it takes to mine a block, and also // the mining delay duration, which is currently a fixed fraction of block time. // Note this is mocked behavior, in production this time is determined by how // long it takes to generate PoSTs. func (node *Node) MiningTimes() (time.Duration, time.Duration) { mineDelay := node.GetBlockTime() / mining.MineDelayConversionFactor return node.GetBlockTime(), mineDelay } // GetBlockTime returns the current block time. // TODO this should be surfaced somewhere in the plumbing API. func (node *Node) GetBlockTime() time.Duration { return node.blockTime } // SetBlockTime sets the block time. func (node *Node) SetBlockTime(blockTime time.Duration) { node.blockTime = blockTime } // StartMining causes the node to start feeding blocks to the mining worker and initializes // the SectorBuilder for the mining address. 
func (node *Node) StartMining(ctx context.Context) error { if node.IsMining() { return errors.New("Node is already mining") } minerAddr, err := node.miningAddress() if err != nil { return errors.Wrap(err, "failed to get mining address") } // ensure we have a sector builder if node.SectorBuilder() == nil { if err := node.setupMining(ctx); err != nil { return err } } minerOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr) if err != nil { return errors.Wrapf(err, "failed to get mining owner address for miner %s", minerAddr) } _, mineDelay := node.MiningTimes() if node.MiningWorker == nil { if node.MiningWorker, err = node.CreateMiningWorker(ctx); err != nil { return err } } if node.MiningScheduler == nil { node.MiningScheduler = mining.NewScheduler(node.MiningWorker, mineDelay, node.PorcelainAPI.ChainHead) } // paranoid check if !node.MiningScheduler.IsStarted() { node.miningCtx, node.cancelMining = context.WithCancel(context.Background()) outCh, doneWg := node.MiningScheduler.Start(node.miningCtx) node.miningDoneWg = doneWg node.AddNewlyMinedBlock = node.addNewlyMinedBlock node.miningDoneWg.Add(1) go node.handleNewMiningOutput(outCh) } // initialize a storage miner storageMiner, err := initStorageMinerForNode(ctx, node) if err != nil { return errors.Wrap(err, "failed to initialize storage miner") } node.StorageMiner = storageMiner // loop, turning sealing-results into commitSector messages to be included // in the chain go func() { for { select { case result := <-node.SectorBuilder().SectorSealResults(): if result.SealingErr != nil { log.Errorf("failed to seal sector with id %d: %s", result.SectorID, result.SealingErr.Error()) } else if result.SealingResult != nil { // TODO: determine these algorithmically by simulating call and querying historical prices gasPrice := types.NewGasPrice(1) gasUnits := types.NewGasUnits(300) val := result.SealingResult // This call can fail due to, e.g. nonce collisions. Our miners existence depends on this. // We should deal with this, but MessageSendWithRetry is problematic. 
msgCid, err := node.PorcelainAPI.MessageSend( node.miningCtx, minerOwnerAddr, minerAddr, nil, gasPrice, gasUnits, "commitSector", val.SectorID, val.CommD[:], val.CommR[:], val.CommRStar[:], val.Proof[:], ) if err != nil { log.Errorf("failed to send commitSector message from %s to %s for sector with id %d: %s", minerOwnerAddr, minerAddr, val.SectorID, err) continue } node.StorageMiner.OnCommitmentSent(val, msgCid, nil) } case <-node.miningCtx.Done(): return } } }() // schedules sealing of staged piece-data if node.Repo.Config().Mining.AutoSealIntervalSeconds > 0 { go func() { for { select { case <-node.miningCtx.Done(): return case <-time.After(time.Duration(node.Repo.Config().Mining.AutoSealIntervalSeconds) * time.Second): log.Info("auto-seal has been triggered") if err := node.SectorBuilder().SealAllStagedSectors(node.miningCtx); err != nil { log.Errorf("scheduler received error from node.SectorBuilder.SealAllStagedSectors (%s) - exiting", err.Error()) return } } } }() } else { log.Debug("auto-seal is disabled") } node.setIsMining(true) return nil } func initSectorBuilderForNode(ctx context.Context, node *Node) (sectorbuilder.SectorBuilder, error) { minerAddr, err := node.miningAddress() if err != nil { return nil, errors.Wrap(err, "failed to get node's mining address") } sectorSize, err := node.PorcelainAPI.MinerGetSectorSize(ctx, minerAddr) if err != nil { return nil, errors.Wrapf(err, "failed to get sector size for miner w/address %s", minerAddr.String()) } lastUsedSectorID, err := node.PorcelainAPI.MinerGetLastCommittedSectorID(ctx, minerAddr) if err != nil { return nil, errors.Wrapf(err, "failed to get last used sector id for miner w/address %s", minerAddr.String()) } // TODO: Currently, weconfigure the RustSectorBuilder to store its // metadata in the staging directory, it should be in its own directory. // // Tracked here: https://github.com/filecoin-project/rust-fil-proofs/issues/402 repoPath, err := node.Repo.Path() if err != nil { return nil, err } sectorDir, err := paths.GetSectorPath(node.Repo.Config().SectorBase.RootDir, repoPath) if err != nil { return nil, err } stagingDir, err := paths.StagingDir(sectorDir) if err != nil { return nil, err } sealedDir, err := paths.SealedDir(sectorDir) if err != nil { return nil, err } cfg := sectorbuilder.RustSectorBuilderConfig{ BlockService: node.blockservice, LastUsedSectorID: lastUsedSectorID, MetadataDir: stagingDir, MinerAddr: minerAddr, SealedSectorDir: sealedDir, StagedSectorDir: stagingDir, SectorClass: types.NewSectorClass(sectorSize), } sb, err := sectorbuilder.NewRustSectorBuilder(cfg) if err != nil { return nil, errors.Wrap(err, fmt.Sprintf("failed to initialize sector builder for miner %s", minerAddr.String())) } return sb, nil } func initStorageMinerForNode(ctx context.Context, node *Node) (*storage.Miner, error) { minerAddr, err := node.miningAddress() if err != nil { return nil, errors.Wrap(err, "failed to get node's mining address") } miningOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr) if err != nil { return nil, errors.Wrap(err, "no mining owner available, skipping storage miner setup") } miner, err := storage.NewMiner(minerAddr, miningOwnerAddr, node, node.Repo.DealsDatastore(), node.PorcelainAPI) if err != nil { return nil, errors.Wrap(err, "failed to instantiate storage miner") } return miner, nil } // StopMining stops mining on new blocks. 
func (node *Node) StopMining(ctx context.Context) { node.setIsMining(false) if node.cancelMining != nil { node.cancelMining() } if node.miningDoneWg != nil { node.miningDoneWg.Wait() } // TODO: stop node.StorageMiner } // NewAddress creates a new account address on the default wallet backend. func (node *Node) NewAddress() (address.Address, error) { return wallet.NewAddress(node.Wallet) } // miningOwnerAddress returns the owner of miningAddr. // TODO: find a better home for this method func (node *Node) miningOwnerAddress(ctx context.Context, miningAddr address.Address) (address.Address, error) { ownerAddr, err := node.PorcelainAPI.MinerGetOwnerAddress(ctx, miningAddr) if err != nil { return address.Undef, errors.Wrap(err, "failed to get miner owner address") } return ownerAddr, nil } func (node *Node) handleSubscription(ctx context.Context, f pubSubProcessorFunc, fname string, s pubsub.Subscription, sname string) { for { pubSubMsg, err := s.Next(ctx) if err != nil { log.Errorf("%s.Next(): %s", sname, err) return } if err := f(ctx, pubSubMsg); err != nil { if err != context.Canceled { log.Errorf("%s(): %s", fname, err) } } } } // setupProtocols creates protocol clients and miners, then sets the node's APIs // for each func (node *Node) setupProtocols() error { _, mineDelay := node.MiningTimes() blockMiningAPI := block.New( node.AddNewBlock, node.ChainReader, mineDelay, node.StartMining, node.StopMining, node.CreateMiningWorker) node.BlockMiningAPI = &blockMiningAPI // set up retrieval client and api retapi := retrieval.NewAPI(retrieval.NewClient(node.host, node.blockTime, node.PorcelainAPI)) node.RetrievalAPI = &retapi // set up storage client and api smc := storage.NewClient(node.blockTime, node.host, node.PorcelainAPI) smcAPI := storage.NewAPI(smc) node.StorageAPI = &smcAPI return nil } // CreateMiningWorker creates a mining.Worker for the node using the configured // getStateTree, getWeight, and getAncestors functions for the node func (node *Node) CreateMiningWorker(ctx context.Context) (mining.Worker, error) { processor := consensus.NewDefaultProcessor() minerAddr, err := node.miningAddress() if err != nil { return nil, errors.Wrap(err, "failed to get mining address") } minerPubKey, err := node.PorcelainAPI.MinerGetKey(ctx, minerAddr) if err != nil { return nil, errors.Wrap(err, "could not get key from miner actor") } minerOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr) if err != nil { log.Errorf("could not get owner address of miner actor") return nil, err } return mining.NewDefaultWorker( node.MsgPool, node.getStateTree, node.getWeight, node.getAncestors, processor, node.PowerTable, node.Blockstore, node.CborStore(), minerAddr, minerOwnerAddr, minerPubKey, node.Wallet, node.blockTime), nil } // getStateFromKey returns the state tree based on tipset fetched with provided key tsKey func (node *Node) getStateFromKey(ctx context.Context, tsKey types.SortedCidSet) (state.Tree, error) { stateCid, err := node.ChainReader.GetTipSetStateRoot(tsKey) if err != nil { return nil, err } return state.LoadStateTree(ctx, node.CborStore(), stateCid, builtin.Actors) } // getStateTree is the default GetStateTree function for the mining worker. func (node *Node) getStateTree(ctx context.Context, ts types.TipSet) (state.Tree, error) { return node.getStateFromKey(ctx, ts.ToSortedCidSet()) } // getWeight is the default GetWeight function for the mining worker. 
func (node *Node) getWeight(ctx context.Context, ts types.TipSet) (uint64, error) { parent, err := ts.Parents() if err != nil { return uint64(0), err } // TODO handle genesis cid more gracefully if parent.Len() == 0 { return node.Consensus.Weight(ctx, ts, nil) } pSt, err := node.getStateFromKey(ctx, parent) if err != nil { return uint64(0), err } return node.Consensus.Weight(ctx, ts, pSt) } // getAncestors is the default GetAncestors function for the mining worker. func (node *Node) getAncestors(ctx context.Context, ts types.TipSet, newBlockHeight *types.BlockHeight) ([]types.TipSet, error) { ancestorHeight := types.NewBlockHeight(consensus.AncestorRoundsNeeded) return chain.GetRecentAncestors(ctx, ts, node.ChainReader, newBlockHeight, ancestorHeight, sampling.LookbackParameter) } // -- Accessors // Host returns the nodes host. func (node *Node) Host() host.Host { return node.host } // SectorBuilder returns the nodes sectorBuilder. func (node *Node) SectorBuilder() sectorbuilder.SectorBuilder { return node.sectorBuilder } // BlockService returns the nodes blockservice. func (node *Node) BlockService() bserv.BlockService { return node.blockservice } // CborStore returns the nodes cborStore. func (node *Node) CborStore() *hamt.CborIpldStore { return node.cborStore } // IsMining returns a boolean indicating whether the node is mining blocks. func (node *Node) IsMining() bool { node.mining.Lock() defer node.mining.Unlock() return node.mining.isMining }
idx: 1
id: 19,320
msg: I'll replace this with the actual outbox object soon.
proj: filecoin-project-venus
lang: go
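The patch in this row renames the Node struct's Outbox field to MsgQueue, and the commit message marks the old name as a placeholder. In the old file the field is still populated and read under the old name (the Node literal sets Outbox: outbox, and Start calls core.NewMessageQueuePolicy(node.Outbox, ...)), so those call sites move with the rename. A minimal sketch of the before/after, using stand-in types rather than the real core package:

```go
package main

import "fmt"

// MessageQueue and newMessageQueuePolicy are stand-ins for core.MessageQueue
// and core.NewMessageQueuePolicy from the old file; they exist only to show
// the call sites affected by the rename.
type MessageQueue struct{}

func newMessageQueuePolicy(q *MessageQueue) string {
	if q == nil {
		return "no outbound queue"
	}
	return "policy wrapping the outbound queue"
}

// Node keeps only the field touched by the patch. Before the patch it read:
//
//	Outbox *core.MessageQueue
type Node struct {
	MsgQueue *MessageQueue
}

func main() {
	outbox := &MessageQueue{}
	// Pre-patch call sites: Node{Outbox: outbox} and newMessageQueuePolicy(node.Outbox).
	node := Node{MsgQueue: outbox}
	fmt.Println(newMessageQueuePolicy(node.MsgQueue))
}
```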
Row 2

patch:
@@ -218,6 +218,9 @@ class AnnotationTest extends TestCase
      */
     public function providerValidCodeParse(): iterable
     {
+        $codebase = $this->project_analyzer->getCodebase();
+        $codebase->reportUnusedVariables();
+
         return [
             'nopType' => [
                 '<?php
y: 1

oldf:
<?php namespace Psalm\Tests; use Psalm\Config; use Psalm\Context; use const DIRECTORY_SEPARATOR; class AnnotationTest extends TestCase { use Traits\InvalidCodeAnalysisTestTrait; use Traits\ValidCodeAnalysisTestTrait; public function testPhpStormGenericsWithValidArrayIteratorArgument(): void { Config::getInstance()->allow_phpstorm_generics = true; $this->addFile( 'somefile.php', '<?php function takesString(string $s): void {} /** @param ArrayIterator|string[] $i */ function takesArrayIteratorOfString(ArrayIterator $i): void { $s = $i->offsetGet("a"); takesString($s); foreach ($i as $s2) { takesString($s2); } }' ); $this->analyzeFile('somefile.php', new Context()); } public function testPhpStormGenericsWithValidTraversableArgument(): void { Config::getInstance()->allow_phpstorm_generics = true; $this->addFile( 'somefile.php', '<?php function takesString(string $s): void {} /** @param Traversable|string[] $i */ function takesTraversableOfString(Traversable $i): void { foreach ($i as $s2) { takesString($s2); } }' ); $this->analyzeFile('somefile.php', new Context()); } public function testPhpStormGenericsWithClassProperty(): void { Config::getInstance()->allow_phpstorm_generics = true; $this->addFile( 'somefile.php', '<?php /** @psalm-suppress MissingConstructor */ class Foo { /** @var \stdClass[]|\ArrayObject */ public $bar; /** * @return \stdClass[]|\ArrayObject */ public function getBar(): \ArrayObject { return $this->bar; } }' ); $this->analyzeFile('somefile.php', new Context()); } public function testPhpStormGenericsWithGeneratorArray(): void { Config::getInstance()->allow_phpstorm_generics = true; $this->addFile( 'somefile.php', '<?php class A { /** * @return stdClass[]|Generator */ function getCollection(): Generator { yield new stdClass; } }' ); $this->analyzeFile('somefile.php', new Context()); } public function testPhpStormGenericsWithValidIterableArgument(): void { Config::getInstance()->allow_phpstorm_generics = true; $this->addFile( 'somefile.php', '<?php function takesString(string $s): void {} /** @param iterable|string[] $i */ function takesArrayIteratorOfString(iterable $i): void { foreach ($i as $s2) { takesString($s2); } }' ); $this->analyzeFile('somefile.php', new Context()); } public function testPhpStormGenericsInvalidArgument(): void { $this->expectException(\Psalm\Exception\CodeException::class); $this->expectExceptionMessage('InvalidScalarArgument'); Config::getInstance()->allow_phpstorm_generics = true; $this->addFile( 'somefile.php', '<?php function takesInt(int $s): void {} /** @param ArrayIterator|string[] $i */ function takesArrayIteratorOfString(ArrayIterator $i): void { $s = $i->offsetGet("a"); takesInt($s); }' ); $this->analyzeFile('somefile.php', new Context()); } public function testPhpStormGenericsNoTypehint(): void { $this->expectException(\Psalm\Exception\CodeException::class); $this->expectExceptionMessage('PossiblyInvalidMethodCall'); Config::getInstance()->allow_phpstorm_generics = true; $this->addFile( 'somefile.php', '<?php /** @param ArrayIterator|string[] $i */ function takesArrayIteratorOfString($i): void { $s = $i->offsetGet("a"); }' ); $this->analyzeFile('somefile.php', new Context()); } public function testInvalidParamDefault(): void { $this->expectException(\Psalm\Exception\CodeException::class); $this->expectExceptionMessage('InvalidParamDefault'); $this->addFile( 'somefile.php', '<?php /** * @param array $arr * @return void */ function foo($arr = false) {}' ); $this->analyzeFile('somefile.php', new Context()); } public function 
testInvalidParamDefaultButAllowedInConfig(): void { Config::getInstance()->add_param_default_to_docblock_type = true; $this->addFile( 'somefile.php', '<?php /** * @param array $arr * @return void */ function foo($arr = false) {} foo(false); foo(["hello"]);' ); $this->analyzeFile('somefile.php', new Context()); } public function testInvalidTypehintParamDefaultButAllowedInConfig(): void { $this->expectException(\Psalm\Exception\CodeException::class); $this->expectExceptionMessage('InvalidParamDefault'); Config::getInstance()->add_param_default_to_docblock_type = true; $this->addFile( 'somefile.php', '<?php function foo(array $arr = false) : void {}' ); $this->analyzeFile('somefile.php', new Context()); } /** * @return iterable<string,array{string,assertions?:array<string,string>,error_levels?:string[]}> */ public function providerValidCodeParse(): iterable { return [ 'nopType' => [ '<?php $a = "hello"; /** @var int $a */', 'assertions' => [ '$a' => 'int', ], ], 'validDocblockReturn' => [ '<?php /** * @return string */ function fooFoo(): string { return "boop"; } /** * @return array<int, string> */ function foo2(): array { return ["hello"]; } /** * @return array<int, string> */ function foo3(): array { return ["hello"]; }', ], 'reassertWithIs' => [ '<?php /** @param array $a */ function foo($a): void { if (is_array($a)) { // do something } }', 'assertions' => [], 'error_level' => ['RedundantConditionGivenDocblockType'], ], 'checkArrayWithIs' => [ '<?php /** @param mixed $b */ function foo($b): void { /** @var array */ $a = (array)$b; if (is_array($a)) { // do something } }', 'assertions' => [], 'error_level' => ['RedundantConditionGivenDocblockType'], ], 'goodDocblock' => [ '<?php class A { /** * @param A $a * @param bool $b */ public function g(A $a, $b): void { } }', ], 'goodDocblockInNamespace' => [ '<?php namespace Foo; class A { /** * @param \Foo\A $a * @param bool $b */ public function g(A $a, $b): void { } }', ], 'ignoreNullableReturn' => [ '<?php class A { /** @var int */ public $bar = 5; public function foo(): void {} } /** * @return ?A * @psalm-ignore-nullable-return */ function makeA() { return rand(0, 1) ? 
new A(): null; } function takeA(A $a): void { } $a = makeA(); $a->foo(); $a->bar = 7; takeA($a);', ], 'invalidDocblockParamSuppress' => [ '<?php /** * @param int $bar * @psalm-suppress MismatchingDocblockParamType */ function fooFoo(array $bar): void { }', ], 'differentDocblockParamClassSuppress' => [ '<?php class A {} class B {} /** * @param B $bar * @psalm-suppress MismatchingDocblockParamType */ function fooFoo(A $bar): void { }', ], 'varDocblock' => [ '<?php /** @var array<Exception> */ $a = []; $a[0]->getMessage();', ], 'ignoreVarDocblock' => [ '<?php /** * @var array<Exception> * @ignore-var */ $a = []; $a[0]->getMessage();', 'assertions' => [], 'error_level' => ['EmptyArrayAccess', 'MixedMethodCall'], ], 'psalmIgnoreVarDocblock' => [ '<?php /** * @var array<Exception> * @psalm-ignore-var */ $a = []; $a[0]->getMessage();', 'assertions' => [], 'error_level' => ['EmptyArrayAccess', 'MixedMethodCall'], ], 'mixedDocblockParamTypeDefinedInParent' => [ '<?php class A { /** @param mixed $a */ public function foo($a): void {} } class B extends A { public function foo($a): void {} }', ], 'intDocblockParamTypeDefinedInParent' => [ '<?php class A { /** @param int $a */ public function foo($a): void {} } class B extends A { public function foo($a): void {} }', ], 'varSelf' => [ '<?php class A { public function foo(): void {} public function getMeAgain(): void { /** @var self */ $me = $this; $me->foo(); } }', ], 'psalmVar' => [ '<?php class A { /** @psalm-var array<int, string> */ public $foo = []; public function updateFoo(): void { $this->foo[5] = "hello"; } }', ], 'psalmParam' => [ '<?php function takesInt(int $a): void {} /** * @psalm-param array<int, string> $a * @param string[] $a */ function foo(array $a): void { foreach ($a as $key => $value) { takesInt($key); } }', ], 'returnDocblock' => [ '<?php function foo(int $i): int { /** @var int */ return $i; }', ], 'doubleVar' => [ '<?php function foo() : array { return ["hello" => new stdClass, "goodbye" => new stdClass]; } $a = null; $b = null; /** * @var string $key * @var stdClass $value */ foreach (foo() as $key => $value) { $a = $key; $b = $value; }', 'assertions' => [ '$a' => 'null|string', '$b' => 'null|stdClass', ], ], 'allowOptionalParamsToBeEmptyArray' => [ '<?php /** @param array{b?: int, c?: string} $a */ function foo(array $a = []) : void {}', ], 'allowEmptyVarAnnotation' => [ '<?php /** * @param $x */ function example(array $x) : void {}', ], 'allowCapitalisedNamespacedString' => [ '<?php namespace Foo; /** * @param String $x */ function example(string $x) : void {}', ], 'megaClosureAnnotationWithoutSpacing' => [ '<?php /** @var array{a:Closure():(array<mixed, mixed>|null), b?:Closure():array<mixed, mixed>, c?:Closure():array<mixed, mixed>, d?:Closure():array<mixed, mixed>, e?:Closure():(array{f:null|string, g:null|string, h:null|string, i:string, j:mixed, k:mixed, l:mixed, m:mixed, n:bool, o?:array{0:string}}|null), p?:Closure():(array{f:null|string, g:null|string, h:null|string, q:string, i:string, j:mixed, k:mixed, l:mixed, m:mixed, n:bool, o?:array{0:string}}|null), r?:Closure():(array<mixed, mixed>|null), s:array<mixed, mixed>} */ $arr = []; $arr["a"]();', ], 'megaClosureAnnotationWithSpacing' => [ '<?php /** * @var array{ * a: Closure() : (array<mixed, mixed>|null), * b?: Closure() : array<mixed, mixed>, * c?: Closure() : array<mixed, mixed>, * d?: Closure() : array<mixed, mixed>, * e?: Closure() : (array{ * f: null|string, * g: null|string, * h: null|string, * i: string, * j: mixed, * k: mixed, * l: mixed, * m: mixed, * n: 
bool, * o?: array{0:string} * }|null), * p?: Closure() : (array{ * f: null|string, * g: null|string, * h: null|string, * q: string, * i: string, * j: mixed, * k: mixed, * l: mixed, * m: mixed, * n: bool, * o?: array{0:string} * }|null), * r?: Closure() : (array<mixed, mixed>|null), * s: array<mixed, mixed> * } * * Some text */ $arr = []; $arr["a"]();', ], 'multipeLineGenericArray' => [ '<?php /** * @psalm-type MiddlewareArray = array< * class-string<\Exception>, * array<int, string> * > * * @psalm-type RuleArray = array{ * rule: string, * controller?: class-string<\Exception>, * redirect?: string, * code?: int, * type?: string, * middleware?: MiddlewareArray * } * * Foo Bar */ class A {}', ], 'builtInClassInAShape' => [ '<?php /** * @return array{d:Exception} * @psalm-suppress InvalidReturnType */ function f() {}' ], 'slashAfter?' => [ '<?php namespace ns; /** @param ?\stdClass $s */ function foo($s) : void { } foo(null); foo(new \stdClass);', ], 'returnTypeShouldBeNullable' => [ '<?php /** * @return stdClass */ function foo() : ?stdClass { return rand(0, 1) ? new stdClass : null; } $f = foo(); if ($f) {}', ], 'spreadOperatorAnnotation' => [ '<?php /** @param string[] $s */ function foo(string ...$s) : void {} /** @param string ...$s */ function bar(string ...$s) : void {} foo("hello", "goodbye"); bar("hello", "goodbye"); foo(...["hello", "goodbye"]); bar(...["hello", "goodbye"]);', ], 'spreadOperatorByRefAnnotation' => [ '<?php /** @param string &...$s */ function foo(&...$s) : void {} /** @param string ...&$s */ function bar(&...$s) : void {} /** @param string[] &$s */ function bat(&...$s) : void {} $a = "hello"; $b = "goodbye"; $c = "hello again"; foo($a); bar($b); bat($c);', 'assertions' => [ '$a' => 'string', '$b' => 'string', '$c' => 'string', ], ], 'valueReturnType' => [ '<?php /** * @param "a"|"b" $_p */ function acceptsLiteral($_p): void {} /** * @return "a"|"b" */ function returnsLiteral(): string { return rand(0,1) ? 
"a" : "b"; } acceptsLiteral(returnsLiteral());', ], 'typeAliasBeforeClass' => [ '<?php /** * @psalm-type CoolType = A|B|null */ class A {} class B {} /** @return CoolType */ function foo() { if (rand(0, 1)) { return new A(); } if (rand(0, 1)) { return new B(); } return null; } /** @param CoolType $a **/ function bar ($a) : void { } bar(foo());', ], 'typeAliasBeforeFunction' => [ '<?php /** * @psalm-type A_OR_B = A|B * @psalm-type CoolType = A_OR_B|null * @return CoolType */ function foo() { if (rand(0, 1)) { return new A(); } if (rand(0, 1)) { return new B(); } return null; } class A {} class B {} /** @param CoolType $a **/ function bar ($a) : void { } bar(foo());', ], 'typeAliasInSeparateBlockBeforeFunction' => [ '<?php /** * @psalm-type CoolType = A|B|null */ /** * @return CoolType */ function foo() { if (rand(0, 1)) { return new A(); } if (rand(0, 1)) { return new B(); } return null; } class A {} class B {} /** @param CoolType $a **/ function bar ($a) : void { } bar(foo());', ], 'almostFreeStandingTypeAlias' => [ '<?php /** * @psalm-type CoolType = A|B|null */ // this breaks up the line class A {} class B {} /** @return CoolType */ function foo() { if (rand(0, 1)) { return new A(); } if (rand(0, 1)) { return new B(); } return null; } /** @param CoolType $a **/ function bar ($a) : void { } bar(foo());', ], 'typeAliasUsedTwice' => [ '<?php /** @psalm-type TA = array<int, string> */ class Bar { public function foo() : void { $bar = /** @return TA */ function() { return ["hello"]; }; /** @var array<int, TA> */ $bat = [$bar(), $bar()]; foreach ($bat as $b) { echo $b[0]; } } } /** * @psalm-type _A=array{elt:int} * @param _A $p * @return _A */ function f($p) { /** @var _A */ $r = $p; return $r; }', ], 'listUnpackWithDocblock' => [ '<?php interface I {} class A implements I { public function bar() : void {} } /** @return I[] */ function foo() : array { return [new A()]; } /** @var A $a1 */ [$a1, $a2] = foo(); $a1->bar();', ], 'spaceInType' => [ '<?php /** @return string | null */ function foo(string $s = null) { return $s; }', ], 'missingReturnTypeWithBadDocblockIgnoreBoth' => [ '<?php /** * @return [bad] */ function fooBar() { }', [], [ 'InvalidDocblock' => \Psalm\Config::REPORT_INFO, 'MissingReturnType' => \Psalm\Config::REPORT_INFO, ], ], 'objectWithPropertiesAnnotation' => [ '<?php /** @param object{foo:string} $o */ function foo(object $o) : string { return $o->foo; } $s = new \stdClass(); $s->foo = "hello"; foo($s); class A { /** @var string */ public $foo = "hello"; } foo(new A);', ], 'refineTypeInNestedCall' => [ '<?php function foo(array $arr): \Generator { /** @var array<string, mixed> $arr */ foreach (array_filter(array_keys($arr), function (string $key) : bool { return strpos($key, "BAR") === 0; }) as $envVar) { yield $envVar => [getenv($envVar)]; } }', ], 'allowAnnotationOnServer' => [ '<?php function foo(): \Generator { /** @var array<string, mixed> $_SERVER */ foreach (array_filter(array_keys($_SERVER), function (string $key) : bool { return strpos($key, "BAR") === 0; }) as $envVar) { yield $envVar => [getenv($envVar)]; } }', ], 'annotationOnForeachItems' => [ '<?php function foo(array $arr) : void { $item = null; /** @var string $item */ foreach ($arr as $item) {} if (is_null($item)) {} } function bar(array $arr) : void { $item = null; /** @var string $item */ foreach ($arr as $item => $_) {} if (is_null($item)) {} } function bat(array $arr) : void { $item = null; /** * @psalm-suppress MixedArrayAccess * @var string $item */ foreach ($arr as list($item)) {} if (is_null($item)) 
{} } function baz(array $arr) : void { $item = null; /** * @psalm-suppress MixedArrayAccess * @var string $item */ foreach ($arr as list($item => $_)) {} if (is_null($item)) {} }', [], [ 'MixedAssignment', ], ], 'extraneousDocblockParamName' => [ '<?php /** * @param string $foo * @param string[] $bar * @param string[] $barb */ function f(string $foo, array $barb): void {}', ], 'nonEmptyArray' => [ '<?php /** @param non-empty-array<string> $arr */ function foo(array $arr) : void { foreach ($arr as $a) {} echo $a; } foo(["a", "b", "c"]); /** @param array<string> $arr */ function bar(array $arr) : void { if (!$arr) { return; } foo($arr); }', ], 'nonEmptyArrayInNamespace' => [ '<?php namespace ns; /** @param non-empty-array<string> $arr */ function foo(array $arr) : void { foreach ($arr as $a) {} echo $a; } foo(["a", "b", "c"]); /** @param array<string> $arr */ function bar(array $arr) : void { if (!$arr) { return; } foo($arr); }', ], 'noExceptionOnIntersection' => [ '<?php class Foo { /** @var null|\DateTime&\DateTimeImmutable */ private $s = null; }', ], 'intersectionWithSpace' => [ '<?php interface A { public function foo() : void; } interface B { public function bar() : void; } /** @param A & B $a */ function f(A $a) : void { $a->foo(); $a->bar(); }', ], 'allowClosingComma' => [ '<?php /** * @psalm-type _Alias=array{ * foo: string, * bar: string, * baz: array{ * a: int, * }, * } */ class Foo { } /** * @param array{ * foo: string, * bar: string, * baz: array{ * a: int, * }, * } $foo */ function foo(array $foo) : int { return count($foo); } /** * @var array{ * foo:string, * bar:string, * baz:string, * } $foo */ $foo = ["foo" => "", "bar" => "", "baz" => ""];', ], 'returnNumber' => [ '<?php class C { /** * @return 1 */ public static function barBar() { return 1; } }', ], 'returnNumberForInterface' => [ '<?php interface I { /** * @return 1 */ public static function barBar(); }', ], 'psalmTypeAnnotationAboveReturn' => [ '<?php /** * @psalm-type Person = array{name: string, age: int} */ /** * @psalm-return Person */ function getPerson_error(): array { $json = \'{"name": "John", "age": 44}\'; /** @psalm-var Person */ return json_decode($json, true); }' ], 'allowDocblockDefinedTKeyedArrayIntoNonEmpty' => [ '<?php /** @param non-empty-array $_bar */ function foo(array $_bar) : void { } /** @var array{0:list<string>, 1:list<int>} */ $bar = [[], []]; foo($bar);' ], 'allowResourceInList' => [ '<?php /** @param list<scalar|array|object|resource|null> $_s */ function foo(array $_s) : void { }' ], 'possiblyUndefinedObjectProperty' => [ '<?php function consume(string $value): void { echo $value; } /** @var object{value?: string} $data */ $data = json_decode("{}", false); consume($data->value ?? 
"");' ], 'throwSelf' => [ '<?php namespace Foo; class MyException extends \Exception { /** * @throws self */ public static function create(): void { throw new self(); } }' ], 'parseTrailingCommaInReturn' => [ '<?php /** * @psalm-return array{ * a: int, * b: string, * } */ function foo(): array { return ["a" => 1, "b" => "two"]; }' ], 'falsableFunctionAllowedWhenBooleanExpected' => [ '<?php /** @psalm-return bool */ function alwaysFalse1() { return false; } function alwaysFalse2(): bool { return false; }' ], 'dontInheritDocblockReturnWhenRedeclared' => [ '<?php interface Id {} class UserId implements Id {} interface Entity { /** @psalm-return Id */ function id(): Id; } class User implements Entity { public function id(): UserId { return new UserId(); } }', [], [], '7.4' ], 'arrayWithKeySlashesAndNewline' => [ '<?php $arr = ["foo\\bar\nbaz" => "literal"];', [ '$arr' => 'array{\'foo\\\\bar\nbaz\': string}' ] ], 'doubleSpaceBeforeAt' => [ '<?php /** * @param string $c */ function foo($c) : void {}' ], 'throwsAnnotationWithBarAndSpace' => [ '<?php /** * @throws \Exception| \InvalidArgumentException */ function bar() : void {}' ], 'varDocblockAboveCall' => [ '<?php function example(string $s): void { if (preg_match(\'{foo-(\w+)}\', $s, $m)) { /** @var array{string, string} $m */ takesString($m[1]); } } function takesString(string $s): void {}' ], 'noCrashWithoutAssignment' => [ '<?php /** @var DateTime $obj */ echo $obj->format("Y");' ], 'intMaskWithClassConstants' => [ '<?php class FileFlag { public const OPEN = 1; public const MODIFIED = 2; public const NEW = 4; } /** * @param int-mask<FileFlag::OPEN, FileFlag::MODIFIED, FileFlag::NEW> $flags */ function takesFlags(int $flags) : void { echo $flags; } takesFlags(FileFlag::MODIFIED | FileFlag::NEW);' ], 'intMaskWithZero' => [ '<?php /** @param int-mask<1,2> $_flags */ function takesFlags(int $_flags): void {} takesFlags(0); ' ], 'intMaskOfWithClassWildcard' => [ '<?php class FileFlag { public const OPEN = 1; public const MODIFIED = 2; public const NEW = 4; } /** * @param int-mask-of<FileFlag::*> $flags */ function takesFlags(int $flags) : void { echo $flags; } takesFlags(FileFlag::MODIFIED | FileFlag::NEW);' ], 'intMaskOfWithZero' => [ '<?php class FileFlag { public const OPEN = 1; public const MODIFIED = 2; public const NEW = 4; } /** @param int-mask-of<FileFlag::*> $_flags */ function takesFlags(int $_flags): void {} takesFlags(0); ' ], 'emptyStringFirst' => [ '<?php /** * @param \'\'|\'a\'|\'b\' $v */ function testBad(string $v): void { echo $v; }' ], ]; } /** * @return iterable<string,array{string,error_message:string,1?:string[],2?:bool,3?:string}> */ public function providerInvalidCodeParse(): iterable { return [ 'invalidClassMethodReturn' => [ '<?php class C { /** * @return $thus */ public function barBar() { return $this; } }', 'error_message' => 'MissingDocblockType', ], 'invalidClassMethodReturnBrackets' => [ '<?php class C { /** * @return [] */ public static function barBar() { return []; } }', 'error_message' => 'InvalidDocblock', ], 'invalidInterfaceMethodReturn' => [ '<?php interface I { /** * @return $thus */ public static function barBar(); }', 'error_message' => 'MissingDocblockType', ], 'invalidInterfaceMethodReturnBrackets' => [ '<?php interface I { /** * @return [] */ public static function barBar(); }', 'error_message' => 'InvalidDocblock', ], 'invalidPropertyBrackets' => [ '<?php class A { /** * @var [] */ public $bar; }', 'error_message' => 'InvalidDocblock', ], 'invalidReturnClassWithComma' => [ '<?php interface I { /** * 
@return 1, */ public static function barBar(); }', 'error_message' => 'InvalidDocblock', ], 'returnClassWithComma' => [ '<?php interface I { /** * @return a, */ public static function barBar(); }', 'error_message' => 'InvalidDocblock', ], 'missingParamType' => [ '<?php /** * @param string $bar */ function fooBar(): void { } fooBar("hello");', 'error_message' => 'TooManyArguments - src' . DIRECTORY_SEPARATOR . 'somefile.php:8:21 - Too many arguments for fooBar ' . '- expecting 0 but saw 1', ], 'missingParamVar' => [ '<?php /** * @param string */ function fooBar(): void { }', 'error_message' => 'InvalidDocblock - src' . DIRECTORY_SEPARATOR . 'somefile.php:5:21 - Badly-formatted @param', ], 'invalidSlashWithString' => [ '<?php /** * @return \?string */ function foo() { return rand(0, 1) ? "hello" : null; }', 'error_message' => 'InvalidDocblock', ], 'missingReturnTypeWithBadDocblock' => [ '<?php /** * @return [bad] */ function fooBar() { }', 'error_message' => 'MissingReturnType', [ 'InvalidDocblock' => \Psalm\Config::REPORT_INFO, ], ], 'invalidDocblockReturn' => [ '<?php /** * @return string */ function fooFoo(): int { return 5; }', 'error_message' => 'MismatchingDocblockReturnType', ], 'intParamTypeDefinedInParent' => [ '<?php class A { public function foo(int $a): void {} } class B extends A { public function foo($a): void {} }', 'error_message' => 'MissingParamType', 'error_levels' => ['MethodSignatureMismatch'], ], 'psalmInvalidVar' => [ '<?php class A { /** @psalm-var array<int, string> */ public $foo = []; public function updateFoo(): void { $this->foo["boof"] = "hello"; } }', 'error_message' => 'InvalidPropertyAssignmentValue', ], 'incorrectDocblockOrder' => [ '<?php class MyClass { /** * Comment * @var $fooPropTypo string */ public $fooProp = "/tmp/file.txt"; }', 'error_message' => 'MissingDocblockType', ], 'badlyFormattedVar' => [ '<?php /** * @return string[] */ function returns_strings() { /** @var array(string) $result */ $result = ["example"]; return $result; }', 'error_message' => 'InvalidDocblock', ], 'badlyWrittenVar' => [ '<?php /** @param mixed $x */ function myvalue($x): void { /** @var $myVar MyNS\OtherClass */ $myVar = $x->conn()->method(); $myVar->otherMethod(); }', 'error_message' => 'MissingDocblockType', ], 'dontOverrideSameType' => [ '<?php class A { /** @return ?int */ public function foo(): ?int { if (rand(0, 1)) return 5; } }', 'error_message' => 'InvalidReturnType', ], 'alwaysCheckReturnType' => [ '<?php class A {} /** * @return A * @psalm-suppress MismatchingDocblockReturnType */ function foo(): B { return new A; }', 'error_message' => 'UndefinedClass', ], 'preventBadBoolean' => [ '<?php function foo(): boolean { return true; }', 'error_message' => 'UndefinedClass', ], 'undefinedDocblockClassCall' => [ '<?php class B { /** * @return A * @psalm-suppress UndefinedDocblockClass * @psalm-suppress InvalidReturnStatement * @psalm-suppress InvalidReturnType */ public function foo() { return new stdClass(); } public function bar() { $this->foo()->bar(); } } ', 'error_message' => 'UndefinedDocblockClass', ], 'preventBadTKeyedArrayFormat' => [ '<?php /** * @param array{} $arr */ function bar(array $arr): void {}', 'error_message' => 'InvalidDocblock', ], 'noPhpStormAnnotationsThankYou' => [ '<?php /** @param ArrayIterator|string[] $i */ function takesArrayIteratorOfString(ArrayIterator $i): void {}', 'error_message' => 'MismatchingDocblockParamType', ], 'noPhpStormAnnotationsPossiblyInvalid' => [ '<?php /** @param ArrayIterator|string[] $i */ function 
takesArrayIteratorOfString($i): void { $s = $i->offsetGet("a"); }', 'error_message' => 'PossiblyInvalidMethodCall', ], 'doubleBar' => [ '<?php /** @param PDO||Closure|numeric $a */ function foo($a) : void {}', 'error_message' => 'InvalidDocblock', ], 'badStringVar' => [ '<?php /** @var string; */ $a = "hello";', 'error_message' => 'InvalidDocblock', ], 'badCallableVar' => [ '<?php /** @return Closure(int): */ function foo() : callable { return function () : void {}; }', 'error_message' => 'InvalidDocblock', ], 'hyphenInType' => [ '<?php /** * @return - Description */ function example() { return "placeholder"; }', 'error_message' => 'InvalidDocblock', ], 'badAmpersand' => [ '<?php /** @return &array */ function foo() : array { return []; }', 'error_message' => 'InvalidDocblock', ], 'invalidTypeAlias' => [ '<?php /** * @psalm-type CoolType = A|B> */ class A {}', 'error_message' => 'InvalidDocblock', ], 'typeAliasInTKeyedArray' => [ '<?php /** * @psalm-type aType null|"a"|"b"|"c"|"d" */ /** @psalm-return array{0:bool,1:aType} */ function f(): array { return [(bool)rand(0,1), rand(0,1) ? "z" : null]; }', 'error_message' => 'InvalidReturnStatement', ], 'noCrashOnHalfDoneArrayPropertyType' => [ '<?php class A { /** @var array< */ private $foo = []; }', 'error_message' => 'InvalidDocblock', ], 'noCrashOnHalfDoneTKeyedArrayPropertyType' => [ '<?php class A { /** @var array{ */ private $foo = []; }', 'error_message' => 'InvalidDocblock', ], 'noCrashOnInvalidClassTemplateAsType' => [ '<?php /** * @template T as ' . ' */ class A {}', 'error_message' => 'InvalidDocblock', ], 'noCrashOnInvalidFunctionTemplateAsType' => [ '<?php /** * @template T as ' . ' */ function foo() : void {}', 'error_message' => 'InvalidDocblock', ], 'returnTypeNewLineIsIgnored' => [ '<?php /** * @return * Some text */ function foo() {}', 'error_message' => 'MissingReturnType', ], 'objectWithPropertiesAnnotationNoMatchingProperty' => [ '<?php /** @param object{foo:string} $o */ function foo(object $o) : string { return $o->foo; } class A {} foo(new A);', 'error_message' => 'InvalidArgument', ], 'badVar' => [ '<?php /** @var Foo */ $a = $_GET["foo"];', 'error_message' => 'UndefinedDocblockClass', ], 'badPsalmType' => [ '<?php /** * @psalm-type Foo = array{a:} */', 'error_message' => 'InvalidDocblock', ], 'mismatchingDocblockParamName' => [ '<?php /** @param string[] $bar */ function f(array $barb): void {}', 'error_message' => 'InvalidDocblockParamName - src' . DIRECTORY_SEPARATOR . 
'somefile.php:2:41', ], 'nonEmptyArrayCalledWithEmpty' => [ '<?php /** @param non-empty-array<string> $arr */ function foo(array $arr) : void { foreach ($arr as $a) {} echo $a; } foo([]);', 'error_message' => 'InvalidArgument', ], 'nonEmptyArrayCalledWithEmptyInNamespace' => [ '<?php namespace ns; /** @param non-empty-array<string> $arr */ function foo(array $arr) : void { foreach ($arr as $a) {} echo $a; } foo([]);', 'error_message' => 'InvalidArgument', ], 'nonEmptyArrayCalledWithArray' => [ '<?php /** @param non-empty-array<string> $arr */ function foo(array $arr) : void { foreach ($arr as $a) {} echo $a; } /** @param array<string> $arr */ function bar(array $arr) { foo($arr); }', 'error_message' => 'ArgumentTypeCoercion', ], 'spreadOperatorArrayAnnotationBadArg' => [ '<?php /** @param string[] $s */ function foo(string ...$s) : void {} foo(5);', 'error_message' => 'InvalidScalarArgument', ], 'spreadOperatorArrayAnnotationBadSpreadArg' => [ '<?php /** @param string[] $s */ function foo(string ...$s) : void {} foo(...[5]);', 'error_message' => 'InvalidScalarArgument', ], 'spreadOperatorByRefAnnotationBadCall1' => [ '<?php /** @param string &...$s */ function foo(&...$s) : void {} $a = 1; foo($a);', 'error_message' => 'InvalidScalarArgument', ], 'spreadOperatorByRefAnnotationBadCall2' => [ '<?php /** @param string ...&$s */ function foo(&...$s) : void {} $b = 2; foo($b);', 'error_message' => 'InvalidScalarArgument', ], 'spreadOperatorByRefAnnotationBadCall3' => [ '<?php /** @param string[] &$s */ function foo(&...$s) : void {} $c = 3; foo($c);', 'error_message' => 'InvalidScalarArgument', ], 'identifyReturnType' => [ '<?php /** @return array{hello: string} */ function foo() {}', 'error_message' => 'InvalidReturnType - src' . DIRECTORY_SEPARATOR . 'somefile.php:2:33', ], 'invalidParamDocblockAsterisk' => [ '<?php /** * @param * $reference */ function f($reference) {}', 'error_message' => 'MissingDocblockType', ], 'canNeverReturnDeclaredType' => [ '<?php /** @psalm-return false */ function alwaysFalse() : bool { return true; }', 'error_message' => 'InvalidReturnStatement - src' . DIRECTORY_SEPARATOR . 'somefile.php:6:32', ], 'falsableWithExpectedTypeTrue' => [ '<?php /** @psalm-return true */ function alwaysFalse() { return false; }', 'error_message' => 'FalsableReturnStatement - src' . DIRECTORY_SEPARATOR . 'somefile.php:6:32', ], 'DuplicatedParam' => [ '<?php /** * @psalm-param array $arr * @psalm-param array $arr */ function bar(array $arr): void {}', 'error_message' => 'InvalidDocblock - src' . DIRECTORY_SEPARATOR . 'somefile.php:6:21 - Found duplicated @param or prefixed @param tag in docblock for bar', ], 'DuplicatedReturn' => [ '<?php /** * @return void * @return void */ function bar(array $arr): void {}', 'error_message' => 'InvalidDocblock - src' . DIRECTORY_SEPARATOR . 
'somefile.php:6:21 - Found duplicated @return or prefixed @return tag in docblock for bar', ], 'missingClassForTKeyedArray' => [ '<?php interface I { /** @return object{id: int, a: int} */ public function run(); } class C implements I { /** @return X */ public function run() {} }', 'error_message' => 'ImplementedReturnTypeMismatch' ], 'unexpectedImportType' => [ '<?php /** @psalm-import-type asd */ function f(): void {} ', 'error_message' => 'PossiblyInvalidDocblockTag', ], 'unexpectedVarOnFunction' => [ '<?php /** @var int $p */ function f($p): void {} ', 'error_message' => 'PossiblyInvalidDocblockTag', ], 'unterminatedParentheses' => [ '<?php /** @return ( */ function f() {} ', 'error_message' => 'InvalidDocblock', ], 'emptyParentheses' => [ '<?php /** @return () */ function f() {} ', 'error_message' => 'InvalidDocblock', ], 'unbalancedParentheses' => [ "<?php /** @return ((string) */ function f(): string { return ''; } ", 'error_message' => 'InvalidDocblock', ], ]; } }
1
10,978
Providers are called *before* `setUp()`, so I don't think you can access properties here.
vimeo-psalm
php
@@ -11,7 +11,7 @@ namespace Xunit /// Apply this attribute to your test method to specify an active issue. /// </summary> [TraitDiscoverer("Xunit.NetCore.Extensions.ActiveIssueDiscoverer", "Xunit.NetCore.Extensions")] - [AttributeUsage(AttributeTargets.Method, AllowMultiple = true)] + [AttributeUsage(AttributeTargets.Method | AttributeTargets.Class, AllowMultiple = true)] public class ActiveIssueAttribute : Attribute, ITraitAttribute { public ActiveIssueAttribute(int issueNumber, TestPlatforms platforms) { }
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. using System; using Xunit.Sdk; namespace Xunit { /// <summary> /// Apply this attribute to your test method to specify an active issue. /// </summary> [TraitDiscoverer("Xunit.NetCore.Extensions.ActiveIssueDiscoverer", "Xunit.NetCore.Extensions")] [AttributeUsage(AttributeTargets.Method, AllowMultiple = true)] public class ActiveIssueAttribute : Attribute, ITraitAttribute { public ActiveIssueAttribute(int issueNumber, TestPlatforms platforms) { } public ActiveIssueAttribute(string issue, TestPlatforms platforms) { } public ActiveIssueAttribute(int issueNumber, TargetFrameworkMonikers framework) { } public ActiveIssueAttribute(string issue, TargetFrameworkMonikers framework) { } public ActiveIssueAttribute(int issueNumber, TestPlatforms platforms = TestPlatforms.Any, TargetFrameworkMonikers framework = (TargetFrameworkMonikers)0) { } public ActiveIssueAttribute(string issue, TestPlatforms platforms = TestPlatforms.Any, TargetFrameworkMonikers framework = (TargetFrameworkMonikers)0) { } } }
1
12,791
While this allows it to be applied, does it correctly cause the entire class to be skipped?
dotnet-buildtools
.cs
@@ -143,6 +143,11 @@ def main(): if args.seed is not None: logger.info(f'Set random seed to {args.seed}, ' f'deterministic: {args.deterministic}') + + # import torch.distributed as dist + # rank = dist.get_rank() + # args.seed = args.seed + rank + set_random_seed(args.seed, deterministic=args.deterministic) cfg.seed = args.seed meta['seed'] = args.seed
1
import argparse import copy import os import os.path as osp import time import warnings import mmcv import torch from mmcv import Config, DictAction from mmcv.runner import init_dist from mmcv.utils import get_git_hash from mmdet import __version__ from mmdet.apis import set_random_seed, train_detector from mmdet.datasets import build_dataset from mmdet.models import build_detector from mmdet.utils import collect_env, get_root_logger def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') parser.add_argument('config', help='train config file path') parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument( '--resume-from', help='the checkpoint file to resume from') parser.add_argument( '--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training') group_gpus = parser.add_mutually_exclusive_group() group_gpus.add_argument( '--gpus', type=int, help='number of gpus to use ' '(only applicable to non-distributed training)') group_gpus.add_argument( '--gpu-ids', type=int, nargs='+', help='ids of gpus to use ' '(only applicable to non-distributed training)') parser.add_argument('--seed', type=int, default=None, help='random seed') parser.add_argument( '--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.') parser.add_argument( '--options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file (deprecate), ' 'change to --cfg-options instead.') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file.') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) if args.options and args.cfg_options: raise ValueError( '--options and --cfg-options cannot be both ' 'specified, --options is deprecated in favor of --cfg-options') if args.options: warnings.warn('--options is deprecated in favor of --cfg-options') args.cfg_options = args.options return args def main(): args = parse_args() cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # import modules from string list. if cfg.get('custom_imports', None): from mmcv.utils import import_modules_from_strings import_modules_from_strings(**cfg['custom_imports']) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True # work_dir is determined in this priority: CLI > segment in file > filename if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None cfg.work_dir = args.work_dir elif cfg.get('work_dir', None) is None: # use config filename as default work_dir if cfg.work_dir is None cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0]) if args.resume_from is not None: cfg.resume_from = args.resume_from if args.gpu_ids is not None: cfg.gpu_ids = args.gpu_ids else: cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) # init distributed env first, since logger depends on the dist info. 
if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # create work_dir mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) # dump config cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) # init the logger before other steps timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) log_file = osp.join(cfg.work_dir, f'{timestamp}.log') logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) # init the meta dict to record some important information such as # environment info and seed, which will be logged meta = dict() # log env info env_info_dict = collect_env() env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) dash_line = '-' * 60 + '\n' logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line) meta['env_info'] = env_info meta['config'] = cfg.pretty_text # log some basic info logger.info(f'Distributed training: {distributed}') logger.info(f'Config:\n{cfg.pretty_text}') # set random seeds if args.seed is not None: logger.info(f'Set random seed to {args.seed}, ' f'deterministic: {args.deterministic}') set_random_seed(args.seed, deterministic=args.deterministic) cfg.seed = args.seed meta['seed'] = args.seed meta['exp_name'] = osp.basename(args.config) model = build_detector( cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) datasets = [build_dataset(cfg.data.train)] if len(cfg.workflow) == 2: val_dataset = copy.deepcopy(cfg.data.val) val_dataset.pipeline = cfg.data.train.pipeline datasets.append(build_dataset(val_dataset)) if cfg.checkpoint_config is not None: # save mmdet version, config file content and class names in # checkpoints as meta data cfg.checkpoint_config.meta = dict( mmdet_version=__version__ + get_git_hash()[:7], CLASSES=datasets[0].CLASSES) # add an attribute for visualization convenience model.CLASSES = datasets[0].CLASSES train_detector( model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta) if __name__ == '__main__': main()
1
21,641
Please clean up this unnecessary modification.
open-mmlab-mmdetection
py
@@ -44,8 +44,8 @@ describe('createElement(jsx)', () => { }); it('should set VNode#key property', () => { - expect(<div />).to.have.property('key').that.is.undefined; - expect(<div a="a" />).to.have.property('key').that.is.undefined; + expect(<div />).to.have.property('key').that.is.empty; + expect(<div a="a" />).to.have.property('key').that.is.empty; expect(<div key="1" />).to.have.property('key', '1'); });
1
import { createElement } from '../../'; import { expect } from 'chai'; const h = createElement; /** @jsx createElement */ /*eslint-env browser, mocha */ // const buildVNode = (nodeName, attributes, children=[]) => ({ // nodeName, // children, // attributes, // key: attributes && attributes.key // }); describe('createElement(jsx)', () => { it('should return a VNode', () => { let r; expect(() => (r = h('foo'))).not.to.throw(); expect(r).to.be.an('object'); // expect(r).to.be.an.instanceof(VNode); expect(r).to.have.property('type', 'foo'); expect(r) .to.have.property('props') .that.eql({}); // expect(r).to.have.deep.property('props.children').that.eql(null); }); it('should set VNode#type property', () => { expect(<div />).to.have.property('type', 'div'); function Test() { return <div />; } expect(<Test />).to.have.property('type', Test); }); it('should set VNode.constructor property to prevent json injection', () => { const vnode = <span />; expect(vnode.constructor).to.equal(undefined); }); it('should set VNode#props property', () => { const props = {}; expect(h('div', props).props).to.deep.equal(props); }); it('should set VNode#key property', () => { expect(<div />).to.have.property('key').that.is.undefined; expect(<div a="a" />).to.have.property('key').that.is.undefined; expect(<div key="1" />).to.have.property('key', '1'); }); it('should not set VNode#props.key property', () => { expect(<div />).to.not.have.nested.property('props.key'); expect(<div key="1" />).to.not.have.nested.property('props.key'); expect(<div key={0} />).to.not.have.nested.property('props.key'); expect(<div key={''} />).to.not.have.nested.property('props.key'); }); it('should set VNode#ref property', () => { expect(<div />).to.have.property('ref').that.is.undefined; expect(<div a="a" />).to.have.property('ref').that.is.undefined; const emptyFunction = () => {}; expect(<div ref={emptyFunction} />).to.have.property('ref', emptyFunction); }); it('should not set VNode#props.ref property', () => { expect(<div />).to.not.have.nested.property('props.ref'); expect(<div ref={() => {}} />).to.not.have.nested.property('props.ref'); }); it('should have ordered VNode properties', () => { expect(Object.keys(<div />).filter(key => !/^_/.test(key))).to.deep.equal([ 'type', 'props', 'key', 'ref', 'constructor' ]); }); it('should preserve raw props', () => { let props = { foo: 'bar', baz: 10, func: () => {} }, r = h('foo', props); expect(r) .to.be.an('object') .with.property('props') .that.deep.equals(props); }); it('should support element children', () => { let kid1 = h('bar'); let kid2 = h('baz'); let r = h('foo', null, kid1, kid2); expect(r) .to.be.an('object') .with.nested.deep.property('props.children', [kid1, kid2]); }); it('should support multiple element children, given as arg list', () => { let kid1 = h('bar'); let kid3 = h('test'); let kid2 = h('baz', null, kid3); let r = h('foo', null, kid1, kid2); expect(r) .to.be.an('object') .with.nested.deep.property('props.children', [kid1, kid2]); }); it('should handle multiple element children, given as an array', () => { let kid1 = h('bar'); let kid3 = h('test'); let kid2 = h('baz', null, kid3); let r = h('foo', null, [kid1, kid2]); expect(r) .to.be.an('object') .with.nested.deep.property('props.children', [kid1, kid2]); }); it('should support nested children', () => { const m = x => h(x); expect(h('foo', null, m('a'), [m('b'), m('c')], m('d'))) .to.have.nested.property('props.children') .that.eql([m('a'), [m('b'), m('c')], m('d')]); expect(h('foo', null, [m('a'), [m('b'), m('c')], 
m('d')])) .to.have.nested.property('props.children') .that.eql([m('a'), [m('b'), m('c')], m('d')]); expect(h('foo', { children: [m('a'), [m('b'), m('c')], m('d')] })) .to.have.nested.property('props.children') .that.eql([m('a'), [m('b'), m('c')], m('d')]); expect(h('foo', { children: [[m('a'), [m('b'), m('c')], m('d')]] })) .to.have.nested.property('props.children') .that.eql([[m('a'), [m('b'), m('c')], m('d')]]); expect(h('foo', { children: m('a') })) .to.have.nested.property('props.children') .that.eql(m('a')); expect(h('foo', { children: 'a' })) .to.have.nested.property('props.children') .that.eql('a'); }); it('should support text children', () => { let r = h('foo', null, 'textstuff'); expect(r) .to.be.an('object') .with.nested.property('props.children') .that.equals('textstuff'); }); it('should NOT merge adjacent text children', () => { let r = h( 'foo', null, 'one', 'two', h('bar'), 'three', h('baz'), h('baz'), 'four', null, 'five', 'six' ); expect(r) .to.be.an('object') .with.nested.property('props.children') .that.deep.equals([ 'one', 'two', h('bar'), 'three', h('baz'), h('baz'), 'four', null, 'five', 'six' ]); }); it('should not merge nested adjacent text children', () => { let r = h( 'foo', null, 'one', ['two', null, 'three'], null, ['four', null, 'five', null], 'six', null ); expect(r) .to.be.an('object') .with.nested.property('props.children') .that.deep.equals([ 'one', ['two', null, 'three'], null, ['four', null, 'five', null], 'six', null ]); }); it('should not merge children that are boolean values', () => { let r = h('foo', null, 'one', true, 'two', false, 'three'); expect(r) .to.be.an('object') .with.nested.property('props.children') .that.deep.equals(['one', true, 'two', false, 'three']); }); it('should not merge children of components', () => { let Component = ({ children }) => children; let r = h(Component, null, 'x', 'y'); expect(r) .to.be.an('object') .with.nested.property('props.children') .that.deep.equals(['x', 'y']); }); it('should ignore props.children if children are manually specified', () => { expect( <div a children={['a', 'b']}> c </div> ).to.eql(<div a>c</div>); }); });
1
14,732
I think these assertions want to be `.to.not.exist`, which would pass for `null` or `undefined`.
preactjs-preact
js
@@ -24,11 +24,9 @@ import ( ) func ExampleOpenKeeper() { - // This example is used in https://gocloud.dev/howto/secrets/#vault-ctor - - // import _ "gocloud.dev/secrets/hashivault" - - // Variables set up elsewhere: + // PRAGMA(gocloud.dev): Package this example for gocloud.dev. + // PRAGMA(gocloud.dev): Add a blank import: _ "gocloud.dev/secrets/hashivault" + // PRAGMA(gocloud.dev): Skip until next blank line. ctx := context.Background() // Get a client to use with the Vault API.
1
// Copyright 2019 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package hashivault_test import ( "context" "log" "github.com/hashicorp/vault/api" "gocloud.dev/secrets" "gocloud.dev/secrets/hashivault" ) func ExampleOpenKeeper() { // This example is used in https://gocloud.dev/howto/secrets/#vault-ctor // import _ "gocloud.dev/secrets/hashivault" // Variables set up elsewhere: ctx := context.Background() // Get a client to use with the Vault API. client, err := hashivault.Dial(ctx, &hashivault.Config{ Token: "CLIENT_TOKEN", APIConfig: api.Config{ Address: "http://127.0.0.1:8200", }, }) if err != nil { log.Fatal(err) } // Construct a *secrets.Keeper. keeper := hashivault.OpenKeeper(client, "my-key", nil) defer keeper.Close() } func Example_openFromURL() { // This example is used in https://gocloud.dev/howto/secrets/#vault // import _ "gocloud.dev/secrets/hashivault" // Variables set up elsewhere: ctx := context.Background() keeper, err := secrets.OpenKeeper(ctx, "hashivault://mykey") if err != nil { log.Fatal(err) } defer keeper.Close() }
1
19,590
Don't think you need this since this is not a URL example.
google-go-cloud
go
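For context on the review above: in gocloud.dev, the blank driver import is what registers the URL opener used by `secrets.OpenKeeper`, while the constructor form calls the `hashivault` package directly, so only the URL example depends on it. A minimal Go sketch contrasting the two forms, assembled from the record's original file:

```go
package hashivault_test

import (
	"context"
	"log"

	"github.com/hashicorp/vault/api"
	"gocloud.dev/secrets"
	"gocloud.dev/secrets/hashivault"
)

// Constructor form: uses the hashivault package directly, so it does not rely
// on URL-opener registration at all.
func ExampleOpenKeeper() {
	ctx := context.Background()

	// Get a client to use with the Vault API.
	client, err := hashivault.Dial(ctx, &hashivault.Config{
		Token: "CLIENT_TOKEN",
		APIConfig: api.Config{
			Address: "http://127.0.0.1:8200",
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Construct a *secrets.Keeper.
	keeper := hashivault.OpenKeeper(client, "my-key", nil)
	defer keeper.Close()
}

// URL form: this is the example that needs the driver registered, normally via
// a blank import: _ "gocloud.dev/secrets/hashivault".
func Example_openFromURL() {
	ctx := context.Background()

	keeper, err := secrets.OpenKeeper(ctx, "hashivault://mykey")
	if err != nil {
		log.Fatal(err)
	}
	defer keeper.Close()
}
```

Which pragma the reviewer means is not spelled out in the message; the sketch only illustrates why the distinction between the two example styles matters.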
@@ -43,6 +43,14 @@ class TestFakerNumber < Test::Unit::TestCase assert @tester.decimal(l_digits: 4, r_digits: 5).to_s.match(/[0-9]{4}\.[0-9]{5}/) end + def test_decimal_within + assert @tester.decimal_within(l_digits: 1, r_digits: 1, range: 0..1).to_s.match(/[0-1]{1}\.[0-1]{1}/) + assert @tester.decimal_within(l_digits: 1).to_s.match(/[0-9]{1}\.[0-9]{2}/) + assert @tester.decimal(l_digits: 4, r_digits: 5).to_s.match(/[0-9]{4}\.[0-9]{5}/) + assert @tester.decimal_within(l_digits: 3, r_digits: 3, range: 1..100).to_s.match(/[0-9]*\.[0-9]*/) + assert @tester.decimal_within(l_digits: 3, r_digits: 3, range: 1..1000).to_s.match(/[0-9]*\.[0-9]*/) + end + def test_digit assert @tester.digit.to_s.match(/[0-9]{1}/) assert((1..1000).collect { |_i| @tester.digit == 9 }.include?(true))
1
# frozen_string_literal: true require_relative '../../test_helper' require 'minitest/mock' class TestFakerNumber < Test::Unit::TestCase def setup @tester = Faker::Number end def test_leading_zero_number assert_match(/^0[0-9]{9}/, @tester.leading_zero_number) assert_match(/^0[0-9]{8}/, @tester.leading_zero_number(digits: 9)) end def test_number assert @tester.number(digits: 10).to_s.match(/[0-9]{10}/) 10.times do |digits| digits += 1 assert @tester.number(digits: digits).to_s.match(/^[0-9]{#{digits}}$/) end assert @tester.number(digits: 10).to_s.length == 10 assert @tester.number(digits: 1).to_s.length == 1 end def test_number_with_one_digit random_number = 4 in_range = lambda { |range| assert_equal(0..9, range) random_number } Faker::Base.stub(:rand, in_range) do assert_equal(random_number, @tester.number(digits: 1)) end end def test_decimal assert @tester.decimal(l_digits: 1, r_digits: 1).to_s.match(/[0-9]{1}\.[0-9]{1}/) assert @tester.decimal(l_digits: 2).to_s.match(/[0-9]{2}\.[0-9]{2}/) assert @tester.decimal(l_digits: 4, r_digits: 5).to_s.match(/[0-9]{4}\.[0-9]{5}/) end def test_digit assert @tester.digit.to_s.match(/[0-9]{1}/) assert((1..1000).collect { |_i| @tester.digit == 9 }.include?(true)) end def test_even_distribution assert stats = {} assert times = 10_000 times.times do assert num = @tester.digit stats[num] ||= 0 assert stats[num] += 1 end stats.each do |_k, v| assert_in_delta 10.0, 100.0 * v / times, 2.0 end end def test_normal n = 10_000 values = Array.new(n) { @tester.normal(mean: 150.0, standard_deviation: 100.0) } mean = values.reduce(:+) / n.to_f variance = values.inject(0) { |var, value| var + (value - mean)**2 } / (n - 1).to_f std_dev = Math.sqrt variance assert_in_delta 150.0, mean, 5.0 assert_in_delta 100.0, std_dev, 3.0 end def test_between 100.times do random_number = @tester.between(from: -50, to: 50) assert random_number >= -50, "Expected >= -50, but got #{random_number}" assert random_number <= 50, "Expected <= 50, but got #{random_number}" end end def test_within 100.times do random_number = @tester.within(range: -50..50) assert random_number >= -50, "Expected >= -50, but got #{random_number}" assert random_number <= 50, "Expected <= 50, but got #{random_number}" end end def test_positive 100.times do random_number = @tester.positive(from: 1, to: 100) assert random_number >= 1, "Expected >= 1, but got #{random_number}" assert random_number <= 100, "Expected <= 100, but got #{random_number}" end end def test_negative 100.times do random_number = @tester.negative(from: -1, to: -100) assert random_number <= -1, "Expected <= -1, but got #{random_number}" assert random_number >= -100, "Expected >= -100, but got #{random_number}" end end def test_force_positive random_number = @tester.positive(from: -1, to: -100) assert random_number >= 1, "Expected >= 1, but got #{random_number}" assert random_number <= 100, "Expected <= 100, but got #{random_number}" end def test_force_negative random_number = @tester.negative(from: 1, to: 100) assert random_number <= -1, "Expected <= -1, but got #{random_number}" assert random_number >= -100, "Expected >= -100, but got #{random_number}" end def test_parameters_order random_number = @tester.between(from: 100, to: 1) assert random_number >= 1, "Expected >= 1, but got #{random_number}" assert random_number <= 100, "Expected <= 100, but got #{random_number}" end def test_hexadecimal assert @tester.hexadecimal(digits: 4).match(/[0-9a-f]{4}/) assert @tester.hexadecimal(digits: 7).match(/[0-9a-f]{7}/) end end
1
9,806
Maybe a test to check if the generated value is within the boundaries would be nice; what do you think?
faker-ruby-faker
rb
@@ -12,7 +12,7 @@ namespace AutoRest.Core.Utilities { public void WriteFile(string path, string contents) { - File.WriteAllText(path, contents, Encoding.UTF8); + File.WriteAllText(path, contents, new UTF8Encoding(false, true)); } /// <summary>
1
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. using System; using System.IO; using System.Net; using System.Text; namespace AutoRest.Core.Utilities { public class FileSystem : IFileSystem { public void WriteFile(string path, string contents) { File.WriteAllText(path, contents, Encoding.UTF8); } /// <summary> /// Returns whether or not that <paramref name="path"/> is an absolute URI or rooted path /// </summary> /// <param name="path"></param> /// <returns></returns> public bool IsCompletePath(string path) => Path.IsPathRooted(path) || Uri.IsWellFormedUriString(path, UriKind.Absolute); /// <summary> /// Roots the <paramref name="relativePath"/> using the <paramref name="rootPath"/> /// Works whether the <paramref name="rootPath"/> is an absolute URI (e.g. https://contoso.com/swaggers) /// or a rooted local URI (e.g. C:/swaggers/) /// </summary> /// <param name="rootPath"></param> /// <param name="relativePath"></param> /// <returns></returns> public string MakePathRooted(Uri rootPath, string relativePath) { var combined = new Uri(Path.Combine(rootPath.ToString(), relativePath)); return combined.IsAbsoluteUri ? combined.AbsoluteUri : combined.LocalPath; } public string ReadFileAsText(string path) { using (var client = new WebClient()) { client.Headers.Add("User-Agent: AutoRest"); client.Encoding = Encoding.UTF8; return client.DownloadString(path); } } public TextWriter GetTextWriter(string path) { if (File.Exists(path)) { return File.AppendText(path); } return File.CreateText(path); #if FORCE_UTF8_BOM // existing ARS files have utf8withbom. // only necessary for some weird manual testing :D var utf8WithBom = new System.Text.UTF8Encoding(true); return new StreamWriter(path, false, utf8WithBom); #endif } public bool FileExists(string path) { return File.Exists(path); } public void DeleteFile(string path) { if (File.Exists(path)) { File.Delete(path); } } public void DeleteDirectory(string directory) { Directory.Delete(directory, true); } public void EmptyDirectory(string directory) { foreach (var filePath in Directory.GetFiles(directory)) { File.Delete(filePath); } } public string[] GetFiles(string startDirectory, string filePattern, SearchOption options) { return Directory.GetFiles(startDirectory, filePattern, options); } public bool DirectoryExists(string path) { return Directory.Exists(path); } public void CreateDirectory(string path) { Directory.CreateDirectory(path); } public string[] GetDirectories(string startDirectory, string filePattern, SearchOption options) { return Directory.GetDirectories(startDirectory, filePattern, options); } public string GetCurrentDir() { return Directory.GetCurrentDirectory(); } public string GetParentDir(string path) { return Directory.GetParent(Path.Combine(Directory.GetCurrentDirectory(), path)).FullName; } } }
1
23,378
Suppresses UTF-8 BOM in outputs
Azure-autorest
.cs
@@ -65,10 +65,10 @@ type Config struct { Inbounds Inbounds Outbounds Outbounds - // Filter and Interceptor that will be applied to all outgoing and incoming - // requests respectively. - Filter transport.Filter - Interceptor transport.Interceptor + // Inbound and Outbound Middleware that will be applied to all outgoing and + // incoming requests respectively. + InboundMiddleware InboundMiddleware + OutboundMiddleware OutboundMiddleware Tracer opentracing.Tracer }
1
// Copyright (c) 2016 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package yarpc import ( "fmt" "sync" "go.uber.org/yarpc/internal/channel" "go.uber.org/yarpc/internal/errors" "go.uber.org/yarpc/internal/request" intsync "go.uber.org/yarpc/internal/sync" "go.uber.org/yarpc/transport" "github.com/opentracing/opentracing-go" ) // Dispatcher object is used to configure a YARPC application; it is used by // Clients to send RPCs, and by Procedures to recieve them. This object is what // enables an application to be transport-agnostic. type Dispatcher interface { transport.Registrar transport.ChannelProvider // Inbounds returns a copy of the list of inbounds for this RPC object. // // The Inbounds will be returned in the same order that was used in the // configuration. Inbounds() Inbounds // Starts the RPC allowing it to accept and processing new incoming // requests. // // Blocks until the RPC is ready to start accepting new requests. Start() error // Stops the RPC. No new requests will be accepted. // // Blocks until the RPC has stopped. Stop() error } // Config specifies the parameters of a new RPC constructed via New. type Config struct { Name string Inbounds Inbounds Outbounds Outbounds // Filter and Interceptor that will be applied to all outgoing and incoming // requests respectively. Filter transport.Filter Interceptor transport.Interceptor Tracer opentracing.Tracer } // Inbounds contains a list of inbound transports type Inbounds []transport.Inbound // Outbounds encapsulates a service and its outbounds type Outbounds map[string]transport.Outbounds // NewDispatcher builds a new Dispatcher using the specified Config. 
func NewDispatcher(cfg Config) Dispatcher { if cfg.Name == "" { panic("a service name is required") } return dispatcher{ Name: cfg.Name, Registrar: transport.NewMapRegistry(cfg.Name), inbounds: cfg.Inbounds, outbounds: convertOutbounds(cfg.Outbounds, cfg.Filter), Interceptor: cfg.Interceptor, deps: transport.NoDeps.WithTracer(cfg.Tracer), } } // convertOutbounds applys filters and creates validator outbounds func convertOutbounds(outbounds Outbounds, filter transport.Filter) Outbounds { //TODO(apb): ensure we're not given the same underlying outbound for each RPC type convertedOutbounds := make(Outbounds, len(outbounds)) for service, outs := range outbounds { var ( unaryOutbound transport.UnaryOutbound onewayOutbound transport.OnewayOutbound ) // apply filters and create ValidatorOutbounds if outs.Unary != nil { unaryOutbound = transport.ApplyFilter(outs.Unary, filter) unaryOutbound = request.UnaryValidatorOutbound{UnaryOutbound: unaryOutbound} } // TODO(apb): apply oneway outbound filter if outs.Oneway != nil { onewayOutbound = request.OnewayValidatorOutbound{OnewayOutbound: outs.Oneway} } convertedOutbounds[service] = transport.Outbounds{ Unary: unaryOutbound, Oneway: onewayOutbound, } } return convertedOutbounds } // dispatcher is the standard RPC implementation. // // It allows use of multiple Inbounds and Outbounds together. type dispatcher struct { transport.Registrar Name string inbounds Inbounds outbounds Outbounds Interceptor transport.Interceptor deps transport.Deps } func (d dispatcher) Inbounds() Inbounds { inbounds := make(Inbounds, len(d.inbounds)) copy(inbounds, d.inbounds) return inbounds } func (d dispatcher) Channel(service string) transport.Channel { if rs, ok := d.outbounds[service]; ok { return channel.MultiOutbound(d.Name, service, rs) } panic(noOutboundForService{Service: service}) } func (d dispatcher) Start() error { var ( mu sync.Mutex startedInbounds []transport.Inbound startedOutbounds []transport.Outbound ) service := transport.ServiceDetail{ Name: d.Name, Registry: d, } startInbound := func(i transport.Inbound) func() error { return func() error { if err := i.Start(service, d.deps); err != nil { return err } mu.Lock() startedInbounds = append(startedInbounds, i) mu.Unlock() return nil } } startOutbound := func(o transport.Outbound) func() error { return func() error { if o == nil { return nil } if err := o.Start(d.deps); err != nil { return err } mu.Lock() startedOutbounds = append(startedOutbounds, o) mu.Unlock() return nil } } var wait intsync.ErrorWaiter for _, i := range d.inbounds { wait.Submit(startInbound(i)) } // TODO record the name of the service whose outbound failed for _, o := range d.outbounds { wait.Submit(startOutbound(o.Unary)) wait.Submit(startOutbound(o.Oneway)) } errs := wait.Wait() if len(errs) == 0 { return nil } // Failed to start so stop everything that was started. wait = intsync.ErrorWaiter{} for _, i := range startedInbounds { wait.Submit(i.Stop) } for _, o := range startedOutbounds { wait.Submit(o.Stop) } if newErrors := wait.Wait(); len(newErrors) > 0 { errs = append(errs, newErrors...) 
} return errors.ErrorGroup(errs) } func (d dispatcher) Register(rs []transport.Registrant) { registrants := make([]transport.Registrant, 0, len(rs)) for _, r := range rs { switch r.HandlerSpec.Type() { case transport.Unary: h := transport.ApplyInterceptor(r.HandlerSpec.Unary(), d.Interceptor) r.HandlerSpec = transport.NewUnaryHandlerSpec(h) case transport.Oneway: //TODO(apb): add oneway interceptors https://github.com/yarpc/yarpc-go/issues/413 default: panic(fmt.Sprintf("unknown handler type %q for service %q, procedure %q", r.HandlerSpec.Type(), r.Service, r.Procedure)) } registrants = append(registrants, r) } d.Registrar.Register(registrants) } func (d dispatcher) Stop() error { var wait intsync.ErrorWaiter for _, i := range d.inbounds { wait.Submit(i.Stop) } for _, o := range d.outbounds { if o.Unary != nil { wait.Submit(o.Unary.Stop) } if o.Oneway != nil { wait.Submit(o.Oneway.Stop) } } if errs := wait.Wait(); len(errs) > 0 { return errors.ErrorGroup(errs) } return nil }
1
11,381
The word order is wrong in "outgoing and incoming": since InboundMiddleware is listed first, the comment should read "incoming and outgoing" for "respectively" to hold.
yarpc-yarpc-go
go
@@ -43,6 +43,7 @@ public class MemoryCircuitBreaker extends CircuitBreaker { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); private static final MemoryMXBean MEMORY_MX_BEAN = ManagementFactory.getMemoryMXBean(); + private boolean isMemoryCircuitBreakerEnabled; private final long heapMemoryThreshold; // Assumption -- the value of these parameters will be set correctly before invoking getDebugInfo()
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.util.circuitbreaker; import java.lang.invoke.MethodHandles; import java.lang.management.ManagementFactory; import java.lang.management.MemoryMXBean; import org.apache.solr.core.SolrConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * <p> * Tracks the current JVM heap usage and triggers if it exceeds the defined percentage of the maximum * heap size allocated to the JVM. This circuit breaker is a part of the default CircuitBreakerManager * so is checked for every request -- hence it is realtime. Once the memory usage goes below the threshold, * it will start allowing queries again. * </p> * * <p> * The memory threshold is defined as a percentage of the maximum memory allocated -- see memoryCircuitBreakerThresholdPct * in solrconfig.xml. * </p> */ public class MemoryCircuitBreaker extends CircuitBreaker { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); private static final MemoryMXBean MEMORY_MX_BEAN = ManagementFactory.getMemoryMXBean(); private final long heapMemoryThreshold; // Assumption -- the value of these parameters will be set correctly before invoking getDebugInfo() private final ThreadLocal<Long> seenMemory = new ThreadLocal<>(); private final ThreadLocal<Long> allowedMemory = new ThreadLocal<>(); public MemoryCircuitBreaker(SolrConfig solrConfig) { super(solrConfig); long currentMaxHeap = MEMORY_MX_BEAN.getHeapMemoryUsage().getMax(); if (currentMaxHeap <= 0) { throw new IllegalArgumentException("Invalid JVM state for the max heap usage"); } int thresholdValueInPercentage = solrConfig.memoryCircuitBreakerThresholdPct; double thresholdInFraction = thresholdValueInPercentage / (double) 100; heapMemoryThreshold = (long) (currentMaxHeap * thresholdInFraction); if (heapMemoryThreshold <= 0) { throw new IllegalStateException("Memory limit cannot be less than or equal to zero"); } } // TODO: An optimization can be to trip the circuit breaker for a duration of time // after the circuit breaker condition is matched. This will optimize for per call // overhead of calculating the condition parameters but can result in false positives. 
@Override public boolean isTripped() { if (!isEnabled()) { return false; } long localAllowedMemory = getCurrentMemoryThreshold(); long localSeenMemory = calculateLiveMemoryUsage(); allowedMemory.set(localAllowedMemory); seenMemory.set(localSeenMemory); return (localSeenMemory >= localAllowedMemory); } @Override public String getDebugInfo() { if (seenMemory.get() == 0L || allowedMemory.get() == 0L) { log.warn("MemoryCircuitBreaker's monitored values (seenMemory, allowedMemory) not set"); } return "seenMemory=" + seenMemory.get() + " allowedMemory=" + allowedMemory.get(); } private long getCurrentMemoryThreshold() { return heapMemoryThreshold; } /** * Calculate the live memory usage for the system. This method has package visibility * to allow using for testing. * @return Memory usage in bytes. */ protected long calculateLiveMemoryUsage() { // NOTE: MemoryUsageGaugeSet provides memory usage statistics but we do not use them // here since it will require extra allocations and incur cost, hence it is cheaper to use // MemoryMXBean directly. Ideally, this call should not add noticeable // latency to a query -- but if it does, please signify on SOLR-14588 return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed(); } }
1
36,273
We know it's a boolean and it's in the MemoryCircuitBreaker, so why not simply call it `enabled` (like many other Solr plugins do)?
apache-lucene-solr
java
@@ -0,0 +1,5 @@ +class AddIncludesTeamToIndividualPlans < ActiveRecord::Migration + def change + add_column :individual_plans, :includes_team, :boolean, default: false + end +end
1
1
11,356
Should team be a "feature" that can be "included" or a type? In code it seems to me that a `team` flag makes sense.
thoughtbot-upcase
rb
@@ -172,6 +172,9 @@ type OutputInfo struct { } func (i *InputParams) updateParams(p *string) { + logParamsMutex.Lock() + defer logParamsMutex.Unlock() + if p == nil { return }
1
// Copyright 2019 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package service // logRequest is a server-side pre-defined data structure type logRequest struct { ClientInfo clientInfo `json:"client_info"` LogSource int64 `json:"log_source"` RequestTimeMs int64 `json:"request_time_ms"` LogEvent []logEvent `json:"log_event"` } // ClientInfo is a server-side pre-defined data structure type clientInfo struct { // ClientType is defined on server side to clarify which client library is used. ClientType string `json:"client_type"` } // LogEvent is a server-side pre-defined data structure type logEvent struct { EventTimeMs int64 `json:"event_time_ms"` EventUptimeMs int64 `json:"event_uptime_ms"` SourceExtensionJSON string `json:"source_extension_json"` } // logResponse is a server-side pre-defined data structure type logResponse struct { NextRequestWaitMillis int64 `json:"NextRequestWaitMillis,string"` LogResponseDetails []logResponseDetails `json:"LogResponseDetails"` } // LogResponseDetails is a server-side pre-defined data structure type logResponseDetails struct { ResponseAction responseAction `json:"ResponseAction"` } // ResponseAction is a server-side pre-defined data structure type responseAction string const ( // responseActionUnknown - If the client sees this, it should delete the logRequest (not retry). // It may indicate that a new response action was added, which the client // doesn't yet understand. (Deleting rather than retrying will prevent // infinite loops.) The server will do whatever it can to prevent this // occurring (by not indicating an action to clients that are behind the // requisite version for the action). responseActionUnknown responseAction = "RESPONSE_ACTION_UNKNOWN" // retryRequestLater - The client should retry the request later, via normal scheduling. retryRequestLater responseAction = "RETRY_REQUEST_LATER" // deleteRequest - The client should delete the request. This action will apply for // successful requests, and non-retryable requests. deleteRequest responseAction = "DELETE_REQUEST" ) // ComputeImageToolsLogExtension contains all log info, which should be align with sawmill server side configuration. type ComputeImageToolsLogExtension struct { // This id is a random guid for correlation among multiple log lines of a single call ID string `json:"id"` CloudBuildID string `json:"cloud_build_id"` ToolAction string `json:"tool_action"` Status string `json:"status"` ElapsedTimeMs int64 `json:"elapsed_time_ms"` EventTimeMs int64 `json:"event_time_ms"` InputParams *InputParams `json:"input_params,omitempty"` OutputInfo *OutputInfo `json:"output_info,omitempty"` } // InputParams contains the union of all APIs' param info. To simplify logging service, we // avoid defining different schemas for each API. 
type InputParams struct { ImageImportParams *ImageImportParams `json:"image_import_input_params,omitempty"` ImageExportParams *ImageExportParams `json:"image_export_input_params,omitempty"` InstanceImportParams *InstanceImportParams `json:"instance_import_input_params,omitempty"` } // ImageImportParams contains all input params for image import type ImageImportParams struct { *CommonParams ImageName string `json:"image_name,omitempty"` DataDisk bool `json:"data_disk"` OS string `json:"os,omitempty"` SourceFile string `json:"source_file,omitempty"` SourceImage string `json:"source_image,omitempty"` NoGuestEnvironment bool `json:"no_guest_environment"` Family string `json:"family,omitempty"` Description string `json:"description,omitempty"` NoExternalIP bool `json:"no_external_ip"` HasKmsKey bool `json:"has_kms_key"` HasKmsKeyring bool `json:"has_kms_keyring"` HasKmsLocation bool `json:"has_kms_location"` HasKmsProject bool `json:"has_kms_project"` StorageLocation string `json:"storage_location,omitempty"` } // ImageExportParams contains all input params for image export type ImageExportParams struct { *CommonParams DestinationURI string `json:"destination_uri,omitempty"` SourceImage string `json:"source_image,omitempty"` Format string `json:"format,omitempty"` } // InstanceImportParams contains all input params for instance import type InstanceImportParams struct { *CommonParams InstanceName string `json:"instance_name,omitempty"` OvfGcsPath string `json:"ovf_gcs_path,omitempty"` CanIPForward bool `json:"can_ip_forward"` DeletionProtection bool `json:"deletion_protection"` MachineType string `json:"machine_type,omitempty"` NetworkInterface string `json:"network_interface,omitempty"` NetworkTier string `json:"network_tier,omitempty"` PrivateNetworkIP string `json:"private_network_ip,omitempty"` NoExternalIP bool `json:"no_external_ip,omitempty"` NoRestartOnFailure bool `json:"no_restart_on_failure"` OS string `json:"os,omitempty"` ShieldedIntegrityMonitoring bool `json:"shielded_integrity_monitoring"` ShieldedSecureBoot bool `json:"shielded_secure_boot"` ShieldedVtpm bool `json:"shielded_vtpm"` Tags string `json:"tags,omitempty"` HasBootDiskKmsKey bool `json:"has_boot_disk_kms_key"` HasBootDiskKmsKeyring bool `json:"has_boot_disk_kms_keyring"` HasBootDiskKmsLocation bool `json:"has_boot_disk_kms_location"` HasBootDiskKmsProject bool `json:"has_boot_disk_kms_project"` NoGuestEnvironment bool `json:"no_guest_environment"` NodeAffinityLabel string `json:"node_affinity_label,omitempty"` } // CommonParams is only used to organize the code without impacting hierarchy of data type CommonParams struct { ClientID string `json:"client_id,omitempty"` Network string `json:"network,omitempty"` Subnet string `json:"subnet,omitempty"` Zone string `json:"zone,omitempty"` Timeout string `json:"timeout,omitempty"` Project string `json:"project,omitempty"` ObfuscatedProject string `json:"obfuscated_project,omitempty"` Labels string `json:"labels,omitempty"` ScratchBucketGcsPath string `json:"scratch_bucket_gcs_path,omitempty"` Oauth string `json:"oauth,omitempty"` ComputeEndpointOverride string `json:"compute_endpoint_override,omitempty"` DisableGcsLogging bool `json:"disable_gcs_logging"` DisableCloudLogging bool `json:"disable_cloud_logging"` DisableStdoutLogging bool `json:"disable_stdout_logging"` } // OutputInfo contains output values from the tools execution type OutputInfo struct { // Size of import/export sources (image or file) SourcesSizeGb []int64 `json:"sources_size_gb,omitempty"` // Size of 
import/export targets (image or file) TargetsSizeGb []int64 `json:"targets_size_gb,omitempty"` // Failure message of the command FailureMessage string `json:"failure_message,omitempty"` // Failure message of the command without privacy info FailureMessageWithoutPrivacyInfo string `json:"failure_message_without_privacy_info,omitempty"` } func (i *InputParams) updateParams(p *string) { if p == nil { return } project := *p obfuscatedProject := Hash(project) if i.ImageImportParams != nil { i.ImageImportParams.CommonParams.Project = project i.ImageImportParams.CommonParams.ObfuscatedProject = obfuscatedProject } if i.ImageExportParams != nil { i.ImageExportParams.CommonParams.Project = project i.ImageExportParams.CommonParams.ObfuscatedProject = obfuscatedProject } if i.InstanceImportParams != nil { i.InstanceImportParams.CommonParams.Project = project i.InstanceImportParams.CommonParams.ObfuscatedProject = obfuscatedProject } }
1
9,585
Why is this called `updateParams` when it's updating project info? Should `p` be called `project`?
GoogleCloudPlatform-compute-image-tools
go
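A sketch of the rename the review above seems to suggest; `updateProjectInfo` and the `project` parameter name are hypothetical, the mutex lines come from the patch under review, and the rest follows the record's original file (it would sit in the same `service` package, next to `Hash` and `logParamsMutex`):

```go
// updateProjectInfo is a hypothetical rename of updateParams: the method only
// fills in project and obfuscated-project fields, so the name and parameter
// say exactly that.
func (i *InputParams) updateProjectInfo(project *string) {
	logParamsMutex.Lock() // added by the patch under review
	defer logParamsMutex.Unlock()

	if project == nil {
		return
	}

	p := *project
	obfuscatedProject := Hash(p)

	if i.ImageImportParams != nil {
		i.ImageImportParams.CommonParams.Project = p
		i.ImageImportParams.CommonParams.ObfuscatedProject = obfuscatedProject
	}
	if i.ImageExportParams != nil {
		i.ImageExportParams.CommonParams.Project = p
		i.ImageExportParams.CommonParams.ObfuscatedProject = obfuscatedProject
	}
	if i.InstanceImportParams != nil {
		i.InstanceImportParams.CommonParams.Project = p
		i.InstanceImportParams.CommonParams.ObfuscatedProject = obfuscatedProject
	}
}
```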
@@ -44,7 +44,8 @@ const ( // below are templates for history_tree table addHistoryTreeQuery = `INSERT INTO history_tree (` + `shard_id, tree_id, branch_id, data, data_encoding) ` + - `VALUES (:shard_id, :tree_id, :branch_id, :data, :data_encoding) ` + `VALUES (:shard_id, :tree_id, :branch_id, :data, :data_encoding) ` + + `ON DUPLICATE KEY UPDATE data=VALUES(data), data_encoding=VALUES(data_encoding)` getHistoryTreeQuery = `SELECT branch_id, data, data_encoding FROM history_tree WHERE shard_id = ? AND tree_id = ? `
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package mysql import ( "database/sql" "go.temporal.io/server/common/persistence/sql/sqlplugin" ) const ( // below are templates for history_node table addHistoryNodesQuery = `INSERT INTO history_node (` + `shard_id, tree_id, branch_id, node_id, txn_id, data, data_encoding) ` + `VALUES (:shard_id, :tree_id, :branch_id, :node_id, :txn_id, :data, :data_encoding) ` getHistoryNodesQuery = `SELECT node_id, txn_id, data, data_encoding FROM history_node ` + `WHERE shard_id = ? AND tree_id = ? AND branch_id = ? AND node_id >= ? and node_id < ? ORDER BY shard_id, tree_id, branch_id, node_id, txn_id LIMIT ? ` deleteHistoryNodesQuery = `DELETE FROM history_node WHERE shard_id = ? AND tree_id = ? AND branch_id = ? AND node_id >= ? ` // below are templates for history_tree table addHistoryTreeQuery = `INSERT INTO history_tree (` + `shard_id, tree_id, branch_id, data, data_encoding) ` + `VALUES (:shard_id, :tree_id, :branch_id, :data, :data_encoding) ` getHistoryTreeQuery = `SELECT branch_id, data, data_encoding FROM history_tree WHERE shard_id = ? AND tree_id = ? ` deleteHistoryTreeQuery = `DELETE FROM history_tree WHERE shard_id = ? AND tree_id = ? AND branch_id = ? 
` ) // For history_node table: // InsertIntoHistoryNode inserts a row into history_node table func (mdb *db) InsertIntoHistoryNode(row *sqlplugin.HistoryNodeRow) (sql.Result, error) { // NOTE: Query 5.6 doesn't support clustering order, to workaround, we let txn_id multiple by -1 row.TxnID = -row.TxnID return mdb.conn.NamedExec(addHistoryNodesQuery, row) } // SelectFromHistoryNode reads one or more rows from history_node table func (mdb *db) SelectFromHistoryNode(filter sqlplugin.HistoryNodeSelectFilter) ([]sqlplugin.HistoryNodeRow, error) { var rows []sqlplugin.HistoryNodeRow err := mdb.conn.Select(&rows, getHistoryNodesQuery, filter.ShardID, filter.TreeID, filter.BranchID, filter.MinNodeID, filter.MaxNodeID, filter.PageSize) // NOTE: since we let txn_id multiple by -1 when inserting, we have to revert it back here for index := range rows { rows[index].TxnID = -rows[index].TxnID } return rows, err } // DeleteFromHistoryNode deletes one or more rows from history_node table func (mdb *db) DeleteFromHistoryNode(filter sqlplugin.HistoryNodeDeleteFilter) (sql.Result, error) { return mdb.conn.Exec(deleteHistoryNodesQuery, filter.ShardID, filter.TreeID, filter.BranchID, filter.MinNodeID) } // For history_tree table: // InsertIntoHistoryTree inserts a row into history_tree table func (mdb *db) InsertIntoHistoryTree(row *sqlplugin.HistoryTreeRow) (sql.Result, error) { return mdb.conn.NamedExec(addHistoryTreeQuery, row) } // SelectFromHistoryTree reads one or more rows from history_tree table func (mdb *db) SelectFromHistoryTree(filter sqlplugin.HistoryTreeSelectFilter) ([]sqlplugin.HistoryTreeRow, error) { var rows []sqlplugin.HistoryTreeRow err := mdb.conn.Select(&rows, getHistoryTreeQuery, filter.ShardID, filter.TreeID) return rows, err } // DeleteFromHistoryTree deletes one or more rows from history_tree table func (mdb *db) DeleteFromHistoryTree(filter sqlplugin.HistoryTreeDeleteFilter) (sql.Result, error) { return mdb.conn.Exec(deleteHistoryTreeQuery, filter.ShardID, filter.TreeID, filter.BranchID) }
1
10,912
`upsertHistoryTreeQuery` is a better name for this query now. Is it ok to change history?
temporalio-temporal
go
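A sketch of the rename suggested in the review above; only the constant name changes (`upsertHistoryTreeQuery`, per the comment), with the SQL taken from the patch and the wrapper method from the record's original file:

```go
// upsertHistoryTreeQuery: same statement as in the patch, renamed because the
// ON DUPLICATE KEY UPDATE clause makes it an upsert rather than a plain insert.
const upsertHistoryTreeQuery = `INSERT INTO history_tree (` +
	`shard_id, tree_id, branch_id, data, data_encoding) ` +
	`VALUES (:shard_id, :tree_id, :branch_id, :data, :data_encoding) ` +
	`ON DUPLICATE KEY UPDATE data=VALUES(data), data_encoding=VALUES(data_encoding)`

// InsertIntoHistoryTree inserts (now effectively upserts) a row into the
// history_tree table.
func (mdb *db) InsertIntoHistoryTree(row *sqlplugin.HistoryTreeRow) (sql.Result, error) {
	return mdb.conn.NamedExec(upsertHistoryTreeQuery, row)
}
```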
@@ -33,8 +33,12 @@ namespace Nethermind.Core } string date = DateTime.Now.ToString("yyyyMMdd"); string gitTag = File.Exists(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "git-hash")) ? File.ReadAllText(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "git-hash")).Trim().Replace("g", "") : string.Empty; - - Description = $"Nethermind/v{gitTag}-{date}/{RuntimeInformation.OSArchitecture}-{osDescription}/{RuntimeInformation.FrameworkDescription.Trim().Replace(".NET ", "").Replace(" ", "")}"; + string gitBranch = File.Exists(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "git-branch")) ? File.ReadAllText(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "git-branch")).Trim().Replace("/", "-") : string.Empty; + if (gitBranch != "master") + { + gitTag = gitTag.Insert(5, "b"); + } + Description = $"Nethermind/v{gitTag}-{date}/{gitBranch}/{RuntimeInformation.OSArchitecture}-{osDescription}/{RuntimeInformation.FrameworkDescription.Trim().Replace(".NET ", "").Replace(" ", "")}"; } public static string Version { get; }
1
// Copyright (c) 2018 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using System.IO; using System.Runtime.InteropServices; using System.Text.RegularExpressions; namespace Nethermind.Core { public static class ClientVersion { static ClientVersion() { string osDescription = RuntimeInformation.OSDescription; if (osDescription.Contains('#')) { int indexOfHash = osDescription.IndexOf('#'); osDescription = osDescription.Substring(0, Math.Max(0, indexOfHash - 1)); } string date = DateTime.Now.ToString("yyyyMMdd"); string gitTag = File.Exists(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "git-hash")) ? File.ReadAllText(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "git-hash")).Trim().Replace("g", "") : string.Empty; Description = $"Nethermind/v{gitTag}-{date}/{RuntimeInformation.OSArchitecture}-{osDescription}/{RuntimeInformation.FrameworkDescription.Trim().Replace(".NET ", "").Replace(" ", "")}"; } public static string Version { get; } public static string Description { get; } } }
1
23,016
The "b" did not mean branch; it meant the next version. So it is probably better to be able to release from a tag on the branch: we can create a hotfix branch from the 1.4.1 tag, tag it 1.4.1b, and then the version is picked up as 1.4.1b.
NethermindEth-nethermind
.cs
@@ -9,7 +9,6 @@ * Created on: Jul 26, 2017 * Author: William F Godoy [email protected] */ -#include <mpi.h> #include <ios> //std::ios_base::failure #include <iostream> //std::cout
1
/* * Distributed under the OSI-approved Apache License, Version 2.0. See * accompanying file Copyright.txt for details. * * helloBPSZWrapper.cpp: Simple self-descriptive example of how to write a * variable to a BP File that lives in several MPI processes and is compressed * with SZ http://www.bzip.org/ * * Created on: Jul 26, 2017 * Author: William F Godoy [email protected] */ #include <mpi.h> #include <ios> //std::ios_base::failure #include <iostream> //std::cout #include <numeric> //std::iota #include <stdexcept> //std::invalid_argument std::exception #include <vector> #include <adios2.h> int main(int argc, char *argv[]) { MPI_Init(&argc, &argv); int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); /** Application variable uints from 0 to 1000 */ std::vector<double> myvars(1000); std::iota(myvars.begin(), myvars.end(), 0.f); const std::size_t Nx = myvars.size(); const std::size_t inputBytes = Nx * sizeof(double); try { /** ADIOS class factory of IO class objects, DebugON is recommended */ adios2::ADIOS adios(MPI_COMM_WORLD, adios2::DebugON); // Get a Transform of type SZ adios2::Operator &adiosSZ = adios.DefineOperator("SZVariableCompressor", "SZ"); /*** IO class object: settings and factory of Settings: Variables, * Parameters, Transports, and Execution: Engines */ adios2::IO &bpIO = adios.DeclareIO("BPFile_N2N_SZ"); /** global array : name, { shape (total) }, { start (local) }, { count * (local) }, all are constant dimensions */ adios2::Variable<double> &var = bpIO.DefineVariable<double>( "var", {size * Nx}, {rank * Nx}, {Nx}, adios2::ConstantDims); // 1st way: adding transform metadata to variable to Engine can decide: // &adiosSZ gets mapped to bpUInts.TransformInfo[SZID].Operator const unsigned int SZID = var.AddTransform(adiosSZ, {{"foo", "0.01"}}); // 2nd way: treat Transforms as wrappers to underlying library. 
// you can redefine parameters const std::size_t estimatedSize = adiosSZ.BufferMaxSize(Nx * var.m_ElementSize); std::vector<char> compressedBuffer(estimatedSize); size_t compressedSize = adiosSZ.Compress( myvars.data(), var.m_Count, var.m_ElementSize, var.m_Type, compressedBuffer.data(), {{"accuracy", "0.01"}}); compressedBuffer.resize(compressedSize); std::cout << "Rank " << rank << "\n"; std::cout << "Compression summary:\n"; std::cout << "Input data size: " << inputBytes << " bytes\n"; std::cout << "SZ estimated output size: " << estimatedSize << " bytes\n"; std::cout << "SZ final output size: " << compressedSize << " bytes\n\n"; // Allocate original data size std::vector<double> decompressedBuffer(Nx); size_t decompressedSize = adiosSZ.Decompress( compressedBuffer.data(), compressedSize, decompressedBuffer.data(), var.m_Count, var.m_Type, var.m_OperatorsInfo[SZID].Parameters); std::cout << "Decompression summary:\n"; std::cout << "Decompressed size: " << decompressedSize << " bytes\n"; std::cout << "Data:\n"; for (int i = 0; i < decompressedBuffer.size(); i++) { if (i % 25 == 0) { std::cout << "\n"; } std::cout << decompressedBuffer[i] << " "; } std::cout << "\n"; } catch (std::invalid_argument &e) { std::cout << "Invalid argument exception, STOPPING PROGRAM from rank " << rank << "\n"; std::cout << e.what() << "\n"; } catch (std::ios_base::failure &e) { std::cout << "IO System base failure exception, STOPPING PROGRAM from rank " << rank << "\n"; std::cout << e.what() << "\n"; } catch (std::exception &e) { std::cout << "Exception, STOPPING PROGRAM from rank " << rank << "\n"; std::cout << e.what() << "\n"; } MPI_Finalize(); return 0; }
1
12,065
Keep the mpi.h include; just move it to after adios2.h and guard it with the ifdef. Otherwise everything looks good.
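A minimal sketch of that ordering. The ADIOS2_HAVE_MPI macro name is an assumption about what the installed adios2.h configuration header defines, so adjust it to whatever the real definition is:

// Sketch of the reviewer's suggestion: include adios2.h first, then mpi.h only
// when the ADIOS2 build was configured with MPI support.
#include <adios2.h>

#ifdef ADIOS2_HAVE_MPI
#include <mpi.h>
#endif

int main(int argc, char *argv[])
{
#ifdef ADIOS2_HAVE_MPI
    MPI_Init(&argc, &argv);
#endif
    // ... serial or MPI flavour of the example goes here ...
#ifdef ADIOS2_HAVE_MPI
    MPI_Finalize();
#endif
    return 0;
}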
ornladios-ADIOS2
cpp
@@ -72,8 +72,12 @@ type Config struct { // bind mounts are writtable. Readonlyfs bool `json:"readonlyfs"` - // Privatefs will mount the container's rootfs as private where mount points from the parent will not propogate - Privatefs bool `json:"privatefs"` + // RootfsMountMode is the rootfs mount propagation mode. + // On linux it is one of the followings: + // "private": rootfs is mounted as MS_PRIVATE + // "shared": rootfs is mounted as MS_SHARED + // "slave": rootfs is mounted as MS_SLAVE + RootfsMountMode PropagationMode `json:"root_mount_mode"` // Mounts specify additional source and destination paths that will be mounted inside the container's // rootfs and mount namespace if specified
1
package configs type Rlimit struct { Type int `json:"type"` Hard uint64 `json:"hard"` Soft uint64 `json:"soft"` } // IDMap represents UID/GID Mappings for User Namespaces. type IDMap struct { ContainerID int `json:"container_id"` HostID int `json:"host_id"` Size int `json:"size"` } type Seccomp struct { Syscalls []*Syscall `json:"syscalls"` } type Action int const ( Kill Action = iota - 3 Trap Allow ) type Operator int const ( EqualTo Operator = iota NotEqualTo GreatherThan LessThan MaskEqualTo ) type Arg struct { Index int `json:"index"` Value uint32 `json:"value"` Op Operator `json:"op"` } type Syscall struct { Value int `json:"value"` Action Action `json:"action"` Args []*Arg `json:"args"` } // TODO Windows. Many of these fields should be factored out into those parts // which are common across platforms, and those which are platform specific. // Config defines configuration options for executing a process inside a contained environment. type Config struct { // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs // This is a common option when the container is running in ramdisk NoPivotRoot bool `json:"no_pivot_root"` // ParentDeathSignal specifies the signal that is sent to the container's process in the case // that the parent process dies. ParentDeathSignal int `json:"parent_death_signal"` // PivotDir allows a custom directory inside the container's root filesystem to be used as pivot, when NoPivotRoot is not set. // When a custom PivotDir not set, a temporary dir inside the root filesystem will be used. The pivot dir needs to be writeable. // This is required when using read only root filesystems. In these cases, a read/writeable path can be (bind) mounted somewhere inside the root filesystem to act as pivot. PivotDir string `json:"pivot_dir"` // Path to a directory containing the container's root filesystem. Rootfs string `json:"rootfs"` // Readonlyfs will remount the container's rootfs as readonly where only externally mounted // bind mounts are writtable. Readonlyfs bool `json:"readonlyfs"` // Privatefs will mount the container's rootfs as private where mount points from the parent will not propogate Privatefs bool `json:"privatefs"` // Mounts specify additional source and destination paths that will be mounted inside the container's // rootfs and mount namespace if specified Mounts []*Mount `json:"mounts"` // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well! 
Devices []*Device `json:"devices"` MountLabel string `json:"mount_label"` // Hostname optionally sets the container's hostname if provided Hostname string `json:"hostname"` // Namespaces specifies the container's namespaces that it should setup when cloning the init process // If a namespace is not provided that namespace is shared from the container's parent process Namespaces Namespaces `json:"namespaces"` // Capabilities specify the capabilities to keep when executing the process inside the container // All capbilities not specified will be dropped from the processes capability mask Capabilities []string `json:"capabilities"` // Networks specifies the container's network setup to be created Networks []*Network `json:"networks"` // Routes can be specified to create entries in the route table as the container is started Routes []*Route `json:"routes"` // Cgroups specifies specific cgroup settings for the various subsystems that the container is // placed into to limit the resources the container has available Cgroups *Cgroup `json:"cgroups"` // AppArmorProfile specifies the profile to apply to the process running in the container and is // change at the time the process is execed AppArmorProfile string `json:"apparmor_profile"` // ProcessLabel specifies the label to apply to the process running in the container. It is // commonly used by selinux ProcessLabel string `json:"process_label"` // Rlimits specifies the resource limits, such as max open files, to set in the container // If Rlimits are not set, the container will inherit rlimits from the parent process Rlimits []Rlimit `json:"rlimits"` // AdditionalGroups specifies the gids that should be added to supplementary groups // in addition to those that the user belongs to. AdditionalGroups []string `json:"additional_groups"` // UidMappings is an array of User ID mappings for User Namespaces UidMappings []IDMap `json:"uid_mappings"` // GidMappings is an array of Group ID mappings for User Namespaces GidMappings []IDMap `json:"gid_mappings"` // MaskPaths specifies paths within the container's rootfs to mask over with a bind // mount pointing to /dev/null as to prevent reads of the file. MaskPaths []string `json:"mask_paths"` // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only // so that these files prevent any writes. ReadonlyPaths []string `json:"readonly_paths"` // Sysctl is a map of properties and their values. It is the equivalent of using // sysctl -w my.property.name value in Linux. Sysctl map[string]string `json:"sysctl"` // Seccomp allows actions to be taken whenever a syscall is made within the container. // By default, all syscalls are allowed with actions to allow, trap, kill, or return an errno // can be specified on a per syscall basis. Seccomp *Seccomp `json:"seccomp"` }
1
6,865
Shouldn't this be something like `rootmountmode` to fit the pattern of the other fields' serialized representations?
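For comparison, here is how the field might look with a serialized name in the same underscore-free, all-lowercase style as its neighbours (readonlyfs, privatefs). This is only a sketch of the reviewer's suggestion; PropagationMode is stubbed as a string here purely so the snippet stands alone:

package configs

// PropagationMode names the rootfs mount propagation mode: "private", "shared" or "slave".
type PropagationMode string

// exampleConfig shows just the two fields relevant to the naming question.
type exampleConfig struct {
	// Readonlyfs keeps its existing serialized name.
	Readonlyfs bool `json:"readonlyfs"`
	// RootfsMountMode follows the same style instead of "root_mount_mode".
	RootfsMountMode PropagationMode `json:"rootmountmode"`
}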
opencontainers-runc
go
@@ -747,9 +747,7 @@ void Cuda::impl_initialize(const Cuda::SelectDevice config, size_t /*num_instances*/) { Impl::CudaInternal::singleton().initialize(config.cuda_device_id, 0); -#if defined(KOKKOS_ENABLE_PROFILING) Kokkos::Profiling::initialize(); -#endif } std::vector<unsigned> Cuda::detect_device_arch() {
1
/* //@HEADER // ************************************************************************ // // Kokkos v. 3.0 // Copyright (2020) National Technology & Engineering // Solutions of Sandia, LLC (NTESS). // // Under the terms of Contract DE-NA0003525 with NTESS, // the U.S. Government retains certain rights in this software. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the Corporation nor the names of the // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact Christian R. 
Trott ([email protected]) // // ************************************************************************ //@HEADER */ /*--------------------------------------------------------------------------*/ /* Kokkos interfaces */ #include <Kokkos_Macros.hpp> #ifdef KOKKOS_ENABLE_CUDA #include <Kokkos_Core.hpp> #include <Cuda/Kokkos_Cuda_Error.hpp> #include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp> #include <Cuda/Kokkos_Cuda_Instance.hpp> #include <Cuda/Kokkos_Cuda_Locks.hpp> #include <impl/Kokkos_Error.hpp> #include <impl/Kokkos_Tools.hpp> /*--------------------------------------------------------------------------*/ /* Standard 'C' libraries */ #include <cstdlib> /* Standard 'C++' libraries */ #include <vector> #include <iostream> #include <sstream> #include <string> #ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION namespace Kokkos { namespace Impl { bool CudaInternal::kokkos_impl_cuda_use_serial_execution_v = false; void CudaInternal::cuda_set_serial_execution(bool val) { CudaInternal::kokkos_impl_cuda_use_serial_execution_v = val; } bool CudaInternal::cuda_use_serial_execution() { return CudaInternal::kokkos_impl_cuda_use_serial_execution_v; } } // namespace Impl } // namespace Kokkos void kokkos_impl_cuda_set_serial_execution(bool val) { Kokkos::Impl::CudaInternal::cuda_set_serial_execution(val); } bool kokkos_impl_cuda_use_serial_execution() { return Kokkos::Impl::CudaInternal::cuda_use_serial_execution(); } #endif #ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE __device__ __constant__ unsigned long kokkos_impl_cuda_constant_memory_buffer [Kokkos::Impl::CudaTraits::ConstantMemoryUsage / sizeof(unsigned long)]; #endif /*--------------------------------------------------------------------------*/ namespace Kokkos { namespace Impl { namespace { __global__ void query_cuda_kernel_arch(int *d_arch) { #if defined(__CUDA_ARCH__) *d_arch = __CUDA_ARCH__; #else *d_arch = 0; #endif } /** Query what compute capability is actually launched to the device: */ int cuda_kernel_arch() { int arch = 0; int *d_arch = nullptr; cudaMalloc((void **)&d_arch, sizeof(int)); cudaMemcpy(d_arch, &arch, sizeof(int), cudaMemcpyDefault); query_cuda_kernel_arch<<<1, 1>>>(d_arch); cudaMemcpy(&arch, d_arch, sizeof(int), cudaMemcpyDefault); cudaFree(d_arch); return arch; } #ifdef KOKKOS_ENABLE_CUDA_UVM bool cuda_launch_blocking() { const char *env = getenv("CUDA_LAUNCH_BLOCKING"); if (env == 0) return false; return std::stoi(env); } #endif } // namespace void cuda_device_synchronize() { CUDA_SAFE_CALL(cudaDeviceSynchronize()); } void cuda_internal_error_throw(cudaError e, const char *name, const char *file, const int line) { std::ostringstream out; out << name << " error( " << cudaGetErrorName(e) << "): " << cudaGetErrorString(e); if (file) { out << " " << file << ":" << line; } throw_runtime_exception(out.str()); } //---------------------------------------------------------------------------- // Some significant cuda device properties: // // cudaDeviceProp::name : Text label for device // cudaDeviceProp::major : Device major number // cudaDeviceProp::minor : Device minor number // cudaDeviceProp::warpSize : number of threads per warp // cudaDeviceProp::multiProcessorCount : number of multiprocessors // cudaDeviceProp::sharedMemPerBlock : capacity of shared memory per block // cudaDeviceProp::totalConstMem : capacity of constant memory // cudaDeviceProp::totalGlobalMem : capacity of global memory // cudaDeviceProp::maxGridSize[3] : maximum grid size // // Section 4.4.2.4 of the CUDA Toolkit Reference Manual // // struct 
cudaDeviceProp { // char name[256]; // size_t totalGlobalMem; // size_t sharedMemPerBlock; // int regsPerBlock; // int warpSize; // size_t memPitch; // int maxThreadsPerBlock; // int maxThreadsDim[3]; // int maxGridSize[3]; // size_t totalConstMem; // int major; // int minor; // int clockRate; // size_t textureAlignment; // int deviceOverlap; // int multiProcessorCount; // int kernelExecTimeoutEnabled; // int integrated; // int canMapHostMemory; // int computeMode; // int concurrentKernels; // int ECCEnabled; // int pciBusID; // int pciDeviceID; // int tccDriver; // int asyncEngineCount; // int unifiedAddressing; // int memoryClockRate; // int memoryBusWidth; // int l2CacheSize; // int maxThreadsPerMultiProcessor; // }; namespace { class CudaInternalDevices { public: enum { MAXIMUM_DEVICE_COUNT = 64 }; struct cudaDeviceProp m_cudaProp[MAXIMUM_DEVICE_COUNT]; int m_cudaDevCount; CudaInternalDevices(); static const CudaInternalDevices &singleton(); }; CudaInternalDevices::CudaInternalDevices() { // See 'cudaSetDeviceFlags' for host-device thread interaction // Section 4.4.2.6 of the CUDA Toolkit Reference Manual CUDA_SAFE_CALL(cudaGetDeviceCount(&m_cudaDevCount)); if (m_cudaDevCount > MAXIMUM_DEVICE_COUNT) { Kokkos::abort( "Sorry, you have more GPUs per node than we thought anybody would ever " "have. Please report this to github.com/kokkos/kokkos."); } for (int i = 0; i < m_cudaDevCount; ++i) { CUDA_SAFE_CALL(cudaGetDeviceProperties(m_cudaProp + i, i)); } } const CudaInternalDevices &CudaInternalDevices::singleton() { static CudaInternalDevices self; return self; } } // namespace int CudaInternal::was_initialized = 0; int CudaInternal::was_finalized = 0; //---------------------------------------------------------------------------- void CudaInternal::print_configuration(std::ostream &s) const { const CudaInternalDevices &dev_info = CudaInternalDevices::singleton(); #if defined(KOKKOS_ENABLE_CUDA) s << "macro KOKKOS_ENABLE_CUDA : defined" << std::endl; #endif #if defined(CUDA_VERSION) s << "macro CUDA_VERSION = " << CUDA_VERSION << " = version " << CUDA_VERSION / 1000 << "." << (CUDA_VERSION % 1000) / 10 << std::endl; #endif for (int i = 0; i < dev_info.m_cudaDevCount; ++i) { s << "Kokkos::Cuda[ " << i << " ] " << dev_info.m_cudaProp[i].name << " capability " << dev_info.m_cudaProp[i].major << "." 
<< dev_info.m_cudaProp[i].minor << ", Total Global Memory: " << human_memory_size(dev_info.m_cudaProp[i].totalGlobalMem) << ", Shared Memory per Block: " << human_memory_size(dev_info.m_cudaProp[i].sharedMemPerBlock); if (m_cudaDev == i) s << " : Selected"; s << std::endl; } } //---------------------------------------------------------------------------- CudaInternal::~CudaInternal() { if (m_stream || m_scratchSpace || m_scratchFlags || m_scratchUnified || m_scratchConcurrentBitset) { std::cerr << "Kokkos::Cuda ERROR: Failed to call Kokkos::Cuda::finalize()" << std::endl; std::cerr.flush(); } m_cudaDev = -1; m_cudaArch = -1; m_multiProcCount = 0; m_maxWarpCount = 0; m_maxBlock = 0; m_maxSharedWords = 0; m_maxConcurrency = 0; m_scratchSpaceCount = 0; m_scratchFlagsCount = 0; m_scratchUnifiedCount = 0; m_scratchUnifiedSupported = 0; m_streamCount = 0; m_scratchSpace = 0; m_scratchFlags = 0; m_scratchUnified = 0; m_scratchConcurrentBitset = 0; m_stream = 0; } int CudaInternal::verify_is_initialized(const char *const label) const { if (m_cudaDev < 0) { std::cerr << "Kokkos::Cuda::" << label << " : ERROR device not initialized" << std::endl; } return 0 <= m_cudaDev; } CudaInternal &CudaInternal::singleton() { static CudaInternal self; return self; } void CudaInternal::fence() const { cudaStreamSynchronize(m_stream); } void CudaInternal::initialize(int cuda_device_id, cudaStream_t stream) { if (was_finalized) Kokkos::abort("Calling Cuda::initialize after Cuda::finalize is illegal\n"); was_initialized = 1; if (is_initialized()) return; enum { WordSize = sizeof(size_type) }; #ifndef KOKKOS_IMPL_TURN_OFF_CUDA_HOST_INIT_CHECK if (!HostSpace::execution_space::impl_is_initialized()) { const std::string msg( "Cuda::initialize ERROR : HostSpace::execution_space is not " "initialized"); throw_runtime_exception(msg); } #endif const CudaInternalDevices &dev_info = CudaInternalDevices::singleton(); const bool ok_init = 0 == m_scratchSpace || 0 == m_scratchFlags; const bool ok_id = 0 <= cuda_device_id && cuda_device_id < dev_info.m_cudaDevCount; // Need device capability 3.0 or better const bool ok_dev = ok_id && (3 <= dev_info.m_cudaProp[cuda_device_id].major && 0 <= dev_info.m_cudaProp[cuda_device_id].minor); if (ok_init && ok_dev) { const struct cudaDeviceProp &cudaProp = dev_info.m_cudaProp[cuda_device_id]; m_cudaDev = cuda_device_id; m_deviceProp = cudaProp; CUDA_SAFE_CALL(cudaSetDevice(m_cudaDev)); Kokkos::Impl::cuda_device_synchronize(); // Query what compute capability architecture a kernel executes: m_cudaArch = cuda_kernel_arch(); if (m_cudaArch == 0) { std::stringstream ss; ss << "Kokkos::Cuda::initialize ERROR: likely mismatch of architecture" << std::endl; std::string msg = ss.str(); Kokkos::abort(msg.c_str()); } int compiled_major = m_cudaArch / 100; int compiled_minor = (m_cudaArch % 100) / 10; if (compiled_major != cudaProp.major || compiled_minor > cudaProp.minor) { std::stringstream ss; ss << "Kokkos::Cuda::initialize ERROR: running kernels compiled for " "compute capability " << compiled_major << "." << compiled_minor << " on device with compute capability " << cudaProp.major << "." << cudaProp.minor << " is not supported by CUDA!" << std::endl; std::string msg = ss.str(); Kokkos::abort(msg.c_str()); } if (Kokkos::show_warnings() && (compiled_major != cudaProp.major || compiled_minor != cudaProp.minor)) { std::cerr << "Kokkos::Cuda::initialize WARNING: running kernels compiled " "for compute capability " << compiled_major << "." 
<< compiled_minor << " on device with compute capability " << cudaProp.major << "." << cudaProp.minor << " , this will likely reduce potential performance." << std::endl; } // number of multiprocessors m_multiProcCount = cudaProp.multiProcessorCount; //---------------------------------- // Maximum number of warps, // at most one warp per thread in a warp for reduction. m_maxWarpCount = cudaProp.maxThreadsPerBlock / Impl::CudaTraits::WarpSize; if (Impl::CudaTraits::WarpSize < m_maxWarpCount) { m_maxWarpCount = Impl::CudaTraits::WarpSize; } m_maxSharedWords = cudaProp.sharedMemPerBlock / WordSize; //---------------------------------- // Maximum number of blocks: m_maxBlock = cudaProp.maxGridSize[0]; m_shmemPerSM = cudaProp.sharedMemPerMultiprocessor; m_maxShmemPerBlock = cudaProp.sharedMemPerBlock; m_regsPerSM = cudaProp.regsPerMultiprocessor; m_maxBlocksPerSM = m_cudaArch < 500 ? 16 : (m_cudaArch < 750 ? 32 : (m_cudaArch == 750 ? 16 : 32)); m_maxThreadsPerSM = cudaProp.maxThreadsPerMultiProcessor; m_maxThreadsPerBlock = cudaProp.maxThreadsPerBlock; //---------------------------------- m_scratchUnifiedSupported = cudaProp.unifiedAddressing; if (Kokkos::show_warnings() && !m_scratchUnifiedSupported) { std::cerr << "Kokkos::Cuda device " << cudaProp.name << " capability " << cudaProp.major << "." << cudaProp.minor << " does not support unified virtual address space" << std::endl; } //---------------------------------- // Multiblock reduction uses scratch flags for counters // and scratch space for partial reduction values. // Allocate some initial space. This will grow as needed. { const unsigned reduce_block_count = m_maxWarpCount * Impl::CudaTraits::WarpSize; (void)scratch_unified(16 * sizeof(size_type)); (void)scratch_flags(reduce_block_count * 2 * sizeof(size_type)); (void)scratch_space(reduce_block_count * 16 * sizeof(size_type)); } //---------------------------------- // Concurrent bitset for obtaining unique tokens from within // an executing kernel. { m_maxConcurrency = m_maxThreadsPerSM * cudaProp.multiProcessorCount; const int32_t buffer_bound = Kokkos::Impl::concurrent_bitset::buffer_bound(m_maxConcurrency); // Allocate and initialize uint32_t[ buffer_bound ] typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void> Record; Record *const r = Record::allocate(Kokkos::CudaSpace(), "InternalScratchBitset", sizeof(uint32_t) * buffer_bound); Record::increment(r); m_scratchConcurrentBitset = reinterpret_cast<uint32_t *>(r->data()); CUDA_SAFE_CALL(cudaMemset(m_scratchConcurrentBitset, 0, sizeof(uint32_t) * buffer_bound)); } //---------------------------------- } else { std::ostringstream msg; msg << "Kokkos::Cuda::initialize(" << cuda_device_id << ") FAILED"; if (!ok_init) { msg << " : Already initialized"; } if (!ok_id) { msg << " : Device identifier out of range " << "[0.." << dev_info.m_cudaDevCount << "]"; } else if (!ok_dev) { msg << " : Device "; msg << dev_info.m_cudaProp[cuda_device_id].major; msg << "."; msg << dev_info.m_cudaProp[cuda_device_id].minor; msg << " has insufficient capability, required 3.0 or better"; } Kokkos::Impl::throw_runtime_exception(msg.str()); } #ifdef KOKKOS_ENABLE_CUDA_UVM if (Kokkos::show_warnings() && !cuda_launch_blocking()) { std::cerr << "Kokkos::Cuda::initialize WARNING: Cuda is allocating into " "UVMSpace by default" << std::endl; std::cerr << " without setting " "CUDA_LAUNCH_BLOCKING=1." 
<< std::endl; std::cerr << " The code must call " "Cuda().fence() after each kernel" << std::endl; std::cerr << " or will likely crash when " "accessing data on the host." << std::endl; } const char *env_force_device_alloc = getenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC"); bool force_device_alloc; if (env_force_device_alloc == 0) force_device_alloc = false; else force_device_alloc = std::stoi(env_force_device_alloc) != 0; const char *env_visible_devices = getenv("CUDA_VISIBLE_DEVICES"); bool visible_devices_one = true; if (env_visible_devices == 0) visible_devices_one = false; if (Kokkos::show_warnings() && (!visible_devices_one && !force_device_alloc)) { std::cerr << "Kokkos::Cuda::initialize WARNING: Cuda is allocating into " "UVMSpace by default" << std::endl; std::cerr << " without setting " "CUDA_MANAGED_FORCE_DEVICE_ALLOC=1 or " << std::endl; std::cerr << " setting CUDA_VISIBLE_DEVICES." << std::endl; std::cerr << " This could on multi GPU " "systems lead to severe performance" << std::endl; std::cerr << " penalties." << std::endl; } #endif #ifdef KOKKOS_ENABLE_PRE_CUDA_10_DEPRECATION_API cudaThreadSetCacheConfig(cudaFuncCachePreferShared); #else cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); #endif // Init the array for used for arbitrarily sized atomics if (stream == 0) Impl::initialize_host_cuda_lock_arrays(); m_stream = stream; } //---------------------------------------------------------------------------- typedef Cuda::size_type ScratchGrain[Impl::CudaTraits::WarpSize]; enum { sizeScratchGrain = sizeof(ScratchGrain) }; Cuda::size_type *CudaInternal::scratch_flags(const Cuda::size_type size) const { if (verify_is_initialized("scratch_flags") && m_scratchFlagsCount * sizeScratchGrain < size) { m_scratchFlagsCount = (size + sizeScratchGrain - 1) / sizeScratchGrain; typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void> Record; if (m_scratchFlags) Record::decrement(Record::get_record(m_scratchFlags)); Record *const r = Record::allocate(Kokkos::CudaSpace(), "InternalScratchFlags", (sizeof(ScratchGrain) * m_scratchFlagsCount)); Record::increment(r); m_scratchFlags = reinterpret_cast<size_type *>(r->data()); CUDA_SAFE_CALL( cudaMemset(m_scratchFlags, 0, m_scratchFlagsCount * sizeScratchGrain)); } return m_scratchFlags; } Cuda::size_type *CudaInternal::scratch_space(const Cuda::size_type size) const { if (verify_is_initialized("scratch_space") && m_scratchSpaceCount * sizeScratchGrain < size) { m_scratchSpaceCount = (size + sizeScratchGrain - 1) / sizeScratchGrain; typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void> Record; if (m_scratchSpace) Record::decrement(Record::get_record(m_scratchSpace)); Record *const r = Record::allocate(Kokkos::CudaSpace(), "InternalScratchSpace", (sizeof(ScratchGrain) * m_scratchSpaceCount)); Record::increment(r); m_scratchSpace = reinterpret_cast<size_type *>(r->data()); } return m_scratchSpace; } Cuda::size_type *CudaInternal::scratch_unified( const Cuda::size_type size) const { if (verify_is_initialized("scratch_unified") && m_scratchUnifiedSupported && m_scratchUnifiedCount * sizeScratchGrain < size) { m_scratchUnifiedCount = (size + sizeScratchGrain - 1) / sizeScratchGrain; typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void> Record; if (m_scratchUnified) Record::decrement(Record::get_record(m_scratchUnified)); Record *const r = Record::allocate( Kokkos::CudaHostPinnedSpace(), "InternalScratchUnified", (sizeof(ScratchGrain) * m_scratchUnifiedCount)); Record::increment(r); m_scratchUnified = 
reinterpret_cast<size_type *>(r->data()); } return m_scratchUnified; } Cuda::size_type *CudaInternal::scratch_functor( const Cuda::size_type size) const { if (verify_is_initialized("scratch_functor") && m_scratchFunctorSize < size) { m_scratchFunctorSize = size; typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void> Record; if (m_scratchFunctor) Record::decrement(Record::get_record(m_scratchFunctor)); Record *const r = Record::allocate( Kokkos::CudaSpace(), "InternalScratchFunctor", m_scratchFunctorSize); Record::increment(r); m_scratchFunctor = reinterpret_cast<size_type *>(r->data()); } return m_scratchFunctor; } //---------------------------------------------------------------------------- void CudaInternal::finalize() { was_finalized = 1; if (0 != m_scratchSpace || 0 != m_scratchFlags) { Impl::finalize_host_cuda_lock_arrays(); if (m_stream != 0) cudaStreamDestroy(m_stream); typedef Kokkos::Impl::SharedAllocationRecord<CudaSpace> RecordCuda; typedef Kokkos::Impl::SharedAllocationRecord<CudaHostPinnedSpace> RecordHost; RecordCuda::decrement(RecordCuda::get_record(m_scratchFlags)); RecordCuda::decrement(RecordCuda::get_record(m_scratchSpace)); RecordHost::decrement(RecordHost::get_record(m_scratchUnified)); RecordCuda::decrement(RecordCuda::get_record(m_scratchConcurrentBitset)); if (m_scratchFunctorSize > 0) RecordCuda::decrement(RecordCuda::get_record(m_scratchFunctor)); m_cudaDev = -1; m_multiProcCount = 0; m_maxWarpCount = 0; m_maxBlock = 0; m_maxSharedWords = 0; m_scratchSpaceCount = 0; m_scratchFlagsCount = 0; m_scratchUnifiedCount = 0; m_streamCount = 0; m_scratchSpace = 0; m_scratchFlags = 0; m_scratchUnified = 0; m_scratchConcurrentBitset = 0; m_stream = 0; } } //---------------------------------------------------------------------------- Cuda::size_type cuda_internal_multiprocessor_count() { return CudaInternal::singleton().m_multiProcCount; } CudaSpace::size_type cuda_internal_maximum_concurrent_block_count() { #if defined(KOKKOS_ARCH_KEPLER) // Compute capability 3.0 through 3.7 enum : int { max_resident_blocks_per_multiprocessor = 16 }; #else // Compute capability 5.0 through 6.2 enum : int { max_resident_blocks_per_multiprocessor = 32 }; #endif return CudaInternal::singleton().m_multiProcCount * max_resident_blocks_per_multiprocessor; }; Cuda::size_type cuda_internal_maximum_warp_count() { return CudaInternal::singleton().m_maxWarpCount; } Cuda::size_type cuda_internal_maximum_grid_count() { return CudaInternal::singleton().m_maxBlock; } Cuda::size_type cuda_internal_maximum_shared_words() { return CudaInternal::singleton().m_maxSharedWords; } Cuda::size_type *cuda_internal_scratch_space(const Cuda &instance, const Cuda::size_type size) { return instance.impl_internal_space_instance()->scratch_space(size); } Cuda::size_type *cuda_internal_scratch_flags(const Cuda &instance, const Cuda::size_type size) { return instance.impl_internal_space_instance()->scratch_flags(size); } Cuda::size_type *cuda_internal_scratch_unified(const Cuda &instance, const Cuda::size_type size) { return instance.impl_internal_space_instance()->scratch_unified(size); } } // namespace Impl } // namespace Kokkos //---------------------------------------------------------------------------- namespace Kokkos { Cuda::size_type Cuda::detect_device_count() { return Impl::CudaInternalDevices::singleton().m_cudaDevCount; } int Cuda::concurrency() { return Impl::CudaInternal::singleton().m_maxConcurrency; } int Cuda::impl_is_initialized() { return Impl::CudaInternal::singleton().is_initialized(); 
} void Cuda::impl_initialize(const Cuda::SelectDevice config, size_t /*num_instances*/) { Impl::CudaInternal::singleton().initialize(config.cuda_device_id, 0); #if defined(KOKKOS_ENABLE_PROFILING) Kokkos::Profiling::initialize(); #endif } std::vector<unsigned> Cuda::detect_device_arch() { const Impl::CudaInternalDevices &s = Impl::CudaInternalDevices::singleton(); std::vector<unsigned> output(s.m_cudaDevCount); for (int i = 0; i < s.m_cudaDevCount; ++i) { output[i] = s.m_cudaProp[i].major * 100 + s.m_cudaProp[i].minor; } return output; } Cuda::size_type Cuda::device_arch() { const int dev_id = Impl::CudaInternal::singleton().m_cudaDev; int dev_arch = 0; if (0 <= dev_id) { const struct cudaDeviceProp &cudaProp = Impl::CudaInternalDevices::singleton().m_cudaProp[dev_id]; dev_arch = cudaProp.major * 100 + cudaProp.minor; } return dev_arch; } void Cuda::impl_finalize() { Impl::CudaInternal::singleton().finalize(); #if defined(KOKKOS_ENABLE_PROFILING) Kokkos::Profiling::finalize(); #endif } Cuda::Cuda() : m_space_instance(&Impl::CudaInternal::singleton()) { Impl::CudaInternal::singleton().verify_is_initialized( "Cuda instance constructor"); } Cuda::Cuda(cudaStream_t stream) : m_space_instance(new Impl::CudaInternal) { Impl::CudaInternal::singleton().verify_is_initialized( "Cuda instance constructor"); m_space_instance->initialize(Impl::CudaInternal::singleton().m_cudaDev, stream); } void Cuda::print_configuration(std::ostream &s, const bool) { Impl::CudaInternal::singleton().print_configuration(s); } void Cuda::impl_static_fence() { Kokkos::Impl::cuda_device_synchronize(); } void Cuda::fence() const { m_space_instance->fence(); } const char *Cuda::name() { return "Cuda"; } cudaStream_t Cuda::cuda_stream() const { return m_space_instance->m_stream; } int Cuda::cuda_device() const { return m_space_instance->m_cudaDev; } const cudaDeviceProp &Cuda::cuda_device_prop() const { return m_space_instance->m_deviceProp; } } // namespace Kokkos namespace Kokkos { namespace Experimental { UniqueToken<Kokkos::Cuda, Kokkos::Experimental::UniqueTokenScope::Global>:: UniqueToken(Kokkos::Cuda const &) : m_buffer( Kokkos::Impl::CudaInternal::singleton().m_scratchConcurrentBitset), m_count(Kokkos::Impl::CudaInternal::singleton().m_maxConcurrency) {} } // namespace Experimental } // namespace Kokkos #else void KOKKOS_CORE_SRC_CUDA_IMPL_PREVENT_LINK_ERROR() {} #endif // KOKKOS_ENABLE_CUDA
1
23,667
Why were we initializing here in the first place?
kokkos-kokkos
cpp
@@ -0,0 +1,7 @@ +namespace MvvmCross.Core.Platform.LogProviders +{ + internal sealed class NullLogProvider : MvxBaseLogProvider + { + protected override Logger GetLogger(string name) => new Logger((logLevel, messageFunc, exception, formatParameters) => true); + } +}
1
1
13,437
This should be removed; instead, the logger should be set to None.
MvvmCross-MvvmCross
.cs
@@ -49,7 +49,7 @@ function UndoRedo(instance) { return; } - var originalData = plugin.instance.getSourceDataArray(); + var originalData = plugin.instance.getSourceData(); index = (originalData.length + index) % originalData.length;
1
/** * Handsontable UndoRedo class */ import Hooks from './../../pluginHooks'; import {arrayMap} from './../../helpers/array'; import {rangeEach} from './../../helpers/number'; import {inherit, deepClone} from './../../helpers/object'; import {stopImmediatePropagation} from './../../helpers/dom/event'; import {CellCoords} from './../../3rdparty/walkontable/src'; /** * @description * Handsontable UndoRedo plugin. It allows to undo and redo certain actions done in the table. * Please note, that not all actions are currently undo-able. * * @example * ```js * ... * undo: true * ... * ``` * @class UndoRedo * @plugin UndoRedo */ function UndoRedo(instance) { let plugin = this; this.instance = instance; this.doneActions = []; this.undoneActions = []; this.ignoreNewActions = false; instance.addHook('afterChange', (changes, source) => { if (changes && source !== 'UndoRedo.undo' && source !== 'UndoRedo.redo') { plugin.done(new UndoRedo.ChangeAction(changes)); } }); instance.addHook('afterCreateRow', (index, amount, source) => { if (source === 'UndoRedo.undo' || source === 'UndoRedo.undo' || source === 'auto') { return; } let action = new UndoRedo.CreateRowAction(index, amount); plugin.done(action); }); instance.addHook('beforeRemoveRow', (index, amount, logicRows, source) => { if (source === 'UndoRedo.undo' || source === 'UndoRedo.redo' || source === 'auto') { return; } var originalData = plugin.instance.getSourceDataArray(); index = (originalData.length + index) % originalData.length; var removedData = deepClone(originalData.slice(index, index + amount)); plugin.done(new UndoRedo.RemoveRowAction(index, removedData)); }); instance.addHook('afterCreateCol', (index, amount, source) => { if (source === 'UndoRedo.undo' || source === 'UndoRedo.redo' || source === 'auto') { return; } plugin.done(new UndoRedo.CreateColumnAction(index, amount)); }); instance.addHook('beforeRemoveCol', (index, amount, logicColumns, source) => { if (source === 'UndoRedo.undo' || source === 'UndoRedo.redo' || source === 'auto') { return; } let originalData = plugin.instance.getSourceDataArray(); index = (plugin.instance.countCols() + index) % plugin.instance.countCols(); let removedData = []; let headers = []; let indexes = []; rangeEach(originalData.length - 1, (i) => { let column = []; let origRow = originalData[i]; rangeEach(index, index + (amount - 1), (j) => { column.push(origRow[instance.runHooks('modifyCol', j)]); }); removedData.push(column); }); rangeEach(amount - 1, (i) => { indexes.push(instance.runHooks('modifyCol', index + i)); }); if (Array.isArray(instance.getSettings().colHeaders)) { rangeEach(amount - 1, (i) => { headers.push(instance.getSettings().colHeaders[instance.runHooks('modifyCol', index + i)] || null); }); } let manualColumnMovePlugin = plugin.instance.getPlugin('manualColumnMove'); let columnsMap = manualColumnMovePlugin.isEnabled() ? 
manualColumnMovePlugin.columnsMapper.__arrayMap : []; let action = new UndoRedo.RemoveColumnAction(index, indexes, removedData, headers, columnsMap); plugin.done(action); }); instance.addHook('beforeCellAlignment', (stateBefore, range, type, alignment) => { let action = new UndoRedo.CellAlignmentAction(stateBefore, range, type, alignment); plugin.done(action); }); instance.addHook('beforeFilter', (conditionsStack) => { plugin.done(new UndoRedo.FiltersAction(conditionsStack)); }); instance.addHook('beforeRowMove', (movedRows, target) => { if (movedRows === false) { return; } plugin.done(new UndoRedo.RowMoveAction(movedRows, target)); }); }; UndoRedo.prototype.done = function(action) { if (!this.ignoreNewActions) { this.doneActions.push(action); this.undoneActions.length = 0; } }; /** * Undo last edit. * * @function undo * @memberof UndoRedo# */ UndoRedo.prototype.undo = function() { if (this.isUndoAvailable()) { let action = this.doneActions.pop(); let actionClone = deepClone(action); let instance = this.instance; let continueAction = instance.runHooks('beforeUndo', actionClone); if (continueAction === false) { return; } this.ignoreNewActions = true; let that = this; action.undo(this.instance, () => { that.ignoreNewActions = false; that.undoneActions.push(action); }); instance.runHooks('afterUndo', actionClone); } }; /** * Redo edit (used to reverse an undo). * * @function redo * @memberof UndoRedo# */ UndoRedo.prototype.redo = function() { if (this.isRedoAvailable()) { let action = this.undoneActions.pop(); let actionClone = deepClone(action); let instance = this.instance; let continueAction = instance.runHooks('beforeRedo', actionClone); if (continueAction === false) { return; } this.ignoreNewActions = true; let that = this; action.redo(this.instance, () => { that.ignoreNewActions = false; that.doneActions.push(action); }); instance.runHooks('afterRedo', actionClone); } }; /** * Check if undo action is available. * * @function isUndoAvailable * @memberof UndoRedo# * @return {Boolean} Return `true` if undo can be performed, `false` otherwise */ UndoRedo.prototype.isUndoAvailable = function() { return this.doneActions.length > 0; }; /** * Check if redo action is available. * * @function isRedoAvailable * @memberof UndoRedo# * @return {Boolean} Return `true` if redo can be performed, `false` otherwise. */ UndoRedo.prototype.isRedoAvailable = function() { return this.undoneActions.length > 0; }; /** * Clears undo history. * * @function clear * @memberof UndoRedo# */ UndoRedo.prototype.clear = function() { this.doneActions.length = 0; this.undoneActions.length = 0; }; UndoRedo.Action = function() {}; UndoRedo.Action.prototype.undo = function() {}; UndoRedo.Action.prototype.redo = function() {}; /** * Change action. 
*/ UndoRedo.ChangeAction = function(changes) { this.changes = changes; this.actionType = 'change'; }; inherit(UndoRedo.ChangeAction, UndoRedo.Action); UndoRedo.ChangeAction.prototype.undo = function(instance, undoneCallback) { let data = deepClone(this.changes), emptyRowsAtTheEnd = instance.countEmptyRows(true), emptyColsAtTheEnd = instance.countEmptyCols(true); for (let i = 0, len = data.length; i < len; i++) { data[i].splice(3, 1); } instance.addHookOnce('afterChange', undoneCallback); instance.setDataAtRowProp(data, null, null, 'UndoRedo.undo'); for (let i = 0, len = data.length; i < len; i++) { if (instance.getSettings().minSpareRows && data[i][0] + 1 + instance.getSettings().minSpareRows === instance.countRows() && emptyRowsAtTheEnd == instance.getSettings().minSpareRows) { instance.alter('remove_row', parseInt(data[i][0] + 1, 10), instance.getSettings().minSpareRows); instance.undoRedo.doneActions.pop(); } if (instance.getSettings().minSpareCols && data[i][1] + 1 + instance.getSettings().minSpareCols === instance.countCols() && emptyColsAtTheEnd == instance.getSettings().minSpareCols) { instance.alter('remove_col', parseInt(data[i][1] + 1, 10), instance.getSettings().minSpareCols); instance.undoRedo.doneActions.pop(); } } }; UndoRedo.ChangeAction.prototype.redo = function(instance, onFinishCallback) { let data = deepClone(this.changes); for (let i = 0, len = data.length; i < len; i++) { data[i].splice(2, 1); } instance.addHookOnce('afterChange', onFinishCallback); instance.setDataAtRowProp(data, null, null, 'UndoRedo.redo'); }; /** * Create row action. */ UndoRedo.CreateRowAction = function(index, amount) { this.index = index; this.amount = amount; this.actionType = 'insert_row'; }; inherit(UndoRedo.CreateRowAction, UndoRedo.Action); UndoRedo.CreateRowAction.prototype.undo = function(instance, undoneCallback) { let rowCount = instance.countRows(), minSpareRows = instance.getSettings().minSpareRows; if (this.index >= rowCount && this.index - minSpareRows < rowCount) { this.index -= minSpareRows; // work around the situation where the needed row was removed due to an 'undo' of a made change } instance.addHookOnce('afterRemoveRow', undoneCallback); instance.alter('remove_row', this.index, this.amount, 'UndoRedo.undo'); }; UndoRedo.CreateRowAction.prototype.redo = function(instance, redoneCallback) { instance.addHookOnce('afterCreateRow', redoneCallback); instance.alter('insert_row', this.index, this.amount, 'UndoRedo.redo'); }; /** * Remove row action. */ UndoRedo.RemoveRowAction = function(index, data) { this.index = index; this.data = data; this.actionType = 'remove_row'; }; inherit(UndoRedo.RemoveRowAction, UndoRedo.Action); UndoRedo.RemoveRowAction.prototype.undo = function(instance, undoneCallback) { instance.alter('insert_row', this.index, this.data.length, 'UndoRedo.undo'); instance.addHookOnce('afterRender', undoneCallback); instance.populateFromArray(this.index, 0, this.data, void 0, void 0, 'UndoRedo.undo'); }; UndoRedo.RemoveRowAction.prototype.redo = function(instance, redoneCallback) { instance.addHookOnce('afterRemoveRow', redoneCallback); instance.alter('remove_row', this.index, this.data.length, 'UndoRedo.redo'); }; /** * Create column action. 
*/ UndoRedo.CreateColumnAction = function(index, amount) { this.index = index; this.amount = amount; this.actionType = 'insert_col'; }; inherit(UndoRedo.CreateColumnAction, UndoRedo.Action); UndoRedo.CreateColumnAction.prototype.undo = function(instance, undoneCallback) { instance.addHookOnce('afterRemoveCol', undoneCallback); instance.alter('remove_col', this.index, this.amount, 'UndoRedo.undo'); }; UndoRedo.CreateColumnAction.prototype.redo = function(instance, redoneCallback) { instance.addHookOnce('afterCreateCol', redoneCallback); instance.alter('insert_col', this.index, this.amount, 'UndoRedo.redo'); }; /** * Remove column action. */ UndoRedo.RemoveColumnAction = function(index, indexes, data, headers, columnPositions) { this.index = index; this.indexes = indexes; this.data = data; this.amount = this.data[0].length; this.headers = headers; this.columnPositions = columnPositions.slice(0); this.actionType = 'remove_col'; }; inherit(UndoRedo.RemoveColumnAction, UndoRedo.Action); UndoRedo.RemoveColumnAction.prototype.undo = function(instance, undoneCallback) { let row; let ascendingIndexes = this.indexes.slice(0).sort(); let sortByIndexes = (elem, j, arr) => arr[this.indexes.indexOf(ascendingIndexes[j])]; let sortedData = []; rangeEach(this.data.length - 1, (i) => { sortedData[i] = arrayMap(this.data[i], sortByIndexes); }); let sortedHeaders = []; sortedHeaders = arrayMap(this.headers, sortByIndexes); var changes = []; // TODO: Temporary hook for undo/redo mess instance.runHooks('beforeCreateCol', this.indexes[0], this.indexes[this.indexes.length - 1], 'UndoRedo.undo'); rangeEach(this.data.length - 1, (i) => { row = instance.getSourceDataAtRow(i); rangeEach(ascendingIndexes.length - 1, (j) => { row.splice(ascendingIndexes[j], 0, sortedData[i][j]); changes.push([i, ascendingIndexes[j], null, sortedData[i][j]]); }); }); // TODO: Temporary hook for undo/redo mess if (instance.getPlugin('formulas')) { instance.getPlugin('formulas').onAfterSetDataAtCell(changes); } if (typeof this.headers !== 'undefined') { rangeEach(sortedHeaders.length - 1, (j) => { instance.getSettings().colHeaders.splice(ascendingIndexes[j], 0, sortedHeaders[j]); }); } if (instance.getPlugin('manualColumnMove')) { instance.getPlugin('manualColumnMove').columnsMapper.__arrayMap = this.columnPositions; } instance.addHookOnce('afterRender', undoneCallback); // TODO: Temporary hook for undo/redo mess instance.runHooks('afterCreateCol', this.indexes[0], this.indexes[this.indexes.length - 1], 'UndoRedo.undo'); if (instance.getPlugin('formulas')) { instance.getPlugin('formulas').recalculateFull(); } instance.render(); }; UndoRedo.RemoveColumnAction.prototype.redo = function(instance, redoneCallback) { instance.addHookOnce('afterRemoveCol', redoneCallback); instance.alter('remove_col', this.index, this.amount, 'UndoRedo.redo'); }; /** * Cell alignment action. 
*/ UndoRedo.CellAlignmentAction = function(stateBefore, range, type, alignment) { this.stateBefore = stateBefore; this.range = range; this.type = type; this.alignment = alignment; }; UndoRedo.CellAlignmentAction.prototype.undo = function(instance, undoneCallback) { if (!instance.getPlugin('contextMenu').isEnabled()) { return; } for (var row = this.range.from.row; row <= this.range.to.row; row++) { for (var col = this.range.from.col; col <= this.range.to.col; col++) { instance.setCellMeta(row, col, 'className', this.stateBefore[row][col] || ' htLeft'); } } instance.addHookOnce('afterRender', undoneCallback); instance.render(); }; UndoRedo.CellAlignmentAction.prototype.redo = function(instance, undoneCallback) { if (!instance.getPlugin('contextMenu').isEnabled()) { return; } instance.selectCell(this.range.from.row, this.range.from.col, this.range.to.row, this.range.to.col); instance.getPlugin('contextMenu').executeCommand(`alignment:${this.alignment.replace('ht', '').toLowerCase()}`); instance.addHookOnce('afterRender', undoneCallback); instance.render(); }; /** * Filters action. */ UndoRedo.FiltersAction = function(conditionsStack) { this.conditionsStack = conditionsStack; this.actionType = 'filter'; }; inherit(UndoRedo.FiltersAction, UndoRedo.Action); UndoRedo.FiltersAction.prototype.undo = function(instance, undoneCallback) { let filters = instance.getPlugin('filters'); instance.addHookOnce('afterRender', undoneCallback); filters.conditionCollection.importAllConditions(this.conditionsStack.slice(0, this.conditionsStack.length - 1)); filters.filter(); }; UndoRedo.FiltersAction.prototype.redo = function(instance, redoneCallback) { let filters = instance.getPlugin('filters'); instance.addHookOnce('afterRender', redoneCallback); filters.conditionCollection.importAllConditions(this.conditionsStack); filters.filter(); }; /** * ManualRowMove action. * @TODO: removeRow undo should works on logical index */ UndoRedo.RowMoveAction = function(movedRows, target) { this.rows = movedRows.slice(); this.target = target; }; inherit(UndoRedo.RowMoveAction, UndoRedo.Action); UndoRedo.RowMoveAction.prototype.undo = function(instance, undoneCallback) { let manualRowMove = instance.getPlugin('manualRowMove'); instance.addHookOnce('afterRender', undoneCallback); manualRowMove.moveRows([this.target], this.rows[0]); instance.render(); instance.selection.setRangeStartOnly(new CellCoords(this.rows[0], 0)); instance.selection.setRangeEnd(new CellCoords(this.rows[this.rows.length - 1], instance.countCols() - 1)); }; UndoRedo.RowMoveAction.prototype.redo = function(instance, redoneCallback) { let manualRowMove = instance.getPlugin('manualRowMove'); instance.addHookOnce('afterRender', redoneCallback); manualRowMove.moveRows(this.rows.slice(), this.target); instance.render(); let startSelection = this.rows[0] < this.target ? this.target - this.rows.length : this.target; instance.selection.setRangeStartOnly(new CellCoords(startSelection, 0)); instance.selection.setRangeEnd(new CellCoords(startSelection + this.rows.length - 1, instance.countCols() - 1)); }; function init() { let instance = this; let pluginEnabled = typeof instance.getSettings().undo == 'undefined' || instance.getSettings().undo; if (pluginEnabled) { if (!instance.undoRedo) { /** * Instance of Handsontable.UndoRedo Plugin {@link Handsontable.UndoRedo} * * @alias undoRedo * @memberof! 
Handsontable.Core# * @type {UndoRedo} */ instance.undoRedo = new UndoRedo(instance); exposeUndoRedoMethods(instance); instance.addHook('beforeKeyDown', onBeforeKeyDown); instance.addHook('afterChange', onAfterChange); } } else if (instance.undoRedo) { delete instance.undoRedo; removeExposedUndoRedoMethods(instance); instance.removeHook('beforeKeyDown', onBeforeKeyDown); instance.removeHook('afterChange', onAfterChange); } } function onBeforeKeyDown(event) { let instance = this; let ctrlDown = (event.ctrlKey || event.metaKey) && !event.altKey; if (ctrlDown) { if (event.keyCode === 89 || (event.shiftKey && event.keyCode === 90)) { // CTRL + Y or CTRL + SHIFT + Z instance.undoRedo.redo(); stopImmediatePropagation(event); } else if (event.keyCode === 90) { // CTRL + Z instance.undoRedo.undo(); stopImmediatePropagation(event); } } } function onAfterChange(changes, source) { let instance = this; if (source === 'loadData') { return instance.undoRedo.clear(); } } function exposeUndoRedoMethods(instance) { /** * {@link UndoRedo#undo} * @alias undo * @memberof! Handsontable.Core# */ instance.undo = function() { return instance.undoRedo.undo(); }; /** * {@link UndoRedo#redo} * @alias redo * @memberof! Handsontable.Core# */ instance.redo = function() { return instance.undoRedo.redo(); }; /** * {@link UndoRedo#isUndoAvailable} * @alias isUndoAvailable * @memberof! Handsontable.Core# */ instance.isUndoAvailable = function() { return instance.undoRedo.isUndoAvailable(); }; /** * {@link UndoRedo#isRedoAvailable} * @alias isRedoAvailable * @memberof! Handsontable.Core# */ instance.isRedoAvailable = function() { return instance.undoRedo.isRedoAvailable(); }; /** * {@link UndoRedo#clear} * @alias clearUndo * @memberof! Handsontable.Core# */ instance.clearUndo = function() { return instance.undoRedo.clear(); }; } function removeExposedUndoRedoMethods(instance) { delete instance.undo; delete instance.redo; delete instance.isUndoAvailable; delete instance.isRedoAvailable; delete instance.clearUndo; } const hook = Hooks.getSingleton(); hook.add('afterInit', init); hook.add('afterUpdateSettings', init); hook.register('beforeUndo'); hook.register('afterUndo'); hook.register('beforeRedo'); hook.register('afterRedo'); export default UndoRedo;
1
14,170
Storing a reference to source data isn't the best choice. Maybe you can find a different way (without storing the reference) to save removed data?
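One possible direction, sketched under the assumption that the helpers already imported in this file (rangeEach, deepClone) and instance.getSourceDataAtRow are usable here: copy the removed rows at hook time so the action only ever stores plain clones, never the array returned by getSourceData(). The index normalization done in the original hook is omitted for brevity:

// Hypothetical sketch: snapshot only the rows being removed, so the undo
// action stores copies of the cell values rather than a reference into the
// instance's source data.
instance.addHook('beforeRemoveRow', (index, amount, logicRows, source) => {
  if (source === 'UndoRedo.undo' || source === 'UndoRedo.redo' || source === 'auto') {
    return;
  }
  const removedData = [];
  rangeEach(index, index + amount - 1, (row) => {
    removedData.push(deepClone(instance.getSourceDataAtRow(row)));
  });
  plugin.done(new UndoRedo.RemoveRowAction(index, removedData));
});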
handsontable-handsontable
js
@@ -7761,7 +7761,7 @@ void UDR::processData(UDRInvocationInfo &info, * * This method is called in debug Trafodion builds when certain * flags are set in the UDR_DEBUG_FLAGS CQD (CONTROL QUERY DEFAULT). - * See https://wiki.trafodion.org/wiki/index.php/Tutorial:_The_object-oriented_UDF_interface#Debugging_UDF_code + * See https://cwiki.apache.org/confluence/display/TRAFODION/Tutorial%3A+The+object-oriented+UDF+interface#Tutorial:Theobject-orientedUDFinterface-DebuggingUDFcode * for details. * * The default implementation prints out the process id and then
1
/********************************************************************** * * File: sqludr.h * Description: Interface between the SQL engine and routine bodies * Language: C * // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ * *********************************************************************/ #include "sqludr.h" #include <stdio.h> #include <cstdarg> #include <climits> #include <string.h> using namespace tmudr; // ------------------------------------------------------------------------ // This file includes implementations of methods defined in sqludr.h that // are of interest to UDR writers. // // For example, the default action for the C++ compiler interface for // TMUDFs are shown here. These can be called by TMUDFs, to provide // additional features and this source code can be used to decide whether // the default actions are sufficient for a given TMUDF.UDF developer // can inspect this code, copy and modify it for their own use or // call it from derived classes. // // This file gets compiled in Trafodion as part of // trafodion/core/sql/optimizer/UdfDllInteraction.cpp. // It does not need to be included into the DLL of the UDF. // // This file contains doxygen comments. To generate HTML documentation, // run this command: // // doxygen doxygen_tmudr.1.6.config // ------------------------------------------------------------------------ // ------------------------------------------------------------------------ // Member functions for class UDRException // ------------------------------------------------------------------------ /** * Constructor with an integer value for SQLSTATE * * @param sqlState ISO/ANSI SQLSTATE value to produce for this error. * According to the standard, this must be a value in * the range of 38000 - 38999 (note that since we use * an integer, non-numeric SQLSTATE values cannot be * generated. * @param printf_format a format string like it is used in printf, * with a variable list of arguments to be substituted. * Example: * new UDRException(38001, "num %d, string %s", 1, "a"); */ UDRException::UDRException(int sqlState, const char *printf_format, ...) { va_list args; const int maxMsgLen = 250; char msg[maxMsgLen]; va_start(args, printf_format); vsnprintf(msg, maxMsgLen, printf_format, args); va_end(args); text_ = msg; snprintf(sqlState_, sizeof(sqlState_), "%05d", sqlState); } /** * Constructor with a string value for SQLSTATE * * @param sqlState ISO/ANSI SQLSTATE value to produce for this error. * According to the standard, this must be a value of * the form 38xxx, with the xxx being digits or upper * case letters. * @param printf_format a format string like it is used in printf, * with a variable list of arguments to be substituted. 
*/ UDRException::UDRException(const char *sqlState, const char *printf_format, ...) { va_list args; const int maxMsgLen = 250; char msg[maxMsgLen]; va_start(args, printf_format); vsnprintf(msg, maxMsgLen, printf_format, args); va_end(args); text_ = msg; strncpy(sqlState_, sqlState, sizeof(sqlState_)); // add a NUL terminator in case we overflowed sqlState_[sizeof(sqlState_)-1] = 0; } /** * Get the SQSTATE value for this exception * * @return A string, representing the SQLSTATE. Note that * this is a pointer to a data member, the buffer * lives only as long as the UDRException object. */ const char * UDRException::getSQLState() const { return sqlState_; } /** * Get the error message associated with this exception * * @return A string, representing the error message, including * any substituted text with the additional arguments * in the constructor. Note that this is a reference to * a data member, it lives only as long as the * UDRException object. */ const std::string & UDRException::getMessage() const { return text_; } /** * Get the error message associated with this exception * * @return Same as getMessage(). * * @deprecated Use getMessage() instead, in Java that is the * standard method. */ const std::string & UDRException::getText() const { return text_; } // ------------------------------------------------------------------------ // Member functions for class TMUDRSerializableObject // ------------------------------------------------------------------------ TMUDRSerializableObject::TMUDRSerializableObject(TMUDRObjectType objectType, unsigned short version, unsigned short endianness) { v_.objectType_ = static_cast<int>(objectType); v_.totalLength_ = -1; // will be set when we serialize the object v_.version_ = version; v_.endianness_ = endianness; v_.flags_ = 0; v_.filler_ = 0; } TMUDRSerializableObject::TMUDRObjectType TMUDRSerializableObject::getObjectType() const { return static_cast<TMUDRObjectType>(v_.objectType_); } unsigned short TMUDRSerializableObject::getVersion() const { return v_.version_; } TMUDRSerializableObject::Endianness TMUDRSerializableObject::getEndianness() const { return (Endianness) v_.endianness_; } int TMUDRSerializableObject::serializedLength() { return sizeof(v_); } int TMUDRSerializableObject::serialize(Bytes &outputBuffer, int &outputBufferLength) { // as a sanity check, also serialize the total length of the object v_.totalLength_ = serializedLength(); if (outputBufferLength < v_.totalLength_) throw UDRException(38900,"need %d bytes to serialize object of type %d, have %d bytes", v_.totalLength_, static_cast<int>(v_.objectType_), outputBufferLength); memcpy(outputBuffer, (void *) &v_, sizeof(v_)); outputBuffer += sizeof(v_); outputBufferLength -= sizeof(v_); // Checks to be done by the caller: // Once the entire object is serialized, // call validateSerializedLength() to make sure // the number of bytes produced matches v_.totalLength_ return sizeof(v_); } int TMUDRSerializableObject::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { if (inputBufferLength < sizeof(v_)) throw UDRException(38900,"not enough data to deserialize object header, need %d, got %d bytes", sizeof(v_), inputBufferLength); memcpy((void *) &v_, inputBuffer, sizeof(v_)); if (inputBufferLength < v_.totalLength_) throw UDRException(38900,"not enough data to deserialize object of type %d, need %d, got %d bytes", static_cast<int>(v_.objectType_), v_.totalLength_, inputBufferLength); inputBuffer += sizeof(v_); inputBufferLength -= sizeof(v_); // Checks to be done by the 
caller: // 1. validateObjectType() right after this call // 2. Once the entire object is deserialized, // call validateDeserializedLength() to make sure // the number of bytes consumed matches v_.totalLength_ return sizeof(v_); } void TMUDRSerializableObject::validateObjectType(TMUDRObjectType o) { if (v_.objectType_ != o) throw UDRException(38900,"Object type of expected object (%d) does not match the type (%d) in the serialized buffer", o, static_cast<int>(v_.objectType_)); } void TMUDRSerializableObject::validateSerializedLength(int l) { if (l != v_.totalLength_) throw UDRException(38900,"Expected %d bytes to serialize object of type %d, actually produced %d bytes", v_.totalLength_, static_cast<int>(v_.objectType_), l); } void TMUDRSerializableObject::validateDeserializedLength(int l) { if (l != v_.totalLength_) throw UDRException(38900,"Expected %d bytes to deserialize object of type %d, actually consumed %d bytes", v_.totalLength_, static_cast<int>(v_.objectType_), l); } int TMUDRSerializableObject::serializedLengthOfInt() { return sizeof(int); } int TMUDRSerializableObject::serializedLengthOfLong() { return sizeof(long); } int TMUDRSerializableObject::serializedLengthOfString(const char *s) { return sizeof(int) + strlen(s); } int TMUDRSerializableObject::serializedLengthOfString(int stringLength) { return sizeof(int) + stringLength; } int TMUDRSerializableObject::serializedLengthOfString(const std::string &s) { return serializedLengthOfString(s.size()); } int TMUDRSerializableObject::serializedLengthOfBinary(int binaryLength) { return serializedLengthOfString(binaryLength); } int TMUDRSerializableObject::serializeInt( int i, Bytes &outputBuffer, int &outputBufferLength) { if (outputBufferLength < sizeof(int)) throw UDRException(38900,"insufficient space to serialize an int"); memcpy(outputBuffer, &i, sizeof(int)); outputBuffer += sizeof(int); outputBufferLength -= sizeof(int); return sizeof(int); } int TMUDRSerializableObject::serializeLong( long i, Bytes &outputBuffer, int &outputBufferLength) { if (outputBufferLength < sizeof(long)) throw UDRException(38900,"insufficient space to serialize an int"); memcpy(outputBuffer, &i, sizeof(long)); outputBuffer += sizeof(long); outputBufferLength -= sizeof(long); return sizeof(long); } int TMUDRSerializableObject::serializeString( const char *s, Bytes &outputBuffer, int &outputBufferLength) { int result = 0; int strLen = strlen(s); if (outputBufferLength < sizeof(int) + strLen) throw UDRException(38900,"buffer to serialize string has %d bytes, needs %d", outputBufferLength, strLen); memcpy(outputBuffer, &strLen, sizeof(int)); outputBuffer += sizeof(int); outputBufferLength -= sizeof(int); memcpy(outputBuffer, s, strLen); outputBuffer += strLen; outputBufferLength -= strLen; return sizeof(int) + strLen; } int TMUDRSerializableObject::serializeString( const char *s, int len, Bytes &outputBuffer, int &outputBufferLength) { if (outputBufferLength < sizeof(int) + len) throw UDRException(38900,"buffer to serialize string has %d bytes, needs %d", outputBufferLength, len); memcpy(outputBuffer, &len, sizeof(int)); outputBuffer += sizeof(int); outputBufferLength -= sizeof(int); if (len > 0) { memcpy(outputBuffer, s, len); outputBuffer += len; outputBufferLength -= len; } return sizeof(int) + len; } int TMUDRSerializableObject::serializeString( const std::string &s, Bytes &outputBuffer, int &outputBufferLength) { return serializeString(s.data(), s.size(), outputBuffer, outputBufferLength); } int TMUDRSerializableObject::serializeBinary( const void *b, 
int len, Bytes &outputBuffer, int &outputBufferLength) { return serializeString(static_cast<const char *>(b), len, outputBuffer, outputBufferLength); } int TMUDRSerializableObject::deserializeInt( int &i, ConstBytes &inputBuffer, int &inputBufferLength) { if (inputBufferLength < sizeof(int)) UDRException(38900,"insufficient space to deserialize an int"); memcpy(&i, inputBuffer, sizeof(int)); inputBuffer += sizeof(int); inputBufferLength -= sizeof(int); return sizeof(int); } int TMUDRSerializableObject::deserializeLong( long &i, ConstBytes &inputBuffer, int &inputBufferLength) { if (inputBufferLength < sizeof(long)) UDRException(38900,"insufficient space to deserialize an int"); memcpy(&i, inputBuffer, sizeof(long)); inputBuffer += sizeof(long); inputBufferLength -= sizeof(long); return sizeof(long); } int TMUDRSerializableObject::deserializeString( const char *&s, int &stringLength, bool makeACopy, ConstBytes &inputBuffer, int &inputBufferLength) { if (inputBufferLength < sizeof(int)) throw UDRException(38900,"insufficient space to deserialize length field of a string"); int len; memcpy(&len, inputBuffer, sizeof(int)); inputBuffer += sizeof(int); inputBufferLength -= sizeof(int); if (inputBufferLength < len) throw UDRException(38900,"string length indicator value %d exceeds size %d of serialized buffer", len, inputBufferLength); if (len <= 0) s = NULL; else if (makeACopy) { char *tempBuf = new char[len]; memcpy(tempBuf, inputBuffer, len); s = tempBuf; } else { // return a pointer to the string - needs to be copied immediately and is not null-terminated s = inputBuffer; } // this is the length of the string in bytes stringLength = len; inputBuffer += len; inputBufferLength -= len; // this is the number of bytes consumed from the buffer return sizeof(int) + len; } int TMUDRSerializableObject::deserializeString( std::string &s, ConstBytes &inputBuffer, int &inputBufferLength) { const char *temp = NULL; int strLen = 0; int result = deserializeString(temp, strLen, false, inputBuffer, inputBufferLength); s.assign(temp, strLen); return result; } int TMUDRSerializableObject::deserializeBinary( const void **b, int &binaryLength, bool makeACopy, ConstBytes &inputBuffer, int &inputBufferLength) { const char *temp; int result = deserializeString(temp, binaryLength, makeACopy, inputBuffer, inputBufferLength); *b = const_cast<char *>(temp); return result; } TMUDRSerializableObject::TMUDRObjectType TMUDRSerializableObject::getNextObjectType( ConstBytes inputBuffer, int inputBufferLength) { // determine the object type of the next object in the buffer if (inputBufferLength < sizeof(v_)) throw UDRException(38900,"not enough data to look at next object header, need %d, got %d bytes", sizeof(v_), inputBufferLength); const headerFields *nextObjInBuffer = reinterpret_cast<const headerFields *>(inputBuffer); return static_cast<TMUDRObjectType>(nextObjInBuffer->objectType_); } // ------------------------------------------------------------------------ // Member functions for class TypeInfo // ------------------------------------------------------------------------ /** Copy constructor */ TypeInfo::TypeInfo(const TypeInfo &type) : TMUDRSerializableObject(TYPE_INFO_OBJ, getCurrentVersion()) { d_.sqlType_ = type.d_.sqlType_; d_.nullable_ = type.d_.nullable_; d_.scale_ = type.d_.scale_; d_.charset_ = type.d_.charset_; d_.intervalCode_ = type.d_.intervalCode_; d_.precision_ = type.d_.precision_; d_.collation_ = type.d_.collation_; d_.length_ = type.d_.length_; d_.dataOffset_ = type.d_.dataOffset_; d_.nullIndOffset_ = 
type.d_.nullIndOffset_; d_.vcLenIndOffset_ = type.d_.vcLenIndOffset_; d_.flags_ = type.d_.flags_; d_.fillers_[0] = d_.fillers_[1] = d_.fillers_[2] = d_.fillers_[3] = 0; } /** * Default constructor, with optional arguments * * Construct a TypeInfo object from an SQL type, with several optional * arguments (including the SQL type). This is mostly used to create * formal parameters or output columns in the compiler interface, if * a more complex data type is required that is not covered by the * TupleInfo::addXXXColumn() methods. * * @param sqlType SQL type enum to construct the type from. * @param length Length of CHAR/VARCHAR types, not needed for other types. * Note that the length for UTF-8 types is in bytes, not * characters, so this is equivalent to * @n [VAR]CHAR (@c @b length BYTES) CHARACTER SET UTF8 * @param nullable Determines the NULL / NOT NULL attribute of the type * Default: false (that means NOT NULL) * @param scale Scale for numeric type, fraction precision for * fractional seconds, not needed for other types. * @param charset Character set enum for CHAR/VARCHAR types, not needed * for other types. * @param intervalCode Interval code enum for intervals, not needed otherwise. * @param precision Precision for numeric types and leading precision for * interval data types. * @param collation Collation enum for CHAR/VARCHAR types, not needed for * other types. Note that only one type of collation is * currently supported. * @throws UDRException */ TypeInfo::TypeInfo(SQLTypeCode sqlType, int length, bool nullable, int scale, SQLCharsetCode charset, SQLIntervalCode intervalCode, int precision, SQLCollationCode collation) : TMUDRSerializableObject(TYPE_INFO_OBJ, getCurrentVersion()) { d_.sqlType_ = sqlType; d_.nullable_ = nullable; d_.scale_ = scale; d_.charset_ = charset; d_.intervalCode_ = intervalCode; d_.precision_ = precision; d_.collation_ = collation; d_.length_ = length; d_.dataOffset_ = -1; d_.nullIndOffset_ = -1; d_.vcLenIndOffset_ = -1; d_.flags_ = 0; d_.fillers_[0] = d_.fillers_[1] = d_.fillers_[2] = d_.fillers_[3] = 0; switch (sqlType) { case SMALLINT: d_.length_ = 2; d_.precision_ = 0; d_.scale_ = 0; break; case INT: d_.length_ = 4; d_.precision_ = 0; d_.scale_ = 0; break; case LARGEINT: d_.length_ = 8; d_.precision_ = 0; d_.scale_ = 0; break; case NUMERIC: d_.length_ = convertToBinaryPrecision(d_.precision_); if (d_.scale_ < 0 || scale > 18) throw UDRException(38900,"Scale %d of a numeric in TypeInfo::TypeInfo is out of the allowed range of 0-18", d_.scale_); if (scale > precision) throw UDRException(38900,"Scale %d of a numeric in TypeInfo::TypeInfo is greater than precision %d", d_.scale_, d_.precision_); break; case DECIMAL_LSE: if (scale < 0 || scale > 18) throw UDRException(38900,"Scale %d of a decimal in TypeInfo::TypeInfo is out of the allowed range of 0-18", d_.scale_); if (precision < 1 || precision > 18) throw UDRException(38900,"Precision %d of a decimal in TypeInfo::TypeInfo is out of the allowed range of 1-18", d_.precision_); if (scale > precision) throw UDRException(38900,"Scale %d of a decimal in TypeInfo::TypeInfo is greater than precision %d", d_.scale_, d_.precision_); // format [-]mmmm[.sss] - total number of digits = precision d_.length_ = d_.precision_ + 1; // add one for the sign if (d_.scale_ > 0) d_.length_ += 1; // for the decimal point break; case SMALLINT_UNSIGNED: d_.length_ = 2; d_.precision_ = 0; d_.scale_ = 0; break; case INT_UNSIGNED: d_.length_ = 4; d_.precision_ = 0; d_.scale_ = 0; break; case NUMERIC_UNSIGNED: d_.length_ = 
convertToBinaryPrecision(d_.precision_); if (d_.scale_ < 0 || scale > 18) throw UDRException(38900,"Scale %d of a numeric unsigned in TypeInfo::TypeInfo is out of the allowed range of 0-18", d_.scale_); if (scale > precision) throw UDRException(38900,"Scale %d of a numeric unsigned in TypeInfo::TypeInfo is greater than precision %d", d_.scale_, d_.precision_); break; case DECIMAL_UNSIGNED: if (scale < 0 || scale > 18) throw UDRException(38900,"Scale %d of a decimal unsigned in TypeInfo::TypeInfo is out of the allowed range of 0-18", d_.scale_); if (d_.precision_ < 1 || d_.precision_ > 18) throw UDRException(38900,"Precision %d of a decimal unsigned in TypeInfo::TypeInfo is out of the allowed range of 1-18", d_.precision_); if (scale > precision) throw UDRException(38900,"Scale %d of a decimal unsigned in TypeInfo::TypeInfo is greater than precision %d", d_.scale_, d_.precision_); // format mmmm[.sss] - total number of digits = precision d_.length_ = d_.precision_; if (d_.scale_ > 0) d_.length_ += 1; // for the decimal point break; case REAL: d_.length_ = 4; break; case DOUBLE_PRECISION: d_.length_ = 8; break; case CHAR: if (d_.charset_ == UNDEFINED_CHARSET) throw UDRException(38900,"Charset must be specified for CHAR type in TypeInfo::TypeInfo"); // length is the length in characters, but d_.length_ is // the byte length, multiply by min bytes per char d_.length_ = length * minBytesPerChar(); if (d_.collation_ == UNDEFINED_COLLATION) throw UDRException(38900,"Collation must be specified for CHAR type in TypeInfo::TypeInfo"); break; case VARCHAR: if (d_.charset_ == UNDEFINED_CHARSET) throw UDRException(38900,"Charset must be specified for VARCHAR type in TypeInfo::TypeInfo"); if (d_.collation_ == UNDEFINED_COLLATION) throw UDRException(38900,"Collation must be specified for VARCHAR type in TypeInfo::TypeInfo"); // length is the length in characters, but d_.length_ is // the byte length, multiply by min bytes per char d_.length_ = length * minBytesPerChar(); if (d_.length_ > 32767) // see also CharType::CharType in ../common/CharType.cpp d_.flags_ |= TYPE_FLAG_4_BYTE_VC_LEN; break; case CLOB: case BLOB: // BLOB and CLOB are represented by a handle that looks like a VARCHAR // but may contain binary data (use ISO8859-1 to be able to represent // binary data) d_.charset_ = CHARSET_ISO88591; // should we check the provided length if it comes from the UDR writer? // or just let it error out at runtime with an overflow? 
break; case DATE: // string yyyy-mm-dd d_.length_ = 10; d_.scale_ = 0; break; case TIME: // string hh:mm:ss d_.length_ = 8; if (scale > 0) d_.length_ += scale+1; if (scale < 0 || scale > 6) throw UDRException(38900,"Scale %d of time in TypeInfo::TypeInfo is outside the allowed range of 0-6", scale); break; case TIMESTAMP: // string yyyy-mm-dd hh:mm:ss.ffffff // 12345678901234567890123456 d_.length_ = 19; if (scale > 0) d_.length_ += scale+1; if (scale < 0 || scale > 6) throw UDRException(38900,"Scale %d of timestamp in TypeInfo::TypeInfo is outside the allowed range of 0-6", scale); break; case INTERVAL: { int totalPrecision = 0; bool allowScale = false; if (d_.intervalCode_ == UNDEFINED_INTERVAL_CODE) throw UDRException(38900,"Interval code in TypeInfo::TypeInfo is undefined"); if (scale < 0 || scale > 6) throw UDRException(38900,"Scale %d of interval in TypeInfo::TypeInfo is outside the allowed range of 0-6", sqlType); // all intervals are treated like signed numbers, need to compute // the length from the combined precision of all parts, see method // IntervalType::getStorageSize() in ../common/IntervalType.cpp and // see also the defaults for leading precision in the SQL Reference // Manual. Note that the default for fraction precision in this // constructor is 0, the default scale for other types. This is // different from the default fraction precision of 6 in Trafodion // SQL!! // start with the leading precision if (precision == 0) totalPrecision = 2; // default leading precision else totalPrecision = precision; switch (d_.intervalCode_) { case INTERVAL_YEAR: case INTERVAL_MONTH: case INTERVAL_DAY: case INTERVAL_HOUR: case INTERVAL_MINUTE: // we are all set break; case INTERVAL_SECOND: // add the fraction precision (scale) totalPrecision += scale; allowScale = true; break; case INTERVAL_YEAR_MONTH: case INTERVAL_DAY_HOUR: case INTERVAL_HOUR_MINUTE: // leading field + 1 more field totalPrecision += 2; break; case INTERVAL_DAY_MINUTE: // leading field + 2 more fields totalPrecision += 4; break; case INTERVAL_DAY_SECOND: totalPrecision += 6 + scale; allowScale = true; break; case INTERVAL_HOUR_SECOND: totalPrecision += 4 + scale; allowScale = true; break; case INTERVAL_MINUTE_SECOND: totalPrecision += 2 + scale; allowScale = true; break; default: throw UDRException( 38900, "TypeInfo::TypeInfo() for interval type with invalid interval code"); } if (scale > 0 && !allowScale) throw UDRException( 38900, "TypeInfo::TypeInfo(): Scale (fraction precision) should not be specified for a type when end field is not SECOND"); // convert decimal to binary precision d_.length_ = convertToBinaryPrecision(totalPrecision); } break; case UNDEFINED_SQL_TYPE: // this case is reached when we call the default constructor, // type and other fields still need to be defined break; default: throw UDRException(38900,"Invalid SQL Type code for the short TypeInfo constructor with an SQL code: %d", sqlType); break; } } TypeInfo::TypeInfo(SQLTypeCode sqlType, bool nullable, int scale, SQLCharsetCode charset, SQLIntervalCode intervalCode, int precision, SQLCollationCode collation, int length) : TMUDRSerializableObject(TYPE_INFO_OBJ, getCurrentVersion()) { d_.sqlType_ = sqlType; d_.nullable_ = (nullable ? 
1 : 0); d_.scale_ = scale; d_.charset_ = charset; d_.intervalCode_ = intervalCode; d_.precision_ = precision; d_.collation_ = collation; d_.length_ = length; d_.dataOffset_ = -1; d_.nullIndOffset_ = -1; d_.vcLenIndOffset_ = -1; d_.flags_ = 0; d_.fillers_[0] = d_.fillers_[1] = d_.fillers_[2] = d_.fillers_[3] = 0; } /** * Get the SQL type. * * @return SQL type enum. */ TypeInfo::SQLTypeCode TypeInfo::getSQLType() const { return (TypeInfo::SQLTypeCode) d_.sqlType_; } /** * Get the SQL type class. * * Determine whether this is a numeric character, datetime or interval type. * @return SQL type class enum. */ TypeInfo::SQLTypeClassCode TypeInfo::getSQLTypeClass() const { switch (d_.sqlType_) { case SMALLINT: case INT: case LARGEINT: case NUMERIC: case DECIMAL_LSE: case SMALLINT_UNSIGNED: case INT_UNSIGNED: case NUMERIC_UNSIGNED: case DECIMAL_UNSIGNED: case REAL: case DOUBLE_PRECISION: return NUMERIC_TYPE; case CHAR: case VARCHAR: return CHARACTER_TYPE; case DATE: case TIME: case TIMESTAMP: return DATETIME_TYPE; case INTERVAL: return INTERVAL_TYPE; case BLOB: case CLOB: return LOB_TYPE; default: break; } return UNDEFINED_TYPE_CLASS; } /** * Get the SQL type subclass. * * This goes to one more level of detail beyond the type class, * like exact/approximate numeric, char/varchar, etc. * @return SQL type subclass enum. */ TypeInfo::SQLTypeSubClassCode TypeInfo::getSQLTypeSubClass() const { switch (d_.sqlType_) { case SMALLINT: case INT: case LARGEINT: case NUMERIC: case DECIMAL_LSE: case SMALLINT_UNSIGNED: case INT_UNSIGNED: case NUMERIC_UNSIGNED: case DECIMAL_UNSIGNED: return EXACT_NUMERIC_TYPE; case REAL: case DOUBLE_PRECISION: return APPROXIMATE_NUMERIC_TYPE; case CHAR: return FIXED_CHAR_TYPE; case VARCHAR: return VAR_CHAR_TYPE; case DATE: return DATE_TYPE; case TIME: return TIME_TYPE; case TIMESTAMP: return TIMESTAMP_TYPE; case INTERVAL: switch (d_.intervalCode_) { case INTERVAL_YEAR: case INTERVAL_MONTH: case INTERVAL_YEAR_MONTH: return YEAR_MONTH_INTERVAL_TYPE; case INTERVAL_DAY: case INTERVAL_HOUR: case INTERVAL_MINUTE: case INTERVAL_SECOND: case INTERVAL_DAY_HOUR: case INTERVAL_DAY_MINUTE: case INTERVAL_DAY_SECOND: case INTERVAL_HOUR_MINUTE: case INTERVAL_HOUR_SECOND: case INTERVAL_MINUTE_SECOND: return DAY_SECOND_INTERVAL_TYPE; default: break; } case BLOB: case CLOB: return LOB_SUB_CLASS; default: break; } return UNDEFINED_TYPE_SUB_CLASS; } /** * Get whether the type is nullable. * * @return True for nullable types, false for non-nullable types. */ bool TypeInfo::getIsNullable() const { return (d_.nullable_ != 0); } /** * Get the scale of the data type. * * For integer, largeint, etc. types the scale is 0, since these are * integer data types. For NUMERIC and DECIMAL types, a scale can * be specified. Timestamp and some interval data types have a * "fraction precision" value, which is the number of digits * allowed after the decimal point for seconds. This fraction precision * is returned as the scale, since can be considered the scale of * the seconds part. For other data types like CHAR, the scale * value is meaningless. * @return Scale (digits after the decimal point) for numeric types, * fraction precision (digits of fractional seconds) for intervals. */ int TypeInfo::getScale() const { return d_.scale_; } /** * Get the character set of the data type. * * @return Character set enum. */ TypeInfo::SQLCharsetCode TypeInfo::getCharset() const { return (TypeInfo::SQLCharsetCode) d_.charset_; } /** * Get the interval code for start/end fields. 
* * @return Interval code enum, indicating start and end fields of an interval type. */ TypeInfo::SQLIntervalCode TypeInfo::getIntervalCode() const { return (TypeInfo::SQLIntervalCode) d_.intervalCode_; } /** * Get the precision (max. number of significant digits). * * The precision is the maximum number of digits before the decimal * point a value can have. For interval types, this is the "leading * precision". For example, an INTEGER value can range from * -2,147,483,648 to 2,147,483,647. It's precision is 10, since the * longest number has 10 digits. Note that not all 10 digit numbers * can be represented in an integer. This is called binary * precision. NUMERIC and DECIMAL types have decimal precision, * meaning that a NUMERIC(10,0) type can represent values from * -9,999,999,999 to +9,999,999,999. * * @return Precision of numeric types or interval types. */ int TypeInfo::getPrecision() const { return d_.precision_; } /** * Get the collation for char/varchar data types. * * Note that, currently, only one collation is supported. * This default collation is a binary collation, except that * trailing blanks are ignored. * * @return Collation enum. */ TypeInfo::SQLCollationCode TypeInfo::getCollation() const { return (TypeInfo::SQLCollationCode) d_.collation_; } /** * Get the length of a value of the type. * * Getting the length is useful for CHAR/VARCHAR data types * but probably not as useful for other types that may have * an internal representation unknown to a UDR writer. * This returns the length in bytes, not in characters. * * @see getCharLength() * * @return Length in bytes. */ int TypeInfo::getByteLength() const { return d_.length_; } /** * Get the maximum number of characters that can be stored in this type. * * This method should be used only for character types that * have a fixed-width encoding. For variable-length encoding, like * UTF-8, the method returns the highest possible number of characters * (assuming single byte characters in the case of UTF-8). Right now, * UTF-8 data types all have byte semantics, meaning there is no * limit for the number of characters stored in a type, it is only * limited by the number of bytes. The method returns 0 for numeric * types. It returns the length of the string representation for * types that are represented by a string, like datetime types. * * @see getByteLength() * * @return Length in bytes. * @throws UDRException */ int TypeInfo::getMaxCharLength() const { switch (getSQLTypeClass()) { case CHARACTER_TYPE: return d_.length_ / minBytesPerChar(); case NUMERIC_TYPE: return 0; case DATETIME_TYPE: // return the length of the string representation // in ISO88591/UTF-8 return d_.length_; case INTERVAL_TYPE: return 0; default: throw UDRException( 38900, "Called TypeInfo::getMaxCharLength() on an unsupported type: %d", d_.sqlType_); } } /** * Set the nullable attribute of a type * * Use this method to set types created locally in the UDF * to be nullable or not nullable. * * @param nullable true to set the type to nullable, false * to give the type the NOT NULL attibute. 
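 *
 * A minimal, hypothetical sketch (the variable name is illustrative and the
 * defaulted optional constructor arguments described above are assumed):
 * @code
 * TypeInfo t(TypeInfo::INT);   // INT, NOT NULL by default
 * t.setNullable(true);         // the type now allows NULL values
 * @endcode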
*/ void TypeInfo::setNullable(bool nullable) { d_.nullable_ = nullable; } int TypeInfo::getInt(const char *row, bool &wasNull) const { long result = getLong(row, wasNull); if (result < INT_MIN || result > INT_MAX) throw UDRException( 38900, "Under or overflow in getInt(), %ld does not fit in an int", result); return static_cast<int>(result); } long TypeInfo::getLong(const char *row, bool &wasNull) const { if (row == NULL) throw UDRException( 38900, "Row not available for getLong() or related method"); if (d_.dataOffset_ < 0) throw UDRException( 38900, "Offset for column not set, getLong() or related method not available"); if (d_.nullIndOffset_ >= 0 && (*((short *) (row + d_.nullIndOffset_)) != 0)) { wasNull = true; return 0; } long result = 0; int tempSQLType = d_.sqlType_; const char *data = row + d_.dataOffset_; wasNull = false; // convert NUMERIC to the corresponding type with binary precision // see also code in LmTypeIsString() in file ../generator/LmExpr.cpp if (d_.sqlType_ == NUMERIC || d_.sqlType_ == NUMERIC_UNSIGNED || d_.sqlType_ == INTERVAL) { if (d_.length_ == 2) if (d_.sqlType_ == NUMERIC_UNSIGNED) tempSQLType = SMALLINT_UNSIGNED; else tempSQLType = SMALLINT; else if (d_.length_ == 4) if (d_.sqlType_ == NUMERIC_UNSIGNED) tempSQLType = INT_UNSIGNED; else tempSQLType = INT; else if (d_.length_ == 8) tempSQLType = LARGEINT; // unsigned 8 byte integer is not supported } switch (tempSQLType) { case SMALLINT: result = *((short *) data); break; case INT: result = *((int *) data); break; case LARGEINT: result = *((long *) data); break; case SMALLINT_UNSIGNED: result = *((unsigned short *) data); break; case INT_UNSIGNED: result = *((int *) data); break; case DECIMAL_LSE: case DECIMAL_UNSIGNED: { long fractionalPart = 0; bool isNegative = false; bool overflow = false; char buf[200]; int dataLen = d_.length_; if (*data == '-') { isNegative = true; data++; dataLen--; } // copy the value to be able to add a terminating NUL byte memcpy(buf, data, dataLen); buf[dataLen] = 0; if (d_.scale_ == 0) { if (sscanf(buf, "%ld", &result) != 1) throw UDRException( 38900, "Error converting decimal value %s to a long", buf); } else { if (sscanf(buf, "%ld.%ld", &result, &fractionalPart) != 2) throw UDRException( 38900, "Error converting decimal value %s (with scale) to a long", buf); for (int s=0; s<d_.scale_; s++) if (result <= LONG_MAX/10) result *= 10; else overflow = true; if (result <= LONG_MAX - fractionalPart) result += fractionalPart; else overflow = true; } if (isNegative) if (result < LONG_MAX) result = -result; else overflow = true; if (overflow) throw UDRException( 38900, "Under or overflow occurred, converting decimal to a long"); } break; case REAL: case DOUBLE_PRECISION: { double dresult = getDouble(row, wasNull); if (dresult < LONG_MIN || dresult > LONG_MAX) throw UDRException( 38900, "Overflow in getInt() or getLong(), float value %g does not fit in a long", dresult); result = static_cast<long>(dresult); } break; default: throw UDRException(38902, "TypeInfo::getLong() and getDouble() not supported for SQL type %d", d_.sqlType_); break; } return result; } double TypeInfo::getDouble(const char *row, bool &wasNull) const { if (row == NULL) throw UDRException( 38900, "Row not available for getDouble()"); if (d_.dataOffset_ < 0) throw UDRException( 38900, "Offset for column not set, getDouble() method not available"); if (d_.nullIndOffset_ >= 0 && *((short *) (row + d_.nullIndOffset_)) != 0) { wasNull = true; return 0.0; } double result = 0.0; const char *data = row + d_.dataOffset_; wasNull 
= false; switch (d_.sqlType_) { case REAL: result = *((float *) data); break; case DOUBLE_PRECISION: result = *((double *) data); break; case SMALLINT: case INT: case LARGEINT: case NUMERIC: case DECIMAL_LSE: case SMALLINT_UNSIGNED: case INT_UNSIGNED: case NUMERIC_UNSIGNED: case DECIMAL_UNSIGNED: case INTERVAL: { result = static_cast<double>(getLong(row, wasNull)); // for numbers with a scale, ensure that the decimal // point is at the right place for floating point results for (int s=0; s<d_.scale_; s++) result /= 10; } break; default: throw UDRException(38900, "getDouble() not supported for SQL type %d", d_.sqlType_); break; } return result; } time_t TypeInfo::getTime(const char *row, bool &wasNull) const { time_t result = 0; if (d_.sqlType_ == INTERVAL) { long longVal = getLong(row, wasNull); if (wasNull) return 0; // convert the interval value to seconds // NOTE: This relies on the assumption that time_t // uses seconds as its unit, which is true for // current Linux systems but may not always remain // true switch (d_.intervalCode_) { case INTERVAL_DAY: result = longVal * 86400; break; case INTERVAL_HOUR: case INTERVAL_DAY_HOUR: result = longVal * 3600; break; case INTERVAL_MINUTE: case INTERVAL_DAY_MINUTE: case INTERVAL_HOUR_MINUTE: result = longVal * 60; break; case INTERVAL_SECOND: case INTERVAL_DAY_SECOND: case INTERVAL_HOUR_SECOND: case INTERVAL_MINUTE_SECOND: { // scale the value down and ignore fractional seconds for (int s=0; s<d_.scale_; s++) longVal /= 10; result = longVal; } break; default: throw UDRException( 38900, "getTime() is not supported for year-month intervals"); } } // intervals else { int stringLen = 0; const char *val = getRaw(row, wasNull, stringLen); char buf[200]; struct tm t; bool ok = true; if (wasNull) return 0; t.tm_sec = t.tm_min = t.tm_hour = t.tm_mday = t.tm_mon = t.tm_year = t.tm_wday = t.tm_yday = t.tm_isdst = 0; if (stringLen+1 > sizeof(buf)) throw UDRException( 38900, "Datetime string of length %d exceeds size limit of %d for time_t conversion", stringLen, (int) sizeof(buf) - 1); memcpy(buf, val, stringLen); buf[stringLen] = 0; switch (d_.sqlType_) { case DATE: // yyyy-mm-dd ok = (sscanf(buf,"%4d-%2d-%2d", &t.tm_year, &t.tm_mon, &t.tm_mday) == 3); result = mktime(&t); break; case TIME: // hh:mm:ss ok = (sscanf(buf,"%2d:%2d:%2d", &t.tm_hour, &t.tm_min, &t.tm_sec) == 3); result = 3600 * t.tm_hour + 60 * t.tm_min + t.tm_sec; break; case TIMESTAMP: // yy-mm-dd hh:mm:ss ok = (sscanf(buf,"%4d-%2d-%2d %2d:%2d:%2d", &t.tm_year, &t.tm_mon, &t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) == 6); result = mktime(&t); break; default: throw UDRException(38900, "getTime() not supported for SQL type %d", d_.sqlType_); } if (!ok) throw UDRException( 38900, "Unable to parse datetime string %s for conversion to time_t", buf); // catch errors returned by mktime if (result < 0) throw UDRException( 38900, "Unable to convert datetime string %s to time_t", buf); } return result; } const char * TypeInfo::getRaw(const char *row, bool &wasNull, int &byteLen) const { if (row == NULL) throw UDRException( 38900, "Row not available for getRaw()"); if (d_.dataOffset_ < 0) throw UDRException( 38900, "Offset for column not set, getRaw() method not available"); if (d_.nullIndOffset_ >= 0 && *((short *) (row + d_.nullIndOffset_)) != 0) { wasNull = true; byteLen = 0; return NULL; } const char *result = row + d_.dataOffset_; wasNull = false; switch (d_.sqlType_) { case VARCHAR: case BLOB: case CLOB: if (d_.flags_ & TYPE_FLAG_4_BYTE_VC_LEN) { const int32_t *vcLen4 = reinterpret_cast<const 
int32_t *>(row + d_.vcLenIndOffset_); byteLen = *vcLen4; } else { const int16_t *vcLen2 = reinterpret_cast<const int16_t *>(row + d_.vcLenIndOffset_); byteLen = *vcLen2; } break; case UNDEFINED_SQL_TYPE: throw UDRException(38900, "getString()/getRaw() not supported for SQL type %d", d_.sqlType_); break; default: byteLen = d_.length_; if (d_.sqlType_ == CHAR) switch (d_.charset_) { case CHARSET_ISO88591: case CHARSET_UTF8: // trim trailing blanks from the value while (byteLen > 0 && result[byteLen-1] == ' ') byteLen--; break; case CHARSET_UCS2: // trim trailing little-endian UCS2 blanks // from the value while (byteLen > 1 && reinterpret_cast<const unsigned short *>(result)[byteLen/2-1] == (unsigned short) ' ') byteLen -= 2; break; default: throw UDRException( 38900, "Unsupported character set in TupleInfo::getRaw(): %d", d_.charset_); } break; } return result; } bool TypeInfo::isAvailable() const { return (d_.dataOffset_ >= 0); } void TypeInfo::setInt(int val, char *row) const { setLong(val, row); } void TypeInfo::setLong(long val, char *row) const { if (row == NULL || d_.dataOffset_ < 0) throw UDRException(38900, "setInt() or setLong() on a non-existent value"); // set NULL indicator to 0 if (d_.nullIndOffset_ >= 0) *(reinterpret_cast<short *>(row + d_.nullIndOffset_)) = 0; int tempSQLType = d_.sqlType_; char *data = row + d_.dataOffset_; // convert NUMERIC to the corresponding type with binary precision if (d_.sqlType_ == NUMERIC || d_.sqlType_ == NUMERIC_UNSIGNED || d_.sqlType_ == INTERVAL) { if (d_.length_ == 2) if (d_.sqlType_ == NUMERIC_UNSIGNED) tempSQLType = SMALLINT_UNSIGNED; else tempSQLType = SMALLINT; else if (d_.length_ == 4) if (d_.sqlType_ == NUMERIC_UNSIGNED) tempSQLType = INT_UNSIGNED; else tempSQLType = INT; else if (d_.length_ == 8) tempSQLType = LARGEINT; // unsigned 8 byte integer is not supported } switch (tempSQLType) { case SMALLINT: *((short *) data) = val; break; case INT: *((int *) data) = val; break; case LARGEINT: *((long *) data) = val; break; case SMALLINT_UNSIGNED: if (val < 0) throw UDRException( 38900, "Trying to assign a negative value to a SMALLINT UNSIGNED type"); *((unsigned short *) data) = val; break; case INT_UNSIGNED: if (val < 0) throw UDRException( 38900, "Trying to assign a negative value to an INT UNSIGNED type"); *((int *) data) = val; break; case DECIMAL_LSE: case DECIMAL_UNSIGNED: { bool overflow = false; bool isNegative = false; int remainingLength = d_.length_; int neededLengthWithoutSign = d_.precision_ + (d_.scale_ > 0 ? 
1 : 0); char buf[20]; int remainingBufLength = sizeof(buf); char *bufPtr = buf; const long maxvals[] = {9L, 99L, 999L, 9999L, 99999L, 999999L, 9999999L, 99999999L, 999999999L, 9999999999L, 99999999999L, 999999999999L, 9999999999999L, 99999999999999L, 999999999999999L, 9999999999999999L, 99999999999999999L, 999999999999999999L}; if (d_.precision_ < 1 || d_.precision_ > 18 || d_.scale_ < 0 || d_.scale_ > d_.precision_) throw UDRException( 38900, "Invalid precision (%d) or scale (%d) for a decimal data type", d_.precision_, d_.scale_); // right now precision is limited to 18, but may need to use this code for BigNum // so we add a check for this future case if (d_.length_ >= sizeof(buf)) throw UDRException( 38900, "Decimal precision %d is not supported by setLong(), limit is %d", d_.precision_, sizeof(buf)); if (val < 0) { if (tempSQLType == DECIMAL_UNSIGNED) throw UDRException( 38900, "Trying to assign a negative value to a DECIMAL UNSIGNED type"); val = -val; isNegative = true; *bufPtr = '-'; bufPtr++; remainingLength--; remainingBufLength--; } // add enough blanks to print the number right-adjusted while (neededLengthWithoutSign < remainingLength) { *bufPtr = ' '; bufPtr++; remainingLength--; remainingBufLength--; } // sanity check, d_.length_ should have enough space for sign, // precision and decimal point if (remainingLength < neededLengthWithoutSign) throw UDRException( 38900, "Internal error, field length too short in setLong() (%d, %d)", remainingLength, neededLengthWithoutSign); // validate limits for decimal precision if (val > maxvals[d_.precision_-1]) throw UDRException( 38900, "Overflow occurred while converting value %ld to a DECIMAL(%d, %d)", val, d_.precision_, d_.scale_); if (d_.scale_ == 0) { snprintf(bufPtr, remainingBufLength, "%0*ld", d_.precision_, val); } else { long fractionalValue = 0; long multiplier = 1; for (int s=0; s<d_.scale_; s++) { fractionalValue += multiplier * (val % 10); val /= 10; multiplier *= 10; } snprintf(bufPtr, remainingBufLength, "%0*ld.%0*ld", d_.precision_-d_.scale_, val, d_.scale_, fractionalValue); } // snprintf put a terminating NUL byte into the string, // which is not allowed in the actual record, copy the // part without this extra byte into the record memcpy(data, buf, d_.length_); } break; case REAL: case DOUBLE_PRECISION: setDouble(val, row); break; default: throw UDRException(38900, "setLong(), setInt() or related is not supported for data type %d", d_.sqlType_); } } void TypeInfo::setDouble(double val, char *row) const { if (row == NULL || d_.dataOffset_ < 0) throw UDRException(38900, "setDouble() on a non-existent value"); // set NULL indicator to 0 if (d_.nullIndOffset_ >= 0) *(reinterpret_cast<short *>(row + d_.nullIndOffset_)) = 0; const char *data = row + d_.dataOffset_; switch (d_.sqlType_) { case REAL: // we are not testing for underflow at this point if (val > FLT_MAX || val < -FLT_MAX) throw UDRException( 38900, "Overflow when assigining to REAL type"); *(reinterpret_cast<float *>(row + d_.dataOffset_)) = val; break; case DOUBLE_PRECISION: *(reinterpret_cast<double *>(row + d_.dataOffset_)) = val; break; default: throw UDRException(38900, "setDouble() is not supported for data type %d", d_.sqlType_); } } void TypeInfo::setTime(time_t val, char *row) const { if (d_.sqlType_ == INTERVAL) { long tVal = static_cast<long>(val); long result = 0; // convert the time_t value to the base units of the interval // NOTE: This relies on the assumption that time_t // uses seconds as its unit, which is true for // current Linux systems but 
may not always remain // true. It may also some day become bigger than long. switch (d_.intervalCode_) { case INTERVAL_DAY: result = tVal/86400; break; case INTERVAL_HOUR: case INTERVAL_DAY_HOUR: result = tVal/3600; break; case INTERVAL_MINUTE: case INTERVAL_DAY_MINUTE: case INTERVAL_HOUR_MINUTE: result = tVal/60; break; case INTERVAL_SECOND: case INTERVAL_DAY_SECOND: case INTERVAL_HOUR_SECOND: case INTERVAL_MINUTE_SECOND: { // scale the value up for (int s=0; s<d_.scale_; s++) tVal *= 10; result = tVal; } break; default: throw UDRException( 38900, "getTime() is not supported for year-month intervals"); } setLong(result, row); } // intervals else { struct tm t; time_t temp = val; char buf[64]; const char *fraction = ".000000"; int strLimit = sizeof(buf) - strlen(fraction); if (gmtime_r(&temp, &t) != &t) throw UDRException( 38900, "Unable to interpret time_t value %ld", (long) val); switch (d_.sqlType_) { case DATE: // yyyy-mm-dd snprintf(buf, strLimit, "%04d-%02d-%02d", t.tm_year, t.tm_mon, t.tm_mday); break; case TIME: // hh:mm:ss snprintf(buf, strLimit, "%02d:%02d:%02d", t.tm_hour, t.tm_min, t.tm_sec); break; case TIMESTAMP: // yyyy-mm-d hh:mm:ss snprintf(buf, strLimit, "%04d-%02d-%02d %02d:%02d:%02d", t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec); break; default: throw UDRException(38900, "setTime() not supported for SQL type %d", d_.sqlType_); } // add fraction (with value 0) if needed if (d_.scale_ > 0) strncat(buf, fraction, d_.scale_+1); setString(buf, strlen(buf), row); } // types other than intervals } void TypeInfo::setString(const char *val, int stringLen, char *row) const { if (row == NULL || d_.dataOffset_ < 0) throw UDRException(38900, "setString() on a non-existent value"); // set NULL indicator, a stringLen of 0 and a NULL val ptr is interpreted // as NULL, everything else as non-null if (d_.nullIndOffset_ >= 0) if (val == NULL) { if (stringLen > 0) throw UDRException( 38900, "setString with NULL string and string len > 0"); setNull(row); return; } else *(reinterpret_cast<short *>(row + d_.nullIndOffset_)) = 0; char *data = row + d_.dataOffset_; bool isApproxNumeric = false; switch (d_.sqlType_) { case CHAR: case VARCHAR: case DATE: case TIME: case TIMESTAMP: case BLOB: case CLOB: // a string overflow will raise an exception if (stringLen > d_.length_) // should probably check whether this is a CHAR or datetime // and the excess characters are all blanks throw UDRException( 38900, "setString() with a string of length %d on a column with length %d", stringLen, d_.length_); // for these types, copy the string and pad with blanks // of the appropriate charset for fixed-length strings memcpy(data, val, stringLen); if (d_.sqlType_ == VARCHAR || d_.sqlType_ == BLOB || d_.sqlType_ == CLOB) { // set the varchar length indicator if (d_.vcLenIndOffset_ < 0) throw UDRException(38900, "Internal error, VARCHAR/BLOB/CLOB without length indicator"); if (d_.flags_ & TYPE_FLAG_4_BYTE_VC_LEN) *(reinterpret_cast<int32_t *>(row + d_.vcLenIndOffset_)) = stringLen; else *(reinterpret_cast<int16_t *>(row + d_.vcLenIndOffset_)) = stringLen; } else if (stringLen < d_.length_) // fill fixed character value with blanks of the appropriate // character set switch (d_.charset_) { case CHARSET_ISO88591: case CHARSET_UTF8: memset(data+stringLen, ' ', d_.length_ - stringLen); break; case CHARSET_UCS2: { int paddedLen = stringLen; // pad with little-endian UCS-2 blanks while (paddedLen+1 < d_.length_) { reinterpret_cast<unsigned short *>(data)[paddedLen/2] = (unsigned short) ' '; paddedLen 
+= 2; } } break; default: throw UDRException( 38900, "Unsupported character set in TupleInfo::setString(): %d", d_.charset_); } break; case REAL: case DOUBLE_PRECISION: isApproxNumeric = true; // fall through to next case case SMALLINT: case INT: case LARGEINT: case NUMERIC: case DECIMAL_LSE: case SMALLINT_UNSIGNED: case INT_UNSIGNED: case NUMERIC_UNSIGNED: case DECIMAL_UNSIGNED: { char buf[200]; long lval; double dval; int numCharsConsumed = 0; int rc = 0; // ignore trailing blanks while (val[stringLen-1] == ' ') stringLen--; if (stringLen+1 > sizeof(buf)) throw UDRException( 38900, "String of length %d exceeds size limit of %d for numeric conversion", stringLen, (int) sizeof(buf) - 1); // copy the value to be able to add a terminating NUL byte memcpy(buf, val, stringLen); buf[stringLen] = 0; if (isApproxNumeric) rc = sscanf(buf,"%lf%n", &dval, &numCharsConsumed) < 0; else rc = sscanf(buf,"%ld%n", &lval, &numCharsConsumed) < 0; if (rc < 0) throw UDRException( 38900, "Error in setString(), \"%s\" is not a numeric value", buf); // check for any non-white space left after conversion while (numCharsConsumed < stringLen) if (buf[numCharsConsumed] != ' ' && buf[numCharsConsumed] != '\t') throw UDRException( 38900, "Found non-numeric character in setString for a numeric column: %s", buf); else numCharsConsumed++; if (isApproxNumeric) setDouble(dval, row); else setLong(lval, row); } break; case INTERVAL: { char buf[100]; char *strVal; unsigned long years, months, days, hours, minutes, seconds, singleField; long result = 0; unsigned long fractionalVal = 0; int numLeaderChars = 0; int numCharsConsumed = 0; int numFractionChars = 0; bool ok = true; bool readFraction = false; bool isNegative = false; // ignore trailing blanks while (val[stringLen-1] == ' ') stringLen--; if (stringLen+1 > sizeof(buf)) throw UDRException( 38900, "String of length %d exceeds size limit of %d for interval conversion", stringLen, (int) sizeof(buf) - 1); // copy the value to be able to add a terminating NUL byte memcpy(buf, val, stringLen); buf[stringLen] = 0; strVal = buf; // check for the sign while (*strVal == ' ') strVal++; if (*strVal == '-') { isNegative = true; strVal++; } numLeaderChars = strVal - buf; // Use sscanf to convert string representation to a number. // Note that this does not check for overflow, which cannot occur // for valid interval literals, and that it also may allow some // string that aren't quite legal, such as 01:120:00 (should be 03:00:00). // We treat such overflows like other overflows that could occur in the // user-written code. 
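// For illustration only (this comment is not part of the original file), the
// string shapes parsed below look like, e.g.:
//   INTERVAL_YEAR_MONTH:  "2-06"            (years-months)
//   INTERVAL_HOUR_MINUTE: "100:30"          (hours:minutes)
//   INTERVAL_DAY_SECOND:  "5 12:30:15.25"   (days hours:minutes:seconds)
// with an optional leading '-' already consumed above.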
switch (d_.intervalCode_) { case INTERVAL_YEAR: case INTERVAL_MONTH: case INTERVAL_DAY: case INTERVAL_HOUR: case INTERVAL_MINUTE: ok = (sscanf(strVal, "%lu%n", &singleField, &numCharsConsumed) >= 1); result = singleField; break; case INTERVAL_SECOND: ok = (sscanf(strVal, "%lu%n", &singleField, &numCharsConsumed) >= 1); result = singleField; readFraction = true; break; case INTERVAL_YEAR_MONTH: ok = (sscanf(strVal, "%lu-%lu%n", &years, &months, &numCharsConsumed) >= 2); result = years * 12 + months; break; case INTERVAL_DAY_HOUR: ok = (sscanf(strVal, "%lu %lu%n", &days, &hours, &numCharsConsumed) >= 2); result = days * 24 + hours; break; case INTERVAL_DAY_MINUTE: ok = (sscanf(strVal, "%lu %lu:%lu%n", &days, &hours, &minutes, &numCharsConsumed) >= 3); result = days * 1440 + hours * 60 + minutes; break; case INTERVAL_DAY_SECOND: ok = (sscanf(strVal, "%lu %lu:%lu:%lu%n", &days, &hours, &minutes, &seconds, &numCharsConsumed) >= 4); result = days * 86400 + hours * 3600 + minutes * 60 + seconds; readFraction = true; break; case INTERVAL_HOUR_MINUTE: ok = (sscanf(strVal, "%lu:%lu%n", &hours, &minutes, &numCharsConsumed) >= 2); result = hours * 60 + minutes; break; case INTERVAL_HOUR_SECOND: ok = (sscanf(strVal, "%lu:%lu:%lu%n", &hours, &minutes, &seconds, &numCharsConsumed) >= 3); result = hours * 3600 + minutes * 60 + seconds; readFraction = true; break; case INTERVAL_MINUTE_SECOND: ok = (sscanf(strVal, "%lu:%lu%n", &minutes, &seconds, &numCharsConsumed) >= 2); result = minutes * 60 + seconds; readFraction = true; break; default: throw UDRException( 38900, "Invalid interval code in TupleInfo::setString()"); } strVal += numCharsConsumed; // allow fractional seconds, regardless of whether fraction precision is >0 if (ok && readFraction && *strVal == '.') { ok = (sscanf(strVal,".%ld%n", &fractionalVal, &numFractionChars) >= 1); strVal += numFractionChars; // then, if the fractional seconds are not 0, complain if fraction // precision is 0. 
if (fractionalVal > 0 && d_.scale_ == 0) throw UDRException( 38900, "Encountered a fractional second part in a string value for an interval type that doesn't allow fractional values: %s", buf); } if (!ok) throw UDRException( 38900, "Error in setString(), \"%s\" is not an interval value for interval code %d", buf, d_.intervalCode_); // check for any non-white space left after conversion while (strVal - buf < stringLen) if (*strVal != ' ' && *strVal != '\t') throw UDRException( 38900, "Found non-numeric character in setString for an interval column: %s", buf); else strVal++; if (d_.scale_ > 0) { long fractionOverflowTest = fractionalVal; // scale up the result for (int s=0; s<d_.scale_; s++) { result *= 10; fractionOverflowTest /= 10; } if (fractionOverflowTest != 0) throw UDRException( 38900, "Fractional value %ld exceeds allowed range for interval fraction precision %d", fractionalVal, d_.scale_); // add whole and fractional seconds (could overflow in extreme cases) result += fractionalVal; } // could overflow in extreme cases if (isNegative) result = -result; // result could exceed allowed precision, will cause an executor error when processed further setDouble(result, row); } break; case UNDEFINED_SQL_TYPE: default: throw UDRException(38900, "setString() is not yet supported for data type %d", d_.sqlType_); } } void TypeInfo::setNull(char *row) const { if (row == NULL || d_.dataOffset_ < 0) throw UDRException(38900, "setNull() on a non-existent value"); // set NULL indicator to -1 if (d_.nullIndOffset_ >= 0) *(reinterpret_cast<short *>(row + d_.nullIndOffset_)) = -1; else throw UDRException(38900, "Trying to set a non-nullable value to NULL"); } int TypeInfo::minBytesPerChar() const { switch (d_.charset_) { case CHARSET_ISO88591: case CHARSET_UTF8: return 1; case CHARSET_UCS2: return 2; default: throw UDRException( 38900, "Minimum bytes per char not defined for charset %d", d_.charset_); } } int TypeInfo::convertToBinaryPrecision(int decimalPrecision) const { if (decimalPrecision < 1 || decimalPrecision > 18) throw UDRException( 38900, "Decimal precision %d is out of the allowed range of 1-18", decimalPrecision); if (decimalPrecision < 5) return 2; else if (decimalPrecision < 10) return 4; else return 8; } void TypeInfo::toString(std::string &s, bool longForm) const { char buf[100]; switch (d_.sqlType_) { case UNDEFINED_SQL_TYPE: s += "undefined_sql_type"; break; case SMALLINT: s += "SMALLINT"; break; case INT: s += "INT"; break; case LARGEINT: s += "LARGEINT"; break; case NUMERIC: snprintf(buf, sizeof(buf), "NUMERIC(%d,%d)", getPrecision(), getScale()); s += buf; break; case DECIMAL_LSE: snprintf(buf, sizeof(buf), "DECIMAL(%d,%d)", getPrecision(), getScale()); s += buf; break; case SMALLINT_UNSIGNED: s += "SMALLINT UNSIGNED"; break; case INT_UNSIGNED: s += "INT UNSIGNED"; break; case NUMERIC_UNSIGNED: snprintf(buf, sizeof(buf), "NUMERIC(%d,%d) UNSIGNED", getPrecision(), getScale()); s += buf; break; case DECIMAL_UNSIGNED: snprintf(buf, sizeof(buf), "DECIMAL(%d,%d) UNSIGNED", getPrecision(), getScale()); s += buf; break; case REAL: s += "REAL"; break; case DOUBLE_PRECISION: s += "DOUBLE PRECISION"; break; case CHAR: case VARCHAR: const char *csName; switch(getCharset()) { case UNDEFINED_CHARSET: csName = "undefined"; break; case CHARSET_ISO88591: csName = "ISO88591"; break; case CHARSET_UTF8: csName = "UTF8"; break; case CHARSET_UCS2: csName = "UCS2"; break; default: csName = "invalid charset!"; break; } snprintf(buf, sizeof(buf), "%s(%d%s) CHARACTER SET %s", (d_.sqlType_ == CHAR ? 
"CHAR" : "VARCHAR"), getMaxCharLength(), (getCharset() == CHARSET_UTF8 ? " BYTES" : ""), csName); s += buf; break; case DATE: s += "DATE"; break; case TIME: s += "TIME"; if (d_.scale_ > 0) { snprintf(buf, sizeof(buf), "(%d)", d_.scale_); s += buf; } break; case TIMESTAMP: snprintf(buf, sizeof(buf), "TIMESTAMP(%d)", d_.scale_); s += buf; break; case INTERVAL: switch (d_.intervalCode_) { case UNDEFINED_INTERVAL_CODE: snprintf(buf, sizeof(buf), "INTERVAL with undefined subtype!"); break; case INTERVAL_YEAR: snprintf(buf, sizeof(buf), "INTERVAL YEAR(%d)", getPrecision()); break; case INTERVAL_MONTH: snprintf(buf, sizeof(buf), "INTERVAL MONTH(%d)", getPrecision()); break; case INTERVAL_DAY: snprintf(buf, sizeof(buf), "INTERVAL DAY(%d)", getPrecision()); break; case INTERVAL_HOUR: snprintf(buf, sizeof(buf), "INTERVAL HOUR(%d)", getPrecision()); break; case INTERVAL_MINUTE: snprintf(buf, sizeof(buf), "INTERVAL MINUTE(%d)", getPrecision()); break; case INTERVAL_SECOND: snprintf(buf, sizeof(buf), "INTERVAL SECOND(%d,%d)", getPrecision(), getScale()); break; case INTERVAL_YEAR_MONTH: snprintf(buf, sizeof(buf), "INTERVAL YEAR(%d) TO MONTH", getPrecision()); break; case INTERVAL_DAY_HOUR: snprintf(buf, sizeof(buf), "INTERVAL DAY(%d) TO HOUR", getPrecision()); break; case INTERVAL_DAY_MINUTE: snprintf(buf, sizeof(buf), "INTERVAL DAY(%d) TO MINUTE", getPrecision()); break; case INTERVAL_DAY_SECOND: snprintf(buf, sizeof(buf), "INTERVAL DAY(%d) TO SECOND(%d)", getPrecision(), getScale()); break; case INTERVAL_HOUR_MINUTE: snprintf(buf, sizeof(buf), "INTERVAL HOUR(%d) TO MINUTE", getPrecision()); break; case INTERVAL_HOUR_SECOND: snprintf(buf, sizeof(buf), "INTERVAL HOUR(%d) TO SECOND(%d)", getPrecision(), getScale()); break; case INTERVAL_MINUTE_SECOND: snprintf(buf, sizeof(buf), "INTERVAL MINUTE(%d) TO SECOND(%d)", getPrecision(), getScale()); break; default: snprintf(buf, sizeof(buf), "invalid interval code!"); } s += buf; break; case BLOB: s += "BLOB"; break; case CLOB: s += "CLOB"; break; default: s += "invalid SQL type!"; break; } if (!d_.nullable_) s += " NOT NULL"; if (longForm && d_.dataOffset_ >= 0) { snprintf(buf, sizeof(buf), " offsets: (nullInd=%d, vcLen=%d, data=%d)", d_.nullIndOffset_, d_.vcLenIndOffset_, d_.dataOffset_); s += buf; } } int TypeInfo::serializedLength() { // format is base class bytes + binary image of d_ return TMUDRSerializableObject::serializedLength() + serializedLengthOfBinary(sizeof(d_)); } int TypeInfo::serialize(Bytes &outputBuffer, int &outputBufferLength) { int result = TMUDRSerializableObject::serialize(outputBuffer, outputBufferLength); result += serializeBinary(&d_, sizeof(d_), outputBuffer, outputBufferLength); validateSerializedLength(result); return result; } int TypeInfo::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { int result = TMUDRSerializableObject::deserialize(inputBuffer, inputBufferLength); validateObjectType(TYPE_INFO_OBJ); int binarySize = 0; const void *temp = NULL; result += deserializeBinary(&temp, binarySize, false, inputBuffer, inputBufferLength); if (binarySize != sizeof(d_)) throw UDRException(38900,"Expected %d bytes to deserialize TypeInfo struct, actually used %d bytes", sizeof(d_), binarySize); memcpy(&d_, temp, binarySize); validateDeserializedLength(result); return result; } void TypeInfo::setOffsets(int indOffset, int vcOffset, int dataOffset) { d_.nullIndOffset_ = indOffset; d_.vcLenIndOffset_ = vcOffset; d_.dataOffset_ = dataOffset; } // ------------------------------------------------------------------------ // 
Member functions for class ProvenanceInfo // ------------------------------------------------------------------------ /** * Default constructor, generates unspecified provenance. */ ProvenanceInfo::ProvenanceInfo() : inputTableNum_(-1), inputColNum_(-1) {} /** * Constructor to link an output column to a specific input column * * This constructor can be used to produce a "passthru column". An easier * way to do this is the UDRInvocationInfo::addPassThruColumns() method. * * @param inputTableNum Input table number (0 for a TMUDF with a single * table-valued input, the most common case). * @param inputColNum Column number in intput table "inputTableNum" * that is the source of the output column to be * produced. */ ProvenanceInfo::ProvenanceInfo(int inputTableNum, int inputColNum) : inputTableNum_(inputTableNum), inputColNum_(inputColNum) {} /** * Get the input table number. * * @return Input table number. */ int ProvenanceInfo::getInputTableNum() const { return inputTableNum_; } /** * Get the input column number. * * @return Input column number. */ int ProvenanceInfo::getInputColumnNum() const { return inputColNum_; } /** * Test whether the column comes from any or from a specific table-valued input. * * @param inputTableNum -1 to test for any table-valued input, or a specific * input table number. * @return true if the provenance indicates a column that comes from the * specified input table(s), false otherwise */ bool ProvenanceInfo::isFromInputTable(int inputTableNum) const { return (inputTableNum_ >= 0 && inputColNum_ >= 0 && (inputTableNum > 0 ? inputTableNum == inputTableNum_ : true)); } // ------------------------------------------------------------------------ // Member functions for class ColumnInfo // ------------------------------------------------------------------------ /** * Default constructor */ ColumnInfo::ColumnInfo() : TMUDRSerializableObject(COLUMN_INFO_OBJ, getCurrentVersion()), usage_(UNKNOWN), estimatedUniqueEntries_(-1) {} /** * Constructor, specifying a name and a type */ ColumnInfo::ColumnInfo(const char *name, const TypeInfo &type) : TMUDRSerializableObject(COLUMN_INFO_OBJ, getCurrentVersion()), name_(name), type_(type), usage_(UNKNOWN), estimatedUniqueEntries_(-1) {} /** * Get the name of the column. * * @return Name of the column in UTF-8. */ const std::string &ColumnInfo::getColName() const { return name_; } /** * Get the type of the column. * * @return Type of the column. */ const TypeInfo &ColumnInfo::getType() const { return type_; } /** * Non-const method to get the type. * * @return Non-const type of the column. Note that the * types of parameters and output columns can only * be changed from the * UDR::describeParamsAndColumns() call. */ TypeInfo & ColumnInfo::getType() { return type_; } /** * Get the estimated number of unique entries. * * This returns an estimate for the number of unique values * for this column in the table. For example, a column containing * the names of US states would have approximately 50 distinct * values, assuming that most or all states are represented. * This estimate can be provided by the UDR writer, through the * setUniqueEntries() method, or in some cases it can also be * provided by the Trafodion compiler. * * @see ColumnInfo::setEstimatedUniqueEntries() * * @return Estimated number of unique entries or -1 if there is no estimate. */ long ColumnInfo::getEstimatedUniqueEntries() const { return estimatedUniqueEntries_; } /** * Get the usage of an input or output column. 
* * This usage may be set in the * UDR::describeDataflowAndPredicates() method, * set automatically by Trafodion for certain situations * with passthru columns, or left at the default of USED. * * @return Usage enum value for the column. */ ColumnInfo::ColumnUseCode ColumnInfo::getUsage() const { return usage_; } /** * Get provenance info for an output column. * * @return Provenance of the column. */ const ProvenanceInfo &ColumnInfo::getProvenance() const { return provenance_; } /** * Set the name of the column. * * @param colName Name of the column (in UTF-8). There is a length * limit of 256 bytes for the column name. */ void ColumnInfo::setColName(const char *colName) { name_ = colName; } /** * Set the type of the column. * * This is done by constructing a TypeInfo object and passing it to this method. * * @param type Type of the column. */ void ColumnInfo::setType(TypeInfo &type) { type_ = type; } /** * Provide an estimate for the number of unique values of a column. * * Only use this method from within the following methods: * @arg UDR::describeParamsAndColumns() * @arg UDR::describeDataflowAndPredicates() * @arg UDR::describeConstraints() * @arg UDR::describeStatistics() * * @see ColumnInfo::getEstimatedUniqueEntries() * * @param uniqueEntries Estimate of the number of unique entries or * -1 if there is no estimate. */ void ColumnInfo::setEstimatedUniqueEntries(long uniqueEntries) { estimatedUniqueEntries_ = uniqueEntries; } /** * Set the usage of the column. * * See the ColumnInfo::COLUMN_USE enum for different options. * * Only use this method from within the following method: * @arg UDR::describeParamsAndColumns() * * @param usage Usage enum value of the column. */ void ColumnInfo::setUsage(ColumnUseCode usage) { usage_ = usage; } /** * Set the provenance of an output column. * * This defines a relationship between an output column and * a column of a table-valued input from which the output value * is copied. Such columns are called pass-thru columns. See * class ProvenanceInfo for more information. * * Only use this method from within the following method: * @arg UDR::describeParamsAndColumns() * * @param provenance The provenance information. 
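 *
 *  A brief sketch of manually building a pass-thru column (the "info"
 *  object with its in() and out() accessors is assumed to be the
 *  UDRInvocationInfo of UDR::describeParamsAndColumns(); the simpler
 *  alternative is UDRInvocationInfo::addPassThruColumns()):
 *
 *  @code
 *    // copy column 2 of the first table-valued input to the output
 *    ColumnInfo passThruCol(info.in(0).getColumn(2));
 *
 *    passThruCol.setProvenance(ProvenanceInfo(0, 2));
 *    info.out().addColumn(passThruCol);
 *  @endcode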
*/ void ColumnInfo::setProvenance(const ProvenanceInfo &provenance) { provenance_ = provenance; } void ColumnInfo::toString(std::string &s, bool longForm) const { s += name_; if (longForm) { s += " "; type_.toString(s, longForm); if (provenance_.isFromInputTable()) { char buf[100]; snprintf(buf, sizeof(buf), " passthru(%d,%d)", provenance_.getInputTableNum(), provenance_.getInputColumnNum()); s += buf; } switch (usage_) { case UNKNOWN: case USED: // don't show anything for these "normal" cases break; case NOT_USED: s+= " (not used)"; break; case NOT_PRODUCED: s+= " (not produced)"; break; default: s+= " (invalid usage code)"; break; } if (estimatedUniqueEntries_ >= 0) { char buf[40]; snprintf(buf, sizeof(buf), " uec=%ld", estimatedUniqueEntries_); s+= buf; } } } int ColumnInfo::serializedLength() { // format: base class + name + type + int(usage) + long(uec) + // int(input table #) + int(input col #) return TMUDRSerializableObject::serializedLength() + serializedLengthOfString(name_) + type_.serializedLength() + 3 * serializedLengthOfInt() + serializedLengthOfLong(); } int ColumnInfo::serialize(Bytes &outputBuffer, int &outputBufferLength) { int result = TMUDRSerializableObject::serialize(outputBuffer, outputBufferLength); result += serializeString(name_, outputBuffer, outputBufferLength); result += type_.serialize(outputBuffer, outputBufferLength); result += serializeInt(static_cast<int>(usage_), outputBuffer, outputBufferLength); result += serializeLong(estimatedUniqueEntries_, outputBuffer, outputBufferLength); result += serializeInt(getProvenance().getInputTableNum(), outputBuffer, outputBufferLength); result += serializeInt(getProvenance().getInputColumnNum(), outputBuffer, outputBufferLength); validateSerializedLength(result); return result; } int ColumnInfo::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { int result = TMUDRSerializableObject::deserialize(inputBuffer, inputBufferLength); int tempInt1 = 0; int tempInt2 = 0; validateObjectType(COLUMN_INFO_OBJ); result += deserializeString(name_, inputBuffer, inputBufferLength); result += type_.deserialize(inputBuffer, inputBufferLength); result += deserializeInt(tempInt1, inputBuffer, inputBufferLength); usage_ = static_cast<ColumnUseCode>(tempInt1); result += deserializeLong(estimatedUniqueEntries_, inputBuffer, inputBufferLength); result += deserializeInt(tempInt1, inputBuffer, inputBufferLength); result += deserializeInt(tempInt2, inputBuffer, inputBufferLength); setProvenance(ProvenanceInfo(tempInt1, tempInt2)); validateDeserializedLength(result); return result; } // ------------------------------------------------------------------------ // Member functions for class ConstraintInfo // ------------------------------------------------------------------------ ConstraintInfo::ConstraintInfo(ConstraintTypeCode constraintType, unsigned short version) : TMUDRSerializableObject( (constraintType == CARDINALITY ? TMUDRSerializableObject::CARDINALITY_CONSTRAINT_INFO_OBJ : (constraintType == UNIQUE ? TMUDRSerializableObject::UNIQUE_CONSTRAINT_INFO_OBJ : TMUDRSerializableObject::UNKNOWN_OBJECT_TYPE)), version), constraintType_(constraintType) { if (getObjectType() == TMUDRSerializableObject::UNKNOWN_OBJECT_TYPE) throw UDRException( 38900, "Invalid subclass in ConstraintInfo() constructor"); } /** * Get the type of the constraint. * * This allows safe casting to derived classes, based on the type. * * @return Type of the constraint. 
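 *
 *  A short sketch of such a cast (assuming "tableInfo" is a TableInfo
 *  reference, for example obtained in UDR::describeConstraints()):
 *
 *  @code
 *    for (int c=0; c<tableInfo.getNumConstraints(); c++)
 *      {
 *        const ConstraintInfo &constr = tableInfo.getConstraint(c);
 *
 *        if (constr.getType() == ConstraintInfo::UNIQUE)
 *          {
 *            const UniqueConstraintInfo &uc =
 *              static_cast<const UniqueConstraintInfo &>(constr);
 *            // ... look at uc.getNumUniqueColumns(),
 *            //     uc.getUniqueColumn(i) ...
 *          }
 *      }
 *  @endcode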
*/ ConstraintInfo::ConstraintTypeCode ConstraintInfo::getType() const { return constraintType_; } int ConstraintInfo::serializedLength() { return TMUDRSerializableObject::serializedLength() + serializedLengthOfInt(); } int ConstraintInfo::serialize(Bytes &outputBuffer, int &outputBufferLength) { int result = TMUDRSerializableObject::serialize(outputBuffer, outputBufferLength); result += serializeInt(static_cast<int>(constraintType_), outputBuffer, outputBufferLength); return result; } int ConstraintInfo::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { int result = TMUDRSerializableObject::deserialize(inputBuffer, inputBufferLength); int tempInt = 0; result += deserializeInt(tempInt, inputBuffer, inputBufferLength); constraintType_ = static_cast<ConstraintTypeCode>(tempInt); return result; } // ------------------------------------------------------------------------ // Member functions for class CardinalityConstraintInfo // ------------------------------------------------------------------------ /** * Construct a new cardinality constraint. * * A cardinality constraint allows to specify a lower and/or an upper * limit for the number of rows in a table. * * @param minNumRows The minimum number of rows in the table, 0 or * a positive number. * @param maxNumRows The maximum number of rows in the table, or -1 * if there is no upper bound. If it is not -1, maxNumRows * must be greater or equal minNumRows. * @throws UDRException */ CardinalityConstraintInfo::CardinalityConstraintInfo(long minNumRows, long maxNumRows) : ConstraintInfo(CARDINALITY, getCurrentVersion()), minNumRows_(minNumRows), maxNumRows_(maxNumRows) { if (minNumRows < 0 || maxNumRows < -1 || maxNumRows >= 0 && minNumRows > maxNumRows) throw UDRException( 38900, "Invalid lower/upper bound for cardinality constraint: (%ld, %ld)", minNumRows, maxNumRows); } /** * Return the minimum number of rows in a table. * * @return Minimum number of rows (0 or a positive number). */ long CardinalityConstraintInfo::getMinNumRows() const { return minNumRows_; } /** * Return the maximum number of rows in a table. * * @return Maximum number of rows or -1 if there is no upper bound. 
*/ long CardinalityConstraintInfo::getMaxNumRows() const { return maxNumRows_; } void CardinalityConstraintInfo::toString(const TableInfo &, std::string &s) { char buf[100]; snprintf(buf, sizeof(buf), "cardinality constraint(min=%ld, max=%ld)", minNumRows_, maxNumRows_); s += buf; } int CardinalityConstraintInfo::serializedLength() { return ConstraintInfo::serializedLength() + 2 * serializedLengthOfLong(); } int CardinalityConstraintInfo::serialize(Bytes &outputBuffer, int &outputBufferLength) { int result = ConstraintInfo::serialize(outputBuffer, outputBufferLength); result += serializeLong(minNumRows_, outputBuffer, outputBufferLength); result += serializeLong(maxNumRows_, outputBuffer, outputBufferLength); validateSerializedLength(result); return result; } int CardinalityConstraintInfo::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { int result = ConstraintInfo::deserialize(inputBuffer, inputBufferLength); validateObjectType(CARDINALITY_CONSTRAINT_INFO_OBJ); result += deserializeLong(minNumRows_, inputBuffer, inputBufferLength); result += deserializeLong(maxNumRows_, inputBuffer, inputBufferLength); validateDeserializedLength(result); return result; } // ------------------------------------------------------------------------ // Member functions for class UniqueConstraintInfo // ------------------------------------------------------------------------ /** * Default constructor for an empty uniqueness constraint. * * Use method addColumn() to add columns. */ UniqueConstraintInfo::UniqueConstraintInfo() : ConstraintInfo(UNIQUE, getCurrentVersion()) {} /** * Get the number of columns that form the unique key. * * @return Number of columns in the uniqueness constraint. */ int UniqueConstraintInfo::getNumUniqueColumns() const { return uniqueColumns_.size(); } /** * Get a column of the uniqueness constraint by iterator. * * Like in other methods, we use an integer to iterate over the * columns in the set. Note that the columns form a set, so this * number i is merely there to iterate over the set of columns. * * @param i A number between 0 and getNumUniqueColumns()-1. * @return Column number/ordinal of the unique column. * @throws UDRException */ int UniqueConstraintInfo::getUniqueColumn(int i) const { if (i < 0 || i >= uniqueColumns_.size()) throw UDRException( 38900, "Invalid index in getUniqueColumn: %d, has %d columns", i, static_cast<int>(uniqueColumns_.size())); return uniqueColumns_[i]; } /** * Add a column to a uniqueness constraint. * * @param c Column number/ordinal of one of the unique columns in the * constraint. 
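 *
 *  A minimal sketch, declaring output column 0 as unique (the "info"
 *  object and its out() accessor are assumed to come from
 *  UDR::describeConstraints()):
 *
 *  @code
 *    UniqueConstraintInfo uc;
 *
 *    uc.addColumn(0);
 *    info.out().addUniquenessConstraint(uc);
 *  @endcode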
 */
void UniqueConstraintInfo::addColumn(int c)
{
  std::vector<int>::iterator it;

  // insert columns ordered by number and ignore duplicates
  // skip over any elements < c
  for (it = uniqueColumns_.begin();
       it != uniqueColumns_.end() && *it < c;
       it++)
    ;

  // insert at the current position if c is not already in the list
  if (it == uniqueColumns_.end() || *it > c)
    uniqueColumns_.insert(it, c);
}

void UniqueConstraintInfo::toString(const TableInfo &ti, std::string &s)
{
  s += "unique(";
  for (int c=0; c<uniqueColumns_.size(); c++)
    {
      if (c>0)
        s += ", ";
      s += ti.getColumn(uniqueColumns_[c]).getColName();
    }
  s += ")";
}

int UniqueConstraintInfo::serializedLength()
{
  return ConstraintInfo::serializedLength() +
    serializedLengthOfBinary(uniqueColumns_.size() * sizeof(int));
}

int UniqueConstraintInfo::serialize(Bytes &outputBuffer,
                                    int &outputBufferLength)
{
  int result = ConstraintInfo::serialize(outputBuffer, outputBufferLength);
  int numCols = uniqueColumns_.size();
  int *cols = new int[numCols];

  for (int u=0; u<numCols; u++)
    cols[u] = uniqueColumns_[u];

  result += serializeBinary(cols,
                            numCols * sizeof(int),
                            outputBuffer,
                            outputBufferLength);

  // the buffer was allocated with new[], release it with delete[]
  delete [] cols;

  validateSerializedLength(result);

  return result;
}

int UniqueConstraintInfo::deserialize(ConstBytes &inputBuffer,
                                      int &inputBufferLength)
{
  int result = ConstraintInfo::deserialize(inputBuffer, inputBufferLength);
  int numCols;
  int *cols;
  int binaryLength;

  validateObjectType(UNIQUE_CONSTRAINT_INFO_OBJ);

  result += deserializeBinary((const void **) &cols,
                              binaryLength,
                              false,
                              inputBuffer,
                              inputBufferLength);
  numCols = binaryLength / sizeof(int);

  for (int u=0; u<numCols; u++)
    uniqueColumns_.push_back(cols[u]);

  validateDeserializedLength(result);

  return result;
}

// ------------------------------------------------------------------------
// Member functions for class PredicateInfo
// ------------------------------------------------------------------------

PredicateInfo::PredicateInfo(TMUDRObjectType t) :
     TMUDRSerializableObject(t, getCurrentVersion()),
     evalCode_(UNKNOWN_EVAL),
     operator_(UNKNOWN_OP)
{}

/**
 *  Get evaluation code for a predicate.
 *
 *  @return Evaluation code.
 *  @throws UDRException
 */
PredicateInfo::EvaluationCode PredicateInfo::getEvaluationCode() const
{
  return static_cast<EvaluationCode>(evalCode_);
}

/**
 *  Get operator code for a predicate.
 *
 *  @return Operator code.
 *  @throws UDRException
 */
PredicateInfo::PredOperator PredicateInfo::getOperator() const
{
  return operator_;
}

/**
 *  Check whether this predicate is a comparison predicate.
 *
 *  Use this method to determine whether it is safe to cast the object
 *  to class ComparisonPredicateInfo.
 *  @return true if this predicate is a comparison predicate, false otherwise.
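 *
 *  A possible sketch of such a cast (getNumPredicates() and getPredicate()
 *  on the "info" object are assumptions here, they are accessors of
 *  UDRInvocationInfo defined elsewhere):
 *
 *  @code
 *    for (int p=0; p<info.getNumPredicates(); p++)
 *      if (info.getPredicate(p).isAComparisonPredicate())
 *        {
 *          const ComparisonPredicateInfo &cpi =
 *            static_cast<const ComparisonPredicateInfo &>(
 *                 info.getPredicate(p));
 *
 *          // ... inspect cpi.getColumnNumber(), cpi.getOperator(),
 *          //     cpi.hasAConstantValue(), cpi.getConstValue() ...
 *        }
 *  @endcode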
*/ bool PredicateInfo::isAComparisonPredicate() const { switch (operator_) { case EQUAL: case NOT_EQUAL: case LESS: case LESS_EQUAL: case GREATER: case GREATER_EQUAL: return true; default: return false; } } void PredicateInfo::setOperator(PredicateInfo::PredOperator op) { operator_ = op; } void PredicateInfo::setEvaluationCode(PredicateInfo::EvaluationCode c) { evalCode_ = c; } int PredicateInfo::serializedLength() { return TMUDRSerializableObject::serializedLength() + 2 * serializedLengthOfInt(); } int PredicateInfo::serialize(Bytes &outputBuffer, int &outputBufferLength) { int result = TMUDRSerializableObject::serialize(outputBuffer, outputBufferLength); result += serializeInt(evalCode_, outputBuffer, outputBufferLength); result += serializeInt(static_cast<int>(operator_), outputBuffer, outputBufferLength); // validate length in derived classes return result; } int PredicateInfo::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { int result = TMUDRSerializableObject::deserialize(inputBuffer, inputBufferLength); int op = 0; result += deserializeInt(evalCode_, inputBuffer, inputBufferLength); result += deserializeInt(op, inputBuffer, inputBufferLength); operator_ = static_cast<PredOperator>(op); // validate operator type and length in derived classes return result; } // ------------------------------------------------------------------------ // Member functions for class ComparisonPredicateInfo // ------------------------------------------------------------------------ ComparisonPredicateInfo::ComparisonPredicateInfo() : PredicateInfo(COMP_PREDICATE_INFO_OBJ), columnNumber_(-1) {} /** * Get the column number of the column in this comparison predicate. * * @return Column number. */ int ComparisonPredicateInfo::getColumnNumber() const { return columnNumber_; } /** * Return whether this comparison value involves a constant. * * The method returns whether the comparison predicate is of the form * "column" "op" "constant". If it returns false, the predicate * compares the column with a parameter or some other value not * available to the UDR. Predicates that do not involve a constant * cannot be evaluated in the UDR itself, since the comparison value * is not available to the UDR. They can be evaluated on a table-valued * input, however. * * @return true if the comparison is with a constant, false otherwise */ bool ComparisonPredicateInfo::hasAConstantValue() const { return (value_.size() > 0); } /** * Return the value, as a string, of the constant in this predicate. * * This returns the value, using SQL syntax, of the constant involved * in the comparison predicate. It throws an exception if method * hasAConstantValue() would return false. * * @see hasAConstantValue() * * @return Value of the constant in this comparison predicate. 
* @throws UDRException */ std::string ComparisonPredicateInfo::getConstValue() const { return value_; } void ComparisonPredicateInfo::setColumnNumber(int columnNumber) { columnNumber_ = columnNumber; } void ComparisonPredicateInfo::setValue(const char *value) { value_.assign(value); } void ComparisonPredicateInfo::mapColumnNumbers(const std::vector<int> &map) { if (map[columnNumber_] < 0) throw UDRException( 38900, "Invalid column mapping for column %d in a predicate", columnNumber_); columnNumber_ = map[columnNumber_]; } void ComparisonPredicateInfo::toString(std::string &s, const TableInfo &ti) const { s += ti.getColumn(columnNumber_).getColName(); switch (getOperator()) { case UNKNOWN_OP: s += " unknown operator "; break; case EQUAL: s += " = "; break; case NOT_EQUAL: s += " <> "; break; case LESS: s += " < "; break; case LESS_EQUAL: s += " <= "; break; case GREATER: s += " > "; break; case GREATER_EQUAL: s += " >= "; break; case IN: s += " in "; break; case NOT_IN: s += " not in "; break; default: s += " invalid operator "; break; } s += value_; } int ComparisonPredicateInfo::serializedLength() { return PredicateInfo::serializedLength() + serializedLengthOfInt() + serializedLengthOfString(value_); } int ComparisonPredicateInfo::serialize(Bytes &outputBuffer, int &outputBufferLength) { int result = PredicateInfo::serialize(outputBuffer, outputBufferLength); result += serializeInt(columnNumber_, outputBuffer, outputBufferLength); result += serializeString(value_, outputBuffer, outputBufferLength); validateSerializedLength(result); return result; } int ComparisonPredicateInfo::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { int result = PredicateInfo::deserialize(inputBuffer, inputBufferLength); result += deserializeInt(columnNumber_, inputBuffer, inputBufferLength); result += deserializeString(value_, inputBuffer, inputBufferLength); validateObjectType(COMP_PREDICATE_INFO_OBJ); validateDeserializedLength(result); return result; } // ------------------------------------------------------------------------ // Member functions for class PartitionInfo // ------------------------------------------------------------------------ /** * Default constructor * * Use this constructor to generate an object to be passed * to UDRInvocationInfo::setChildPartitioning(). */ PartitionInfo::PartitionInfo() : type_(UNKNOWN) {} /** * Get the partitioning type. * * @return Partition type enum. */ PartitionInfo::PartitionTypeCode PartitionInfo::getType() const { return type_; } /** * Get the number of columns that form the partitioning key * * Returns the number of columns in the list of partitioning keys * or zero if there are no such columns. * * @return Number of partitioning key columns (could be zero) */ int PartitionInfo::getNumEntries() const { return partCols_.size(); } /** * Get the number/ordinal of the ith partitioning column. * * @return Number/ordinal (0-based) of the ith partitioning column in * the list of partitioning columns. * @throws UDRException */ int PartitionInfo::getColumnNum(int i) const { if (i < 0 || i >= partCols_.size()) throw UDRException( 38900, "Trying to access column %d of a PartitionInfo with %d partitioning columns", i, partCols_.size()); return partCols_[i]; } /** * Set the partitioning type. * * @param type Partition type enum. */ void PartitionInfo::setType(PartitionTypeCode type) { type_ = type; } /** * Add a new column to the list of partitioning columns * * Add a new column to the list of column numbers that form the * partitioning key. 
Use this only if the type of the partitioning * is set to PARTITION. * * @param colNum Number of the column (ordinal, 0-based) of the * associated table. * @throws UDRException */ void PartitionInfo::addEntry(int colNum) { // don't allow duplicates for (std::vector<int>::iterator it = partCols_.begin(); it != partCols_.end(); it++) if (*it == colNum) throw UDRException( 38900, "Trying to add column number %d more than once to a PartitionInfo object", colNum); partCols_.push_back(colNum); } /** * Clear the contents of the object */ void PartitionInfo::clear() { type_ = UNKNOWN; partCols_.clear(); } void PartitionInfo::mapColumnNumbers(const std::vector<int> &map) { for (int i=0; i<partCols_.size(); i++) { int colNum = partCols_[i]; if (map[colNum] < 0) throw UDRException( 38900, "Invalid mapping for PARTITION BY column %d", colNum); partCols_[i] = map[colNum]; } } // ------------------------------------------------------------------------ // Member functions for class OrderInfo // ------------------------------------------------------------------------ /** * Get the number of entries (columns) in the ordering. * * @return Number of entries/columns that make up the ordering. */ int OrderInfo::getNumEntries() const { return columnNumbers_.size(); } /** * Get the column number of an entry of the ordering. * * @param i the position (0-based) of the ordering, 0 meaning the leading position. * @return The column number of the n-th entry of the ordering (both are 0-based). * @throws UDRException */ int OrderInfo::getColumnNum(int i) const { if (i < 0 || i >= columnNumbers_.size()) throw UDRException( 38900, "Trying to access colnum entry %d of an OrderInfo object with %d entries", i, columnNumbers_.size()); return columnNumbers_[i]; } /** * Get the order type of an entry of the ordering. * * @param i the position (0-based) of the ordering, 0 meaning the leading position. * @return The order type of the n-th entry of the ordering (0-based). * @throws UDRException */ OrderInfo::OrderTypeCode OrderInfo::getOrderType(int i) const { if (i < 0 || i >= orderTypes_.size()) throw UDRException( 38900, "Trying to access order type entry %d of an OrderInfo object with %d entries", i, orderTypes_.size()); return orderTypes_[i]; } /** * Append an entry to the ordering. * * @param colNum Column number to append to the ordering. * @param orderType Order type (ascending or descending) to use. */ void OrderInfo::addEntry(int colNum, OrderTypeCode orderType) { columnNumbers_.push_back(colNum); orderTypes_.push_back(orderType); } /** * Insert an entry at any position of the ordering. * * A quick example to illustrate this: Let's say we have a table * with columns (a,b,c). Their column numbers are 0, 1, and 2. * We produce an ordering (C ASCENDING): * * @code OrderInfo myorder; * * myorder.addEntryAt(0, 2); @endcode * * Next, we want to make this into (B DESCENDING, C ASCENDING): * * @code myorder.addEntryAt(0, 1, DESCENDING); @endcode * * @param pos Position (0-based) at which we want to insert. The new * entry will be position "pos" after the insertion, any * existing entries will be moved up. 
* @param colNum Number of the column by which we want to order * @param orderType Order type (ascending or descending) to use * @throws UDRException */ void OrderInfo::addEntryAt(int pos, int colNum, OrderTypeCode orderType) { if (pos > columnNumbers_.size()) throw UDRException( 38900, "OrderInfo::addEntryAt at position %d with a list of %d entries", pos, columnNumbers_.size()); columnNumbers_.insert(columnNumbers_.begin() + pos, colNum); orderTypes_.insert(orderTypes_.begin() + pos, orderType); } /** * Clear the contents of the object */ void OrderInfo::clear() { columnNumbers_.clear(); orderTypes_.clear(); } void OrderInfo::mapColumnNumbers(const std::vector<int> &map) { for (int i=0; i<columnNumbers_.size(); i++) { int colNum = columnNumbers_[i]; if (map[colNum] < 0) throw UDRException( 38900, "Invalid mapping for ORDER BY column %d", colNum); columnNumbers_[i] = map[colNum]; } } // ------------------------------------------------------------------------ // Member functions for class TupleInfo // ------------------------------------------------------------------------ TupleInfo::TupleInfo(TMUDRObjectType objType, int version) : TMUDRSerializableObject(objType, version), recordLength_(-1), rowPtr_(NULL), wasNull_(false) {} TupleInfo::~TupleInfo() { // delete all columns for (std::vector<ColumnInfo *>::iterator it1 = columns_.begin(); it1 != columns_.end(); it1++) delete *it1; // rowPtr_ is not owned by this object } /** * Get the number of columns or parameters. * * @return Number of columns/parameters. */ int TupleInfo::getNumColumns() const { return columns_.size(); } /** * Look up a column/parameter number by name. * * @param colName Name of an existing column. * @return Column/parameter number. * @throws UDRException */ int TupleInfo::getColNum(const char *colName) const { int result = 0; std::vector<ColumnInfo *>::const_iterator it = columns_.begin(); for (; it != columns_.end(); it++, result++) if ((*it)->getColName() == colName) return result; throw UDRException(38900, "Column %s not found", colName); } /** * Look up a column/parameter number by name. * * @param colName Name of an existing column. * @return Column/parameter number. * @throws UDRException */ int TupleInfo::getColNum(const std::string &colName) const { return getColNum(colName.c_str()); } /** * Get the column info for a column identified by its ordinal. * * @param colNum Column number. * @return Column info. * @throws UDRException */ const ColumnInfo &TupleInfo::getColumn(int colNum) const { if (colNum < 0 || colNum >= columns_.size()) throw UDRException( 38900, "Trying to access column number %d but column list has only %d elements", colNum, columns_.size()); return *(columns_[colNum]); } /** * Get the column info for a column identified by its name. * * @param colName Name of an existing column. * @return Column info. * @throws UDRException */ const ColumnInfo &TupleInfo::getColumn(const std::string &colName) const { return getColumn(getColNum(colName)); } /** * Get the non-const column info for a column identified by its ordinal. * * @param colNum Column number. * @return Column info. * @throws UDRException */ ColumnInfo &TupleInfo::getColumn(int colNum) { if (colNum < 0 || colNum >= columns_.size()) throw UDRException( 38900, "Trying to access column number %d but column list has only %d elements", colNum, columns_.size()); return *(columns_[colNum]); } /** * Get the non-const column info for a column identified by its name. * * @param colName Name of an existing column. * @return Column info. 
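 *
 *  For example, the compile-time interface could adjust the type of an
 *  output column in place (a sketch; the "info" object, its out()
 *  accessor and the column name "TOTAL" are assumptions used for
 *  illustration only):
 *
 *  @code
 *    ColumnInfo &col = info.out().getColumn("TOTAL");
 *    TypeInfo notNullLargeint(TypeInfo::LARGEINT, 0, false);
 *
 *    col.setType(notNullLargeint);
 *  @endcode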
* @throws UDRException */ ColumnInfo &TupleInfo::getColumn(const std::string &colName) { return getColumn(getColNum(colName)); } /** * Get the type of a column. * * @param colNum Column number. * @return Type of the column. * @throws UDRException */ const TypeInfo &TupleInfo::getType(int colNum) const { return getColumn(colNum).getType(); } /** * Get the SQL type class. * * Determine whether this is a numeric character, datetime or interval type. * @param colNum Column number. * @return SQL type class enum. * @throws UDRException */ TypeInfo::SQLTypeClassCode TupleInfo::getSQLTypeClass(int colNum) const { return getType(colNum).getSQLTypeClass(); } /** * Add a new column. * * Only use this method from within the following method: * @arg UDR::describeParamsAndColumns() * * @param column Info of the new column to add. * @throws UDRException */ void TupleInfo::addColumn(const ColumnInfo &column) { ColumnInfo *newCol = new ColumnInfo(column); columns_.push_back(newCol); } /** * Get an integer value of a column or parameter * * This method is modeled after the JDBC interface. * * Use this method at runtime. It can also be used for * actual parameters that are available at compile time. * * @param colNum Column number. * @return Integer value. * If the value was a NULL value, then 0 is returned. * The wasNull() method can be used to determine whether * a NULL value was returned. * @throws UDRException */ int TupleInfo::getInt(int colNum) const { bool &nonConstWasNull = const_cast<TupleInfo *>(this)->wasNull_; nonConstWasNull = false; return getType(colNum).getInt(rowPtr_, nonConstWasNull); } /** * Get an integer value for a column identified by name. * * @see TupleInfo::getInt(int) const * * @param colName Name of an existing column. * @return Integer value. * If the value was a NULL value, then 0 is returned. * The wasNull() method can be used to determine whether * a NULL value was returned. * @throws UDRException */ int TupleInfo::getInt(const std::string &colName) const { return getInt(getColNum(colName)); } /** * Get a long value of a column or parameter * * This method is modeled after the JDBC interface. * * Use this method at runtime. It can also be used for * actual parameters that are available at compile time. * * @param colNum Column number. * @return long value. * If the value was a NULL value, then 0 is returned. * The wasNull() method can be used to determine whether * a NULL value was returned. * @throws UDRException */ long TupleInfo::getLong(int colNum) const { bool &nonConstWasNull = const_cast<TupleInfo *>(this)->wasNull_; nonConstWasNull = false; return getType(colNum).getLong(rowPtr_, nonConstWasNull); } /** * Get a long value for a column identified by name. * * @see TupleInfo::getLong(int) const * * @param colName Name of an existing column. * @return long value. * @throws UDRException */ long TupleInfo::getLong(const std::string &colName) const { return getLong(getColNum(colName)); } /** * Get a double value of a column or parameter * * This method is modeled after the JDBC interface. * * Use this method at runtime. It can also be used for * actual parameters that are available at compile time. * * @param colNum Column number. * @return double value. * @throws UDRException */ double TupleInfo::getDouble(int colNum) const { bool &nonConstWasNull = const_cast<TupleInfo *>(this)->wasNull_; nonConstWasNull = false; return getType(colNum).getDouble(rowPtr_, nonConstWasNull); } /** * Get double value of a column/parameter identified by name. 
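 *
 *  A small runtime sketch, reading an input value by column name and
 *  producing an output row from it (the "info" object with its in() and
 *  out() accessors, the column name "PRICE", and emitRow() are
 *  assumptions; emitRow() is assumed to be available in
 *  UDR::processData()):
 *
 *  @code
 *    double price = info.in(0).getDouble("PRICE");
 *
 *    if (!info.in(0).wasNull())
 *      {
 *        info.out().setDouble(0, price * 1.1);
 *        emitRow(info);
 *      }
 *  @endcode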
* * @see TupleInfo::getDouble(int colNum) const * * @param colName Name of an existing column. * @return double value. * @throws UDRException */ double TupleInfo::getDouble(const std::string &colName) const { return getDouble(getColNum(colName)); } /** * Get a pointer to the raw data value of a column. * * Using this method requires knowledge of the data layout * for the different types used in UDRs. This method can be * useful for performance optimizations, when converting longer * string values to std::string is undesirable. Note that the * pointer to the raw value is valid only until a new row * is read or the existing row is emitted. * * Use this method at runtime. It can also be used for * actual parameters that are available at compile time. * * @param colNum Column number. * @param byteLen Length, in bytes, of the value returned. * @return Pointer to the raw column value in the row buffer. * @throws UDRException */ const char * TupleInfo::getRaw(int colNum, int &byteLen) const { bool &nonConstWasNull = const_cast<TupleInfo *>(this)->wasNull_; nonConstWasNull = false; return getType(colNum).getRaw(rowPtr_, nonConstWasNull, byteLen); } /** * Get a datetime or interval column value as time_t * * This method can be used to convert column values with * a datetime type or a day-second interval type to the * POSIX type time_t. Note that this may result in the loss * of fractional seconds. * * Use this method at runtime. It can also be used for * actual parameters that are available at compile time. * * @param colNum Column number. * @throws UDRException */ time_t TupleInfo::getTime(int colNum) const { bool &nonConstWasNull = const_cast<TupleInfo *>(this)->wasNull_; nonConstWasNull = false; return getType(colNum).getTime(rowPtr_, nonConstWasNull); } /** * Check whether a parameter is available at compile-time. * * Use this method to check in the compiler interfaces whether * an actual parameter is a constant value that can be read * at compile time. If this method returns true, the value * can be accessed with the getInt(), getString() etc. methods. * * @param colNum Column number. * @return true if the parameter value is available. * @throws UDRException */ bool TupleInfo::isAvailable(int colNum) const { return (rowPtr_ != NULL && colNum < columns_.size() && getType(colNum).isAvailable()); } /** * Get columns of a row as a delimited string. * * This method is useful to interface with tools that take a delimited * record format. It is also useful for printing rows * (see UDRInvocationInfo::TRACE_ROWS). * * Only use this method at runtime. * * Note: This method may return a string that contains multiple * character sets, if columns with different character sets * are involved. Using this method with UCS-2 columns is not * recommended. * * @param row String reference in which the result delimited row * will be returned. * @param delim US ASCII field delimiter to use. * @param quote Whether to quote character field values that contain * the delimiter symbol or a quote symbol. Quote symbols * will be duplicated to escape them. * @param quoteSymbol US ASCII quote character to use, if quote is true. * @param firstColumn First column to read. * @param lastColumn Last column to read (inclusive) or -1 to read until * the last column in the row. 
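 *
 *  A small runtime sketch that prints every input row in delimited form
 *  (getNextRow() and the "info" object are assumed to come from
 *  UDR::processData(); they are not defined in this file):
 *
 *  @code
 *    std::string rowAsText;
 *
 *    while (getNextRow(info))
 *      {
 *        info.in(0).getDelimitedRow(rowAsText, '|', true, '"', 0, -1);
 *        printf("row: %s\n", rowAsText.c_str());
 *      }
 *  @endcode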
* @throws UDRException */ void TupleInfo::getDelimitedRow(std::string &row, char delim, bool quote, char quoteSymbol, int firstColumn, int lastColumn) const { // read all columns and form a delimited text row from them // if quote is true, then quote any text that contains the delimiter // and also double any quotes appearing in the text int nc = getNumColumns(); if (firstColumn >= nc || firstColumn < 0 || lastColumn < -1 || lastColumn > 0 && lastColumn >= nc) throw UDRException( 38900, "Invalid column range %d to %d in getDelimitedRow for a tuple with %d columns", firstColumn, lastColumn, nc); if (lastColumn == -1) lastColumn = nc-1; row.erase(); for (int i=firstColumn; i<=lastColumn; i++) { std::string val=getString(i); if (i>firstColumn) row.push_back(delim); if (!wasNull()) { if (quote) { bool quoteTheString=false; // replace all quotes with two quotes for (std::string::iterator it = val.begin(); it != val.end(); it++) if (*it == quoteSymbol) { quoteTheString = true; it++; val.insert(it,quoteSymbol); } else if (*it == delim) quoteTheString = true; // if we found a quote or a delimiter in the // string, then quote it if (quoteTheString) { val.insert(0,1,quoteSymbol); val.push_back(quoteSymbol); } } // quote row += val; } // value is not NULL } // loop over columns } /** * Get a string value of a column or parameter * * This method is modeled after the JDBC interface. * * Use this method at runtime. It can also be used for * actual parameters that are available at compile time. * * @param colNum Column number. * @return String value. * If the value was a NULL value, an empty string * is returned. The wasNull() method can be used to * determine whether a NULL value was returned. * @throws UDRException */ std::string TupleInfo::getString(int colNum) const { int stringLen = 0; TypeInfo::SQLTypeCode sqlType = getType(colNum).getSQLType(); switch (sqlType) { case TypeInfo::DECIMAL_LSE: case TypeInfo::DECIMAL_UNSIGNED: case TypeInfo::CHAR: case TypeInfo::VARCHAR: case TypeInfo::DATE: case TypeInfo::TIME: case TypeInfo::TIMESTAMP: case TypeInfo::BLOB: case TypeInfo::CLOB: { // these types are stored as strings const char *buf = getRaw(colNum, stringLen); if (buf) return std::string(buf, stringLen); else return std::string(""); } case TypeInfo::SMALLINT: case TypeInfo::INT: case TypeInfo::LARGEINT: case TypeInfo::NUMERIC: case TypeInfo::SMALLINT_UNSIGNED: case TypeInfo::INT_UNSIGNED: case TypeInfo::NUMERIC_UNSIGNED: { char buf[32]; long num = getLong(colNum); if (wasNull_) return ""; snprintf(buf, sizeof(buf), "%ld", num); return buf; } case TypeInfo::REAL: case TypeInfo::DOUBLE_PRECISION: { char buf[32]; double num = getDouble(colNum); // see also constants SQL_FLOAT_FRAG_DIGITS and // SQL_DOUBLE_PRECISION_FRAG_DIGITS in file // trafodion/core/sql/common/SQLTypeDefs.h int numSignificantDigits = 17; if (sqlType == TypeInfo::REAL) numSignificantDigits = 7; if (wasNull_) return ""; snprintf(buf, sizeof(buf), "%*lf", numSignificantDigits, num); return buf; } case TypeInfo::INTERVAL: { char buf[32]; long longVal = getLong(colNum); long fractionalVal = 0; const TypeInfo typ = getType(colNum); TypeInfo::SQLIntervalCode intervalCode = typ.getIntervalCode(); int precision = typ.getPrecision(); int scale = typ.getScale(); const char *sign = ""; const char *dot = (scale == 0 ? 
"" : "."); if (wasNull_) return ""; if (longVal < 0) { longVal = -longVal; sign = "-"; } // split the number into integer and fractional values for (int d=0; d<scale; d++) { fractionalVal = 10*fractionalVal + longVal % 10; longVal /= 10; } switch (intervalCode) { case TypeInfo::INTERVAL_YEAR: case TypeInfo::INTERVAL_MONTH: case TypeInfo::INTERVAL_DAY: case TypeInfo::INTERVAL_HOUR: case TypeInfo::INTERVAL_MINUTE: // Example: "59" snprintf(buf, sizeof(buf), "%s%*ld", sign, precision, longVal); break; case TypeInfo::INTERVAL_SECOND: // Example: "99999.000001" snprintf(buf, sizeof(buf), "%s%*ld%s%0*ld", sign, precision, longVal, dot, scale, fractionalVal); break; case TypeInfo::INTERVAL_YEAR_MONTH: // Example: "100-01" snprintf(buf, sizeof(buf), "%s%*ld-%02d", sign, precision, (long) (longVal/12), (int) (longVal%12)); break; case TypeInfo::INTERVAL_DAY_HOUR: // Example: "365 06" snprintf(buf, sizeof(buf), "%s%*ld %02d", sign, precision, (long) (longVal/24), (int) (longVal%24)); break; case TypeInfo::INTERVAL_DAY_MINUTE: // Example: "365:05:49" snprintf(buf, sizeof(buf), "%s%*ld %02d:%02d", sign, precision, (long) (longVal/1440), (int) (longVal%1440/60), (int) (longVal%60)); break; case TypeInfo::INTERVAL_DAY_SECOND: // Example: "365:05:49:12.00" snprintf(buf, sizeof(buf), "%s%*ld %02d:%02d:%02d%s%0*ld", sign, precision, (long) (longVal/86400), (int) (longVal%86400/3600), (int) (longVal%3600/60), (int) (longVal%60), dot, scale, fractionalVal); break; case TypeInfo::INTERVAL_HOUR_MINUTE: // Example: "12:00" snprintf(buf, sizeof(buf), "%s%*ld:%02d", sign, precision, (long) (longVal/60), (int) (longVal%60)); break; case TypeInfo::INTERVAL_HOUR_SECOND: // Example: "100:00:00" snprintf(buf, sizeof(buf), "%s%*ld:%02d:%02d%s%0*ld", sign, precision, (long) (longVal/3600), (int) (longVal%3600/60), (int) (longVal%60), dot, scale, fractionalVal); break; case TypeInfo::INTERVAL_MINUTE_SECOND: // Example: "3600:00.000000" snprintf(buf, sizeof(buf), "%s%*ld:%02d%s%0*ld", sign, precision, (long) (longVal/60), (int) (longVal%60), dot, scale, fractionalVal); break; default: throw UDRException( 38900, "Invalid interval code in TypeInfo::getString()"); } return buf; } default: throw UDRException( 38900, "Type %d not yet supported in getString()", sqlType); } } /** * Get a string value of a column or parameter identified by name. * * This method is modeled after the JDBC interface. * * Use this method at runtime. It cannot be used for * actual parameters that are available at compile time, use * getString(int colNum) instead, since actual parameters are not named. * * @param colName Name of an existing column. * @return String value. * If the value was a NULL value, an empty string * is returned. The wasNull() method can be used to * determine whether a NULL value was returned. * @throws UDRException */ std::string TupleInfo::getString(const std::string &colName) const { return getString(getColNum(colName)); } /** * Check whether the last value returned from a getInt() etc. method was NULL. * * This method is modeled after the JDBC interface. * * @return true if the last value returned from a getInt(), getString() * etc. method was a NULL value, false otherwise. */ bool TupleInfo::wasNull() const { return wasNull_; } /** * Set an output column to a specified integer value. * * Use this method at runtime. * * @param colNum Index/ordinal of the column to set. * @param val The new integer value for the column to set. 
* @throws UDRException */ void TupleInfo::setInt(int colNum, int val) const { getType(colNum).setInt(val, rowPtr_); } /** * Set an output column to a specified long value. * * Use this method at runtime. * * @param colNum Index/ordinal of the column to set. * @param val The new long value for the column to set. * @throws UDRException */ void TupleInfo::setLong(int colNum, long val) const { getType(colNum).setLong(val, rowPtr_); } /** * Set an output column to a specified double value. * * Use this method at runtime. * * @param colNum Index/ordinal of the column to set. * @param val The new double value for the column to set. * @throws UDRException */ void TupleInfo::setDouble(int colNum, double val) const { getType(colNum).setDouble(val, rowPtr_); } /** * Set an output column to a specified string value. * * Use this method at runtime. The length of the string is determined * by calling strlen(). * * @param colNum Index/ordinal of the column to set. * @param val The new string value for the column to set. * The length of the string is determined by calling strlen. * @throws UDRException */ void TupleInfo::setString(int colNum, const char *val) const { setString(colNum, val, strlen(val)); } /** * Set an output column to a specified string value. * * Use this method at runtime. * * @param colNum Index/ordinal of the column to set. * @param val The new string value for the column to set. * @param stringLen Length (in bytes) of the string value provided. * The string may contain embedded NUL bytes. * @throws UDRException */ void TupleInfo::setString(int colNum, const char *val, int stringLen) const { getType(colNum).setString(val, stringLen, rowPtr_); } /** * Set an output column to a specified string value. * * Use this method at runtime. * * @param colNum Index/ordinal of the column to set. * @param val The new string value for the column to set. * @throws UDRException */ void TupleInfo::setString(int colNum, const std::string &val) const { setString(colNum, val.data(), val.size()); } /** * Set a datetime or interval output column to a value specified as time_t * * This method cannot be used with year-month intervals or data types that * are not datetime or interval types. It is not possible to set fractional * seconds with this method. * * Use this method at runtime. * * @param colNum Index/ordinal of the column to set. * @param val The new time_t value for the column to set. * @throws UDRException */ void TupleInfo::setTime(int colNum, time_t val) const { getType(colNum).setTime(val, rowPtr_); } /** * Set the result row from a string with delimited field values. * * This method can be used to read delimited text files and * conveniently produce a result table from them. For example, * if the following string is passed in as row: * @code skip1|'skip|2'|3|'delim|and''Quote'|5 @endcode * This call: * @code setFromDelimitedRow( * row, // row * '|', // delim * true, // quote * '\'', // quoteSymbol (single quote) * 10, // firstColumnToSet * 11, // lastColumnToSet * 2); // numDelimColsToSkip @endcode * would set output column 10 to 3 and output column 11 to delim|and'Quote. * * Note: The delimited row may need to contain strings of multiple * character sets. Using this method with UCS2 columns is not * recommended, since that might require special handling. * * @see getDelimitedRow() * * @param row A string with delimited field values to read. * @param delim Delimiter between field values. Use a US ASCII symbol * as the delimiter. 
* @param quote true if the method should assume that text fields * use quotes to quote special symbols like delimiters * that are embedded within fields, and that quote symbols * embedded in text fields will be doubled. * @param quoteSymbol US ASCII Quote symbol used to quote text. Meaningful * only if quote is set to true. * @param firstColumnToSet First column in the output table to be set * from the delimited row (0-based). * @param lastColumnToSet Last column in the output table to be set * (inclusive) or -1 to indicate to set all * remaining columns of the table. * @param numDelimColsToSkip Number of fields to skip in the delimited * row before using the values to set output * columns. * @return Pointer to the first character after the * text that has been consumed by this method. * @throws UDRException */ const char * TupleInfo::setFromDelimitedRow(const char *row, char delim, bool quote, char quoteSymbol, int firstColumnToSet, int lastColumnToSet, int numDelimColsToSkip) const { int nc = getNumColumns(); const char *c = row; // virtual start column number of the first column in the delimited row // we may need to skip some values to reach the first one to use int startCol = firstColumnToSet-numDelimColsToSkip; if (firstColumnToSet >= nc || firstColumnToSet < 0 || lastColumnToSet < -1 || lastColumnToSet > 0 && (lastColumnToSet >= nc || firstColumnToSet > lastColumnToSet)) throw UDRException( 38900, "Invalid column range %d to %d in setFromDelimitedRow for a tuple with %d columns", firstColumnToSet, lastColumnToSet, nc); if (lastColumnToSet == -1) lastColumnToSet = nc-1; for (int i=startCol; i<=lastColumnToSet; i++) { // skip over whitespace while (*c == ' ' || *c == '\t') c++; // make sure we have a delimiter for columns other than the first if (i>startCol) { if (*c != delim) throw UDRException( 38900, "Expected delimiter at position %d in string %s", c-row, row); // skip over the delimiter and white space c++; while (*c == ' ' || *c == '\t') c++; } // find the end of the column value const char *endOfVal = c; if (quote && *c == quoteSymbol) { // read and set a quoted string bool embeddedQuote = false; bool done = false; endOfVal = ++c; // find the matching end to the quote while (*endOfVal != 0 && !done) if (*endOfVal == quoteSymbol) if (endOfVal[1] == quoteSymbol) { // skip over both quotes embeddedQuote = true; endOfVal += 2; } else // found the terminating quote done = true; else endOfVal++; if (!done) throw UDRException( 38900, "missing quote at the end of column %d in string %s", i, row); if (embeddedQuote) { // need to transform the double doublequotes // in a separate buffer std::string unquotedVal(c, (endOfVal-c)); std::string::iterator it = unquotedVal.begin(); while (it != unquotedVal.end()) if (*it == quoteSymbol) it = unquotedVal.erase(it); else it++; if (i >= firstColumnToSet) // set from the transformed string setString(i, unquotedVal); } else { if (i >= firstColumnToSet) // set from the value between the quotes setString(i, c, (endOfVal-c)); // skip over the trailing quote endOfVal++; } } else { // c points to the beginning of the field value // find the next delimiter or the end of the // record and treat white space only as a NULL // value bool isNull = true; while (*endOfVal != 0 && *endOfVal != delim) { if (isNull && *endOfVal != ' ' && *endOfVal != '\t') isNull = false; endOfVal++; } if (i >= firstColumnToSet) { if (isNull) setNull(i); else setString(i, c, (endOfVal-c)); } } // set the current character pointer to the // character just past of what we have 
consumed c = endOfVal; } return c; } /** * Set an output column to a NULL value. * * Use this method at runtime. * * @param colNum Index/ordinal of the column to set to NULL. * @throws UDRException */ void TupleInfo::setNull(int colNum) const { getType(colNum).setNull(rowPtr_); } /** * Add an integer output column. * * The new column is added at the end. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @param colName Name of the column to add. Use UPPER CASE letters, * digits and underscore, otherwise you will need to * use delimited column names with matching case in * Trafodion. * @param isNullable true if the added column should be nullable, * false if the added column should have the NOT NULL * constraint. * @throws UDRException */ void TupleInfo::addIntColumn(const char *colName, bool isNullable) { addColumn(ColumnInfo(colName, TypeInfo(TypeInfo::INT,0,isNullable))); } /** * Add a long output column. * * The new column is added at the end. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @param colName Name of the column to add. Use UPPER CASE letters, * digits and underscore, otherwise you will need to * use delimited column names with matching case in * Trafodion. * @param isNullable true if the added column should be nullable, * false if the added column should have the NOT NULL * constraint. * @throws UDRException */ void TupleInfo::addLongColumn(const char *colName, bool isNullable) { addColumn(ColumnInfo(colName, TypeInfo(TypeInfo::LARGEINT,0,isNullable))); } /** * Add a fixed character output column. * * The new column is added at the end. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @param colName Name of the column to add. Use UPPER CASE letters, * digits and underscore, otherwise you will need to * use delimited column names with matching case in * Trafodion. * @param length Length of the new character column. * For single-byte and variable byte character sets, * the length is specified in bytes. For UTF-8, this * is equivalent to CHAR(length BYTES) in SQL. For UCS2, * the length is in UCS2 16-bit characters. * @param isNullable true if the added column should be nullable, * false if the added column should have the NOT NULL * constraint. * @param charset Character set of the new column. * @param collation Collation of the new column. * @throws UDRException */ void TupleInfo::addCharColumn(const char *colName, int length, bool isNullable, TypeInfo::SQLCharsetCode charset, TypeInfo::SQLCollationCode collation) { addColumn(ColumnInfo(colName, TypeInfo(TypeInfo::CHAR, length, isNullable, 0, charset, TypeInfo::UNDEFINED_INTERVAL_CODE, 0, collation))); } /** * Add a VARCHAR output column. * * The new column is added at the end. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @param colName Name of the column to add. Use UPPER CASE letters, * digits and underscore, otherwise you will need to * use delimited column names with matching case in * Trafodion. * @param length Length of the new character column. * For single-byte and variable byte character sets, * the length is specified in bytes. For UTF-8, this * is equivalent to CHAR(length BYTES). For UCS2, the * length is in UCS2 16-bit characters. * @param isNullable true if the added column should be nullable, * false if the added column should have the NOT NULL * constraint. * @param charset Character set of the new column. * @param collation Collation of the new column. 
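 *
 *  A short sketch of building up the output table in
 *  UDR::describeParamsAndColumns() (the "info" object and its out()
 *  accessor are assumed; charset and collation are left at the default
 *  values assumed to be declared for this method):
 *
 *  @code
 *    info.out().addIntColumn("ID", false);
 *    info.out().addVarCharColumn("NAME", 256, true);
 *  @endcode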
* @throws UDRException */ void TupleInfo::addVarCharColumn(const char *colName, int length, bool isNullable, TypeInfo::SQLCharsetCode charset, TypeInfo::SQLCollationCode collation) { addColumn(ColumnInfo(colName, TypeInfo(TypeInfo::VARCHAR, length, isNullable, 0, charset, TypeInfo::UNDEFINED_INTERVAL_CODE, 0, collation))); } /** * Add multiple columns to the table-valued output. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @param columns Vector of ColumnInfo objects describing the columns to add. * @throws UDRException */ void TupleInfo::addColumns(const std::vector<ColumnInfo *> &columns) { for (std::vector<ColumnInfo *>::const_iterator it = columns.begin(); it != columns.end(); it++) addColumn(**it); } /** * Add a new column at a specified position. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @param column ColumnInfo object describing the new column. * @param position Position/ordinal number of the new column. * All existing columns with ordinal numbers * greater or equal to position will be shifted by one. * @throws UDRException */ void TupleInfo::addColumnAt(const ColumnInfo &column, int position) { ColumnInfo *newCol = new ColumnInfo(column); columns_.insert(columns_.begin() + position, newCol); } /** * Delete a column of the table-valued output. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @param i Position/ordinal (0-based) of column to be deleted. * @throws UDRException */ void TupleInfo::deleteColumn(int i) { std::vector<ColumnInfo *>::iterator it = columns_.begin() + i; if (it != columns_.end()) { delete *it; columns_.erase(it); } else throw UDRException(38906, "Column number %d not found", i); } /** * Delete a column with a specified column name. * * The first column that matches the specified column name * will be deleted. * * @param name Name of the column to be deleted. * @throws UDRException */ void TupleInfo::deleteColumn(const std::string &name) { deleteColumn(getColNum(name)); } /** * Print the object, for use in debugging. 
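 *
 *  For example (a sketch, assuming "info" with an out() accessor in a
 *  compile-time method such as UDR::describeParamsAndColumns()):
 *
 *  @code
 *    info.out().print();
 *  @endcode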
* * @see UDR::debugLoop() * @see UDRInvocationInfo::PRINT_INVOCATION_INFO_AT_RUN_TIME */ void TupleInfo::print() { printf(" Number of columns : %d\n", getNumColumns()); printf(" Columns : \n"); for (int c=0; c<getNumColumns(); c++) { std::string colString; getColumn(c).toString(colString, true); printf(" %s\n", colString.c_str()); } if (recordLength_ >= 0) printf(" Record length : %d\n", recordLength_); } int TupleInfo::serializedLength() { // format: base class + int(#cols) + n*ColumnInfo + int(recordLength_) // rowPtr_ is not serialized int result = TMUDRSerializableObject::serializedLength() + 2 * serializedLengthOfInt(); for (int c=0; c<getNumColumns(); c++) result += getColumn(c).serializedLength(); return result; } int TupleInfo::serialize(Bytes &outputBuffer, int &outputBufferLength) { int result = TMUDRSerializableObject::serialize(outputBuffer, outputBufferLength); result += serializeInt(getNumColumns(), outputBuffer, outputBufferLength); for (int c=0; c<getNumColumns(); c++) result += getColumn(c).serialize(outputBuffer, outputBufferLength); result += serializeInt(recordLength_, outputBuffer, outputBufferLength); if (getObjectType() == TUPLE_INFO_OBJ) validateSerializedLength(result); return result; } int TupleInfo::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { int result = TMUDRSerializableObject::deserialize(inputBuffer, inputBufferLength); // Caller needs to validate the object type when they are // serializing this class, since derived objects exist, so we // can't unconditionally expect a given object type here // validateObjectType(TABLE_INFO_OBJ); int numCols = 0; result += deserializeInt(numCols, inputBuffer, inputBufferLength); // delete all existing columns for (std::vector<ColumnInfo *>::iterator it1 = columns_.begin(); it1 != columns_.end(); it1++) delete *it1; columns_.clear(); for (int c=0; c<numCols; c++) { ColumnInfo ci; result += ci.deserialize(inputBuffer, inputBufferLength); addColumn(ci); } result += deserializeInt(recordLength_, inputBuffer, inputBufferLength); // leave rowPtr_ intact, the row is not serialized/ // deserialized with this object if (getObjectType() == TUPLE_INFO_OBJ) validateDeserializedLength(result); return result; } char * TupleInfo::getRowPtr() const { return rowPtr_; } /** * Get the record length of a row. * * This method returns the approximate record length of the tuple at * compile time and the actual (non-compressed) record length at * runtime. This might be useful for cost estimation, otherwise it * can be ignored by UDF writers. * * @return Record length in bytes. */ int TupleInfo::getRecordLength() const { return recordLength_; } void TupleInfo::setRecordLength(int len) { recordLength_ = len; } void TupleInfo::setRowPtr(char *ptr) { rowPtr_ = ptr; } // ------------------------------------------------------------------------ // Member functions for class TableInfo // ------------------------------------------------------------------------ TableInfo::TableInfo() : TupleInfo(TABLE_INFO_OBJ, getCurrentVersion()), estimatedNumRows_(-1), estimatedNumPartitions_(-1) {} TableInfo::~TableInfo() { // delete all constraints for (std::vector<ConstraintInfo *>::iterator it2 = constraints_.begin(); it2 != constraints_.end(); it2++) delete *it2; } /** * Get the estimated number of rows of this table. * * @see setEstimatedNumRows() * @see getEstimatedNumPartitions() * * @return Estimated number of rows or -1 if there is no estimate. 
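 *
 *  For example, a UDF that returns roughly one row for every ten input
 *  rows could derive an estimate for its result (a sketch; the "info"
 *  object with its in() and out() accessors is assumed to come from
 *  UDR::describeStatistics()):
 *
 *  @code
 *    long inRows = info.in(0).getEstimatedNumRows();
 *
 *    if (inRows >= 0)
 *      info.out().setEstimatedNumRows(inRows/10);
 *  @endcode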
*/ long TableInfo::getEstimatedNumRows() const { return estimatedNumRows_; } /** * For tables with a PARTITION BY, get estimated number of partitions. * * @see getEstimatedNumRows() * @see setEstimatedNumRows() * * @return Estimated number of partitions or -1 if there is no estimate or no PARTITION BY. */ long TableInfo::getEstimatedNumPartitions() const { return estimatedNumPartitions_; } /** * Get the PARTITION BY clause for this input table. * * This returns either the PARTITION BY clause specified in the * SQL query, or the updated partitioning information, set by * UDRInvocationInfo::setChildPartitioning(), called during * UDR::describeParamsAndColumns(). * * @return Partitioning clause for this input table. */ const PartitionInfo &TableInfo::getQueryPartitioning() const { return queryPartitioning_; } // non-const version PartitionInfo &TableInfo::getQueryPartitioning() { return queryPartitioning_; } /** * Get the ORDER BY clause for this input table. * * This returns either the ORDER BY clause specified in the * SQL query, or the updated ordering information, set by * UDRInvocationInfo::setChildOrdering(), called during * UDR::describeParamsAndColumns(). * * @return Ordering clause for this input table. */ const OrderInfo &TableInfo::getQueryOrdering() const { return queryOrdering_; } // non-const version OrderInfo &TableInfo::getQueryOrdering() { return queryOrdering_; } /** * Returns whether the UDF result is treated as a continuous stream. * * Note: This is currently not supported. The method always returns false * for now. * * @return true if the UDF result is a stream, false otherwise. */ bool TableInfo::isStream() const { return false; } /** * Get the number of constraints defined on this table. * * @return Number of constraints defined on this table. */ int TableInfo::getNumConstraints() const { return constraints_.size(); } /** * Get a constraint by index/ordinal number. * * @param i index/ordinal (0-based) of the constraint. * @return Constraint for a given index/ordinal. * @throws UDRException */ const ConstraintInfo &TableInfo::getConstraint(int i) const { if (i < 0 || i >= constraints_.size()) throw UDRException( 38900, "Trying to access constraint %d of a ConstraintInfo object with %d constraints", i, constraints_.size()); return *(constraints_[i]); } /** * Set the estimated number of rows for a UDF table-valued result. * * Setting this value can help the Trafodion optimizer generate a better * plan for queries containing table-valued UDFs. Note that this is only * an estimate, a strict correspondence to the actual number of rows * returned at runtime is not required. * * Only use this method from within the following methods: * @arg UDR::describeParamsAndColumns() * @arg UDR::describeDataflowAndPredicates() * @arg UDR::describeConstraints() * @arg UDR::describeStatistics() * * @param rows Estimated number of rows for this table. */ void TableInfo::setEstimatedNumRows(long rows) { estimatedNumRows_ = rows; } /** * Add a cardinality constraint to the UDF table-valued output. * * Only use this method from within the following methods: * @arg UDR::describeParamsAndColumns() * @arg UDR::describeDataflowAndPredicates() * @arg UDR::describeConstraints() * * @param constraint New constraint to add. The object needs to be * deallocated by the caller after this call returns. 
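 *
 *  A brief sketch, declaring that the UDF produces at most one result
 *  row (the "info" object and its out() accessor are assumed to come
 *  from UDR::describeConstraints()):
 *
 *  @code
 *    CardinalityConstraintInfo atMostOneRow(0, 1);
 *
 *    info.out().addCardinalityConstraint(atMostOneRow);
 *  @endcode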
* @throws UDRException */ void TableInfo::addCardinalityConstraint( const CardinalityConstraintInfo &constraint) { ConstraintInfo *newConstr = new CardinalityConstraintInfo(constraint); constraints_.push_back(newConstr); } /** * Add a uniqueness constraint to the UDF table-valued output. * * Only use this method from within the following methods: * @arg UDR::describeParamsAndColumns() * @arg UDR::describeDataflowAndPredicates() * @arg UDR::describeConstraints() * * @param constraint New uniqueness constraint to add. The object needs * to be deallocated by the caller after this call returns. * @throws UDRException */ void TableInfo::addUniquenessConstraint( const UniqueConstraintInfo &constraint) { ConstraintInfo *newConstr = new UniqueConstraintInfo(constraint); constraints_.push_back(newConstr); } /** * Set whether a table should be treated as a stream. * * This method is not yet supported. * * @param stream true if the table is a stream, false otherwise. * @throws UDRException */ void TableInfo::setIsStream(bool stream) { if (stream) throw UDRException(38908, "Stream tables not yet supported"); } /** * Print the object, for use in debugging. * * @see UDR::debugLoop() * @see UDRInvocationInfo::PRINT_INVOCATION_INFO_AT_RUN_TIME */ void TableInfo::print() { TupleInfo::print(); printf(" Estimated number of rows : %ld\n", getEstimatedNumRows()); printf(" Partitioning : "); switch (getQueryPartitioning().getType()) { case PartitionInfo::UNKNOWN: printf("unknown\n"); break; case PartitionInfo::ANY: printf("any\n"); break; case PartitionInfo::SERIAL: printf("serial\n"); break; case PartitionInfo::PARTITION: { bool needsComma = false; printf("("); for (int p=0; p<getQueryPartitioning().getNumEntries(); p++) { if (needsComma) printf(", "); printf("%s", getColumn(getQueryPartitioning().getColumnNum(p)).getColName().c_str()); needsComma = true; } printf(")\n"); printf(" Estimated # of partitions: %ld\n", getEstimatedNumPartitions()); } break; case PartitionInfo::REPLICATE: printf("replicate\n"); break; default: printf("invalid partitioning specification!\n"); break; } printf(" Ordering : "); if (getQueryOrdering().getNumEntries() > 0) { printf("("); for (int o=0; o<getQueryOrdering().getNumEntries(); o++) { if (o>0) printf(", "); printf("%s", getColumn( getQueryOrdering().getColumnNum(o)).getColName().c_str()); OrderInfo::OrderTypeCode ot = getQueryOrdering().getOrderType(o); if (ot == OrderInfo::DESCENDING) printf(" DESC"); else if (ot != OrderInfo::ASCENDING) printf(" - invalid order type!"); } printf(")\n"); } else printf("none\n"); if (constraints_.size() > 0) { printf(" Constraints :\n"); for (int c=0; c<constraints_.size(); c++) { std::string s = " "; constraints_[c]->toString(*this, s); printf("%s\n", s.c_str()); } } } void TableInfo::setQueryPartitioning(const PartitionInfo &partInfo) { queryPartitioning_ = partInfo; } void TableInfo::setQueryOrdering(const OrderInfo &orderInfo) { queryOrdering_ = orderInfo; } int TableInfo::serializedLength() { // format: base class + long(numRows) + long(numParts) + // int(#part cols) + int(#order cols) + // binary array of ints: // p*int(partkeycol#) + // o*(int(ordercol#) + int(ordering)) + // int(#constraints) + constraints int result = TupleInfo::serializedLength() + 2 * serializedLengthOfLong() + 4 * serializedLengthOfInt() + serializedLengthOfBinary( (getQueryPartitioning().getNumEntries() + 2 * getQueryOrdering().getNumEntries()) * sizeof(int)); for (int c=0; c<constraints_.size(); c++) result += constraints_[c]->serializedLength(); return result; } 
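
// Illustration only (not part of the implementation that follows): the
// constraint methods above are typically called from
// UDR::describeConstraints(). A minimal, hypothetical sketch -- the class
// name and the numbers used for the constraints are placeholders chosen
// for this example:
//
//   void MyUDF::describeConstraints(UDRInvocationInfo &info)
//   {
//     // the UDF never returns more than 1000 rows
//     info.out().addCardinalityConstraint(CardinalityConstraintInfo(0, 1000));
//
//     // output column 0 contains unique values
//     UniqueConstraintInfo uc;
//     uc.addColumn(0);
//     info.out().addUniquenessConstraint(uc);
//   }
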
int TableInfo::serialize(Bytes &outputBuffer, int &outputBufferLength) { int result = TupleInfo::serialize(outputBuffer, outputBufferLength); int numPartCols = queryPartitioning_.getNumEntries(); int numOrderCols = queryOrdering_.getNumEntries(); int numConstraints = constraints_.size(); int *intArray = new int[numPartCols + 2*numOrderCols]; int c; result += serializeLong(estimatedNumRows_, outputBuffer, outputBufferLength); result += serializeLong(estimatedNumPartitions_, outputBuffer, outputBufferLength); result += serializeInt(numPartCols, outputBuffer, outputBufferLength); result += serializeInt(numOrderCols, outputBuffer, outputBufferLength); result += serializeInt(static_cast<int>(queryPartitioning_.getType()), outputBuffer, outputBufferLength); for (c=0; c<numPartCols; c++) intArray[c] = queryPartitioning_.getColumnNum(c); for (c=0; c<numOrderCols; c++) { intArray[numPartCols+2*c] = queryOrdering_.getColumnNum(c); intArray[numPartCols+2*c+1] = static_cast<int>(queryOrdering_.getOrderType(c)); } result += serializeBinary( intArray, (numPartCols + 2*numOrderCols) * sizeof(int), outputBuffer, outputBufferLength); delete intArray; result += serializeInt(numConstraints, outputBuffer, outputBufferLength); for (c=0; c<numConstraints; c++) result += constraints_[c]->serialize(outputBuffer, outputBufferLength); validateSerializedLength(result); return result; } int TableInfo::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { int result = TupleInfo::deserialize(inputBuffer, inputBufferLength); validateObjectType(TABLE_INFO_OBJ); int numCols = 0; int numPartCols = 0; int numOrderCols = 0; int numConstraints = 0; int partType = 0; const int *intArray = NULL; int binarySize = 0; int c; result += deserializeLong(estimatedNumRows_, inputBuffer, inputBufferLength); result += deserializeLong(estimatedNumPartitions_, inputBuffer, inputBufferLength); result += deserializeInt(numPartCols, inputBuffer, inputBufferLength); result += deserializeInt(numOrderCols, inputBuffer, inputBufferLength); result += deserializeInt(partType, inputBuffer, inputBufferLength); result += deserializeBinary((const void **) &intArray, binarySize, false, inputBuffer, inputBufferLength); if (binarySize != (numPartCols + 2*numOrderCols) * sizeof(int)) throw UDRException(38900, "Invalid int array size in TableInfo, got %d, expected %d", binarySize, (numPartCols + 2*numOrderCols) * sizeof(int)); queryPartitioning_.clear(); queryPartitioning_.setType( static_cast<PartitionInfo::PartitionTypeCode>(partType)); for (c=0; c<numPartCols; c++) queryPartitioning_.addEntry(intArray[c]); queryOrdering_.clear(); for (c=0; c<numOrderCols; c++) queryOrdering_.addEntry( intArray[numPartCols+2*c], static_cast<OrderInfo::OrderTypeCode>(intArray[numPartCols+2*c+1])); // delete all constraints for (std::vector<ConstraintInfo *>::iterator it2 = constraints_.begin(); it2 != constraints_.end(); it2++) delete *it2; constraints_.clear(); result += deserializeInt(numConstraints, inputBuffer, inputBufferLength); for (c=0; c<numConstraints; c++) { ConstraintInfo *constr = NULL; // look ahead what the next object type is and allocate // an empty object of the appropriate subclass switch (getNextObjectType(inputBuffer, inputBufferLength)) { case CARDINALITY_CONSTRAINT_INFO_OBJ: constr = new CardinalityConstraintInfo(); break; case UNIQUE_CONSTRAINT_INFO_OBJ: constr = new UniqueConstraintInfo(); break; default: throw UDRException( 38900, "Invalid object type during constraint deserialization: %d", static_cast<int>(getNextObjectType(inputBuffer, 
inputBufferLength))); } // deserialize the object and add it to the list of constraints result += constr->deserialize(inputBuffer, inputBufferLength); constraints_.push_back(constr); } validateDeserializedLength(result); return result; } // ------------------------------------------------------------------------ // Member functions for class ParameterListInfo // ------------------------------------------------------------------------ ParameterListInfo::ParameterListInfo() : TupleInfo(PARAMETER_LIST_INFO_OBJ, getCurrentVersion()) { } ParameterListInfo::~ParameterListInfo() { } int ParameterListInfo::serializedLength() { // format: Base class return TupleInfo::serializedLength(); } int ParameterListInfo::serialize(Bytes &outputBuffer, int &outputBufferLength) { int result = TupleInfo::serialize(outputBuffer, outputBufferLength); validateSerializedLength(result); return result; } int ParameterListInfo::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { int result = TupleInfo::deserialize(inputBuffer, inputBufferLength); validateObjectType(PARAMETER_LIST_INFO_OBJ); validateDeserializedLength(result); return result; } // ------------------------------------------------------------------------ // Member functions for class UDRWriterCompileTimeData // ------------------------------------------------------------------------ /** * Default constructor. * * UDR writers can derive from this class to store state between * the calls of the compiler interface. */ UDRWriterCompileTimeData::UDRWriterCompileTimeData() {} /** * Virtual destructor. * * Override the virtual destructor in derived classes to clean up any * resources owned by the UDR writer once the compile phase of a * query is completed. */ UDRWriterCompileTimeData::~UDRWriterCompileTimeData() {} /** * Print the object, for use in debugging. * * @see UDR::debugLoop() * @see UDRInvocationInfo::PRINT_INVOCATION_INFO_AT_RUN_TIME */ void UDRWriterCompileTimeData::print() { printf("no print method provided for UDR Writer compile time data\n"); } // ------------------------------------------------------------------------ // Member functions for class UDRInvocationInfo // ------------------------------------------------------------------------ UDRInvocationInfo::UDRInvocationInfo() : TMUDRSerializableObject(UDR_INVOCATION_INFO_OBJ, getCurrentVersion()), numTableInputs_(0), callPhase_(UNKNOWN_CALL_PHASE), funcType_(GENERIC), debugFlags_(0), sqlAccessType_(CONTAINS_NO_SQL), sqlTransactionType_(REQUIRES_NO_TRANSACTION), sqlRights_(INVOKERS_RIGHTS), isolationType_(TRUSTED), udrWriterCompileTimeData_(NULL), totalNumInstances_(0), myInstanceNum_(0) {} UDRInvocationInfo::~UDRInvocationInfo() { // delete all the content of collections of pointers for (std::vector<PredicateInfo *>::iterator p = predicates_.begin(); p != predicates_.end(); p++) delete *p; // delete UDF writer's data if (udrWriterCompileTimeData_) delete udrWriterCompileTimeData_; } /** * Get the UDR name. * * @return Fully qualified name (catalog.schema.name) of the UDR. */ const std::string &UDRInvocationInfo::getUDRName() const { return name_; } /** * Get number of table-valued inputs provided. * * @return Number of table-valued inputs provided. */ int UDRInvocationInfo::getNumTableInputs() const { return numTableInputs_; } /** * Get description of a table-valued input. * * @return TableInfo reference for the table-valued input. 
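 *
 * A small sketch of how the inputs might be inspected at compile time,
 * assuming @c info is the UDRInvocationInfo reference passed to one of
 * the UDR compile-time methods (illustration only):
 *
 * @code
 * for (int t=0; t<info.getNumTableInputs(); t++)
 *   for (int c=0; c<info.in(t).getNumColumns(); c++)
 *   {
 *     const ColumnInfo &col = info.in(t).getColumn(c);
 *     // validate column names and types of the table-valued inputs here
 *   }
 * @endcode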
 * @throws UDRException
 */
const TableInfo &UDRInvocationInfo::in(int childNum) const
{
  if (childNum < 0 || childNum >= numTableInputs_)
    throw UDRException(38909, "Invalid child table number %d", childNum);
  return inputTableInfo_[childNum];
}

/**
 * Get description of the table-valued result.
 *
 * @return TableInfo reference for the table-valued output.
 */
const TableInfo &UDRInvocationInfo::out() const
{
  return outputTableInfo_;
}

/**
 * Non-const method to get description of the table-valued result.
 *
 * @return Non-const TableInfo reference for the table-valued output.
 */
TableInfo &UDRInvocationInfo::out()
{
  return outputTableInfo_;
}

/**
 * Get call phase.
 *
 * This call is not normally needed, since we know which method
 * of UDR we are in. However, in some cases where the UDR
 * writer wants to use code in multiple call phases this might
 * be useful.
 *
 * @return Enum for the call phase we are in.
 */
UDRInvocationInfo::CallPhase UDRInvocationInfo::getCallPhase() const
{
  return callPhase_;
}

/**
 * Get current user.
 *
 * Get the id of the current user, which is the effective
 * user id at the time. This is usually the same as
 * the session user, except when a view or UDR uses "definer
 * privileges", substituting the current user with the
 * definer of the view or UDR. In SQL, this value is
 * called CURRENT_USER.
 *
 * @see getSessionUser()
 * @return Current user.
 */
const std::string &UDRInvocationInfo::getCurrentUser() const
{
  return currentUser_;
}

/**
 * Get session user.
 *
 * Get the id of the session user, which is the user who
 * connected to the database. This is usually the same as
 * the current user, except when a view or UDR uses "definer
 * privileges", substituting the current user with the
 * definer of the view or UDR. In SQL, this value is
 * called SESSION_USER.
 *
 * @see getCurrentUser()
 * @return Session user.
 */
const std::string &UDRInvocationInfo::getSessionUser() const
{
  return sessionUser_;
}

/**
 * Get current role.
 *
 * @return Current role.
 */
const std::string &UDRInvocationInfo::getCurrentRole() const
{
  return currentRole_;
}

/**
 * Get query id.
 *
 * The query id is only available at runtime. It is an empty
 * string at compile time.
 *
 * @return Query id.
 */
const std::string &UDRInvocationInfo::getQueryId() const
{
  return queryId_;
}

// The next four methods are not yet documented in Doxygen,
// since there is no choice yet. Add them to the documentation
// when we support more than one choice.
UDRInvocationInfo::SQLAccessType UDRInvocationInfo::getSQLAccessType() const
{
  return sqlAccessType_;
}

UDRInvocationInfo::SQLTransactionType UDRInvocationInfo::getSQLTransactionType() const
{
  return sqlTransactionType_;
}

UDRInvocationInfo::SQLRightsType UDRInvocationInfo::getSQLRights() const
{
  return sqlRights_;
}

UDRInvocationInfo::IsolationType UDRInvocationInfo::getIsolationType() const
{
  return isolationType_;
}

/**
 * Check whether we are in the compile time interface.
 *
 * @return true at compile time, false at run-time.
 */
bool UDRInvocationInfo::isCompileTime() const
{
  return (callPhase_ >= COMPILER_INITIAL_CALL &&
          callPhase_ <= COMPILER_COMPLETION_CALL);
}

/**
 * Check whether we are in the run-time interface.
 *
 * @return false at compile time, true at run-time.
 */
bool UDRInvocationInfo::isRunTime() const
{
  return (callPhase_ >= RUNTIME_WORK_CALL);
}

/**
 * Get debugging flags, set via CONTROL QUERY DEFAULT.
 *
 * Debug flags are set via the UDR_DEBUG_FLAGS CONTROL QUERY DEFAULT
 * at compile time. This returns the value of this CQD. Usually not
 * needed.
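 *
 * When it is used, it is typically a simple bit test; the specific
 * flag value 0x1 below is just an illustration, not a predefined flag:
 *
 * @code
 * if (info.getDebugFlags() & 0x1)
 *   printf("MyUDF: verbose tracing enabled\n");
 * @endcode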
* * @return Value the UDR_DEBUG_FLAGS CQD has or had at compile time. */ int UDRInvocationInfo::getDebugFlags() const { return debugFlags_; } /** * Get the function type of this UDR invocation. * * Returns the function type that can be set by the UDR writer * with the setFuncType() method. * * @see setFuncType() * * @return Enum of the function type. */ UDRInvocationInfo::FuncType UDRInvocationInfo::getFuncType() const { return funcType_; } /** * Get the formal parameters of the UDR invocation. * * Formal parameters are available only at compile time. * They are either defined in the CREATE FUNCTION DDL or through * the compile time interface. Note that number and types of formal * and actual parameters must match, once we return from the * describeParamsAndColumns() call, otherwise an error will be generated. * * @return Formal parameter description. */ const ParameterListInfo &UDRInvocationInfo::getFormalParameters() const { return formalParameterInfo_; } /** * Get parameters of the UDR invocation. * * These are the actual parameters. At compile time, if a constant * has been used, the value of this constant is available, using * getString(), getInt() etc. methods. The isAvailable() method indicates * whether the parameter is indeed available at compile time. Parameters * are always available at run-time. * * @return Parameter description. */ const ParameterListInfo &UDRInvocationInfo::par() const { return actualParameterInfo_; } ParameterListInfo &UDRInvocationInfo::nonConstFormalParameters() { return formalParameterInfo_; } ParameterListInfo &UDRInvocationInfo::nonConstActualParameters() { return actualParameterInfo_; } /** * Return number of predicates to be applied in the context of this UDF. * * Don't use this method from within UDR::describeParamsAndColumns(), * since the predicates are not yet set up in that phase. * * @return Number of predicates. */ int UDRInvocationInfo::getNumPredicates() const { // predicates are not yet set up in the initial call validateCallPhase(COMPILER_DATAFLOW_CALL, RUNTIME_WORK_CALL, "UDRInvocationInfo::getNumPredicates()"); return predicates_.size(); } /** * Get the description of a predicate to be applied. * * @return Description of the predicate. * * @see setPredicateEvaluationCode() * @throws UDRException */ const PredicateInfo &UDRInvocationInfo::getPredicate(int i) const { if (i < 0 || i >= predicates_.size()) throw UDRException( 38900, "Trying to access predicate %d of a PredicateInfo object with %d predicates", i, predicates_.size()); return *(predicates_[i]); } /** * Check whether a given predicate is a comparison predicate. * * This returns whether it is safe to use method getComparisonPredicate(). * * @see getComparisonPredicate() * * @param i Number/ordinal index of the predicate. * @return true if predcate i is a comparison predicate, false otherwise. * @throws UDRException */ bool UDRInvocationInfo::isAComparisonPredicate(int i) const { return getPredicate(i).isAComparisonPredicate(); } /** * Get a comparison predicate * * Note: This will throw an exception if predicate i is not a * comparison predicate. Use method isAComparisonPredicate() to * make sure this is the case. Note also that the numbering * scheme is the same as that for getPredicate, so if there is * a mix of different predicate types, the numbers of comparison * predicates are not contiguous. * * @see getPredicate() * @see isAComparisonPredicate() * @param i Number/ordinal of the predicate to retrieve. * @return Comparison predicate. 
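 *
 * A sketch of typical usage from UDR::describeDataflowAndPredicates(),
 * assuming @c info is the UDRInvocationInfo passed to that method and
 * that the UDF wants to evaluate predicates on its first output column
 * itself (the column number 0 is just an example):
 *
 * @code
 * for (int p=0; p<info.getNumPredicates(); p++)
 *   if (info.isAComparisonPredicate(p))
 *   {
 *     const ComparisonPredicateInfo &cpi = info.getComparisonPredicate(p);
 *
 *     if (cpi.getColumnNumber() == 0 && cpi.hasAConstantValue())
 *       info.setPredicateEvaluationCode(p, PredicateInfo::EVALUATE_IN_UDF);
 *   }
 * @endcode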
* @throws UDRException */ const ComparisonPredicateInfo &UDRInvocationInfo::getComparisonPredicate( int i) const { if (!isAComparisonPredicate(i)) throw UDRException(38900, "Predicate %d is not a comparison predicate", i); return dynamic_cast<ComparisonPredicateInfo &>(*(predicates_[i])); } /** * Add a formal parameter to match an actual parameter. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @see describeParamsAndColumns() * * @param param Info with name and type of the formal parameter. * * @throws UDRException */ void UDRInvocationInfo::addFormalParameter(const ColumnInfo &param) { validateCallPhase(COMPILER_INITIAL_CALL, COMPILER_INITIAL_CALL, "UDRInvocationInfo::addFormalParameter()"); formalParameterInfo_.addColumn(param); } /** * Set the function type of this UDR invocation. * * Use this simple method with some caution, since it has an effect * on how predicates are pushed down through TMUDFs with table-valued * inputs. See describeDataflowAndPredicates() for details. The function * type also influences the default degree of parallelism for a TMUDF. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @see getFunctType() * @see describeParamsAndColumns() * @see describeDataflowAndPredicates() * @see setDesiredDegreeOfParallelism() * * @param type Function type of this UDR invocation. * @throws UDRException */ void UDRInvocationInfo::setFuncType(FuncType type) { validateCallPhase(COMPILER_INITIAL_CALL, COMPILER_INITIAL_CALL, "UDRInvocationInfo::setFuncType()"); funcType_ = type; // also set the default value for partitioning of table-valued inputs // to ANY, if this UDF is a mapper, to allow parallel execution // without restrictions if (type == MAPPER && getNumTableInputs() == 1 && in().getQueryPartitioning().getType() == PartitionInfo::UNKNOWN) inputTableInfo_[0].getQueryPartitioning().setType(PartitionInfo::ANY); } /** * Add columns of table-valued inputs as output columns. * * Many TMUDFs make the column values of their table-valued inputs available * as output columns. Such columns are called "pass-thru" columns. This * method is an easy interface to create such pass-thru columns. Note that * if a column is marked as pass-thru column, the UDF must copy the input * value to the output (e.g. with the copyPassThruData() method). If it fails * to do that, incorrect results may occur, because the compiler makes * the assumptions that these values are the same. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @see UDR::describeParamsAndColumns() * @see ProvenanceInfo * @see ColumnInfo::getProvenance() * * @param inputTableNum Index of table-valued input to add. * @param startInputColNum First column of the table-valued input to add * as an output column. * @param endInputColNum Last column of the table-valued input to add * as an output column (note this is inclusive) * or -1 to add all remaining column. * @throws UDRException */ void UDRInvocationInfo::addPassThruColumns(int inputTableNum, int startInputColNum, int endInputColNum) { validateCallPhase(COMPILER_INITIAL_CALL, COMPILER_INITIAL_CALL, "UDRInvocationInfo::addPassThruColumns()"); // Adding one or more columns from an input (child) table as output columns // The advantage of doing this is that the query optimizer can automatically // apply some optimizations: // // - Push predicates on output columns down to input tables. This reduces the // number of rows that have to be processed by the TMUDF. 
// - If a table-valued input is ordered, the TMUDF output is assumed to be // also ordered on the corresponding columns. // - Similar for partitioning. // - If there are histogram statistics on an input column, these statistics // will be used for the output columns as well, even though the TMUDF may // eliminate some input rows and duplicate others, so the total row count // and frequencies of values may or may not be usable. if (endInputColNum == -1) endInputColNum = in(inputTableNum).getNumColumns() - 1; for (int c=startInputColNum; c<=endInputColNum; c++) { // make a copy of the input column ColumnInfo newCol(in(inputTableNum).getColumn(c)); // change the provenance info of the column newCol.setProvenance(ProvenanceInfo(inputTableNum, c)); outputTableInfo_.addColumn(newCol); } } /** * Set the PARTITION BY info for a table-valued input. * * This method allows the UDR writer to override the * PARTITION BY syntax specified for a table-valued input * in the query. Use it to change the required partitioning. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @see getChildPartitioning() * @see UDR::describeParamsAndColumns() * * @param inputTableNum Number of table-valued input to set. * @param partInfo New information on required partitioning for this input table. * @throws UDRException */ void UDRInvocationInfo::setChildPartitioning(int inputTableNum, const PartitionInfo &partInfo) { validateCallPhase(COMPILER_INITIAL_CALL, COMPILER_INITIAL_CALL, "UDRInvocationInfo::setChildPartitioning()"); if (inputTableNum < 0 || inputTableNum >= numTableInputs_) throw UDRException(38900, "Invalid child table number %d", inputTableNum); inputTableInfo_[inputTableNum].setQueryPartitioning(partInfo); } /** * Set the ORDER BY info for a table-valued input. * * This method allows the UDR writer to override the * ORDER BY syntax specified for a table-valued input * in the query. Use it to change the required order. * * Only use this method from within the * UDR::describeParamsAndColumns() method. * * @see getChildOrdering() * @see UDR::describeParamsAndColumns() * * @param inputTableNum Number of table-valued input to set. * @param orderInfo New information on required order for this input table. * @throws UDRException */ void UDRInvocationInfo::setChildOrdering(int inputTableNum, const OrderInfo &orderInfo) { validateCallPhase(COMPILER_INITIAL_CALL, COMPILER_INITIAL_CALL, "UDRInvocationInfo::setChildOrder()"); if (inputTableNum < 0 || inputTableNum >= numTableInputs_) throw UDRException(38900, "Invalid child table number %d", inputTableNum); inputTableInfo_[inputTableNum].setQueryOrdering(orderInfo); } /** * Set the usage information for a column of a table-valued input * * This method allows the UDR writer to specify whether a given * child column is needed or not. * * Only use this method from within the * UDR::describeDataflowAndPredicates() method. * * @see setUnusedPassthruColumns() * @see UDR::describeDataflowAndPredicates() * * @param inputTableNum Number of table-valued input to set. * @param inputColumnNum Column number for the column to set. * @param usage New usage for this column. * @throws UDRException */ void UDRInvocationInfo::setChildColumnUsage(int inputTableNum, int inputColumnNum, ColumnInfo::ColumnUseCode usage) { in(inputTableNum); // validate inputTableNum inputTableInfo_[inputTableNum].getColumn(inputColumnNum).setUsage(usage); } /** * Mark any passthru columns that are not needed as unused. 
* * For any passthru columns that are marked as NOT_USED or NOT_PRODUCED in * the table-valued result, set the corresponding input columns * to NOT_USED as well. Note that this assumes that the UDF * does not need these columns, either! The usage for the passthru column * itself is also set to NOT_PRODUCED, since the UDF could not produce * the column without having access to the corresponding input column. * * Only use this method from within the * UDR::describeDataflowAndPredicates() method. * * @see addPassThruColumns() * @see setChildColumnUsage() * @see UDR::describeDataflowAndPredicates() * * @throws UDRException */ void UDRInvocationInfo::setUnusedPassthruColumns() { int numOutCols = out().getNumColumns(); // loop over output columns for (int oc=0; oc<numOutCols; oc++) { ColumnInfo &colInfo = out().getColumn(oc); ColumnInfo::ColumnUseCode usage = colInfo.getUsage(); const ProvenanceInfo &prov = colInfo.getProvenance(); int it = prov.getInputTableNum(); int ic = prov.getInputColumnNum(); // is this a pass-thru column that is not used? if (it >= 0 && ic >= 0 && (usage == ColumnInfo::NOT_USED || usage == ColumnInfo::NOT_PRODUCED)) { setChildColumnUsage(it, ic, ColumnInfo::NOT_USED); // also make sure the output column is not produced, since // we could not get its value from the table-valued input colInfo.setUsage(ColumnInfo::NOT_PRODUCED); } } } /** * Decide where to evaluate a predicate. * * Only use this method from within the * UDR::describeDataflowAndPredicates() method. * * @see getPredicate() * @see UDR::describeDataflowAndPredicates() * * @param predicateNum Number/index of predicate returned by getPredicate() * method. * @param c Evaluation code for this predicate. * @throws UDRException */ void UDRInvocationInfo::setPredicateEvaluationCode(int predicateNum, PredicateInfo::EvaluationCode c) { validateCallPhase(COMPILER_DATAFLOW_CALL, COMPILER_DATAFLOW_CALL, "UDRInvocationInfo::setPredicateEvaluationCode()"); // validate index const PredicateInfo &pred = getPredicate(predicateNum); if (c == PredicateInfo::EVALUATE_IN_UDF && pred.isAComparisonPredicate() && !(dynamic_cast<const ComparisonPredicateInfo &>(pred).hasAConstantValue())) throw UDRException( 38900, "Comparison predicate %d cannot be evaluated in the UDF since it does not refer to a constant value", predicateNum); predicates_[predicateNum]->setEvaluationCode(c); } /** * Push predicates on pass-thru columns to the table-valued input. * * Push one or more predicates to their corresponding table-valued input, * if they reference only columns from that input, otherwise leave the * predicate(s) unchanged. * * Only use this method from within the * UDR::describeDataflowAndPredicates() method. * * @see PredicateInfo::setEvaluationCode() * @see UDR::describeDataflowAndPredicates() * * @param startPredNum Number/index of first predicate to be pushed. * @param lastPredNum Number/index of last predicate to be pushed (inclusive) * or -1 to push all remaining predicates. * @throws UDRException */ void UDRInvocationInfo::pushPredicatesOnPassthruColumns(int startPredNum, int lastPredNum) { validateCallPhase(COMPILER_DATAFLOW_CALL, COMPILER_DATAFLOW_CALL, "UDRInvocationInfo::pushPredicatesOnPassthruColumns()"); int numPreds = getNumPredicates(); // loop over predicates in the specified range for (int p = startPredNum; p<numPreds && (p<=lastPredNum || lastPredNum == -1); p++) if (isAComparisonPredicate(p)) { const ComparisonPredicateInfo &cpi = getComparisonPredicate(p); if (out().getColumn(cpi.getColumnNumber()). 
getProvenance().isFromInputTable()) // Yes, this predicate is a comparison predicate on a pass-thru // column (note we do not allow predicates of the form // "col1 op col2"). Push it down. setPredicateEvaluationCode(p,PredicateInfo::EVALUATE_IN_CHILD); } } /** * Propagate constraints for UDFs that return one result row for * every input row. * * Use this method only if the UDF returns no more than one result row for * every input row it reads from its single table-valued input. Note that * it is ok for the UDF to return no result rows for some input rows. * Wrong results may be returned by SQL statements involving this UDF if * the UDF does at runtime not conform to the 1x1 relationship of rows. * * Only use this method from within the UDR::describeConstraints() method. * * @param exactlyOneRowPerInput Indicates whether the UDF returns exactly * one output row (true) or at most one output * row (false) for every input row. */ void UDRInvocationInfo::propagateConstraintsFor1To1UDFs( bool exactlyOneRowPerInput) { validateCallPhase(COMPILER_CONSTRAINTS_CALL, COMPILER_CONSTRAINTS_CALL, "UDRInvocationInfo::propagateConstraintsFor1To1UDFs()"); if (getNumTableInputs() == 1) { int numConstraints = in().getNumConstraints(); int numOutputCols = out().getNumColumns(); for (int c=0; c<numConstraints; c++) switch (in().getConstraint(c).getType()) { case ConstraintInfo::CARDINALITY: { const CardinalityConstraintInfo &cc = static_cast<const CardinalityConstraintInfo &>( in().getConstraint(c)); // add a cardinality constraint to the parent with // an adjusted lower bound of 0 if exactlyOneRowPerInput // is false out().addCardinalityConstraint(CardinalityConstraintInfo( (exactlyOneRowPerInput ? cc.getMinNumRows() : 0), cc.getMaxNumRows())); } break; case ConstraintInfo::UNIQUE: { UniqueConstraintInfo ucParent; const UniqueConstraintInfo &ucChild = static_cast<const UniqueConstraintInfo &>( in().getConstraint(c)); int numUniqueCols = ucChild.getNumUniqueColumns(); // translate child columns into parent columns for (int uc=0; uc<numUniqueCols; uc++) for (int oc=0; oc<numOutputCols; oc++) if (out().getColumn(oc).getProvenance().getInputColumnNum() == ucChild.getUniqueColumn(uc)) { ucParent.addColumn(oc); break; } if (ucParent.getNumUniqueColumns() == numUniqueCols) // we were able to translate all the unique columns on the // child into unique columns of the parent, add the constraint out().addUniquenessConstraint(ucParent); } break; default: // should not see this break; } } } /** * Get data to persist between calls of the compile-time interface * * The UDR writer must use a static or dynamic cast to get a pointer * to the derived class. * * Only use this method at compile time. * * @see setUDRWriterCompileTimeData() * * @return UDR writer-specific data that was previously attached or NULL. * @throws UDRException */ UDRWriterCompileTimeData *UDRInvocationInfo::getUDRWriterCompileTimeData() { validateCallPhase(COMPILER_INITIAL_CALL, COMPILER_COMPLETION_CALL, "UDRInvocationInfo::getUDRWriterCompileTimeData()"); return udrWriterCompileTimeData_; } /** * Set data to persist between calls of the compile-time interface * * This call can be used to attach an object derived from class * UDRWriterCompileTimeData to the UDRInvocationInfo object. Once * attached, the data will be carried between the stages of the * compiler interface and can be used to keep state. Note that * this data will be deleted at the end of the compiler phase and * will not persist until runtime. * * Only use this method at compile time. 
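 *
 * A minimal sketch of how such state could be kept (the derived class
 * and its member are made-up names for illustration; @c info is the
 * UDRInvocationInfo passed to the compile-time methods):
 *
 * @code
 * class MyCompileTimeData : public UDRWriterCompileTimeData
 * {
 * public:
 *   int interestingColNum_;
 * };
 *
 * // in an early call, e.g. UDR::describeParamsAndColumns():
 * info.setUDRWriterCompileTimeData(new MyCompileTimeData());
 *
 * // in a later compile-time call:
 * MyCompileTimeData *myData = static_cast<MyCompileTimeData *>(
 *   info.getUDRWriterCompileTimeData());
 * @endcode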
* * To keep state for specific plan alternatives, use the * UDRPlanInfo::setUDRWriterCompileTimeData() method. * * @see UDRInvocationInfo::getUDRWriterCompileTimeData() * @see UDRPlanInfo::setUDRWriterCompileTimeData() * @see getUDRWriterCompileTimeData() * * @param compileTimeData UDR writer-defined compile-time data to attach. * @throws UDRException */ void UDRInvocationInfo::setUDRWriterCompileTimeData( UDRWriterCompileTimeData *compileTimeData) { validateCallPhase(COMPILER_INITIAL_CALL, COMPILER_PLAN_CALL, "UDRInvocationInfo::setUDRWriterCompileTimeData()"); if (udrWriterCompileTimeData_) delete udrWriterCompileTimeData_; udrWriterCompileTimeData_ = compileTimeData; } /** * Copy values of pass-thru columns from the input to the output table. * * This method is an easy way to set the values of the table-valued result * row from their corresponding values in the table-valued inputs. * Note that the UDR must set all the values of the pass-thru columns to * the corresponsing values of the input tables. If it fails to do that, * some optimizations done by Trafodion could lead to wrong results * (e.g. some predicates could be applied incorrectly). Every TMUDF with * table-valued inputs and pass-thru columns should call this method for * every row it emits. * * This method can only be called from within UDR::processData(). * * @see addPassThruColumns() * @see UDR::processData() * * @param inputTableNum Number of table-valued input to copy from. * @param startInputColNum First column number in the input table to copy * @param endInputColNum Last column number in the input table to copy * (inclusive) or -1 to copy all remaining columns * @throws UDRException */ void UDRInvocationInfo::copyPassThruData(int inputTableNum, int startInputColNum, int endInputColNum) { // no need to validate call phase, this will raise an exception at compile time // validateCallPhase(RUNTIME_INITIAL_CALL, RUNTIME_FINAL_CALL, // "UDRInvocationInfo::copyPassThruData()"); int endColNum = endInputColNum; int numOutCols = out().getNumColumns(); if (endInputColNum < 0 || endInputColNum >= in(inputTableNum).getNumColumns()) endColNum = in(inputTableNum).getNumColumns() - 1; // loop through the output columns and pick up those that // are passed through from the specified input columns for (int oc=0; oc<numOutCols; oc++) { const ProvenanceInfo &prov = out().getColumn(oc).getProvenance(); int it = prov.getInputTableNum(); int ic = prov.getInputColumnNum(); if (it == inputTableNum && ic >= startInputColNum && ic <= endColNum) { // this output column is passed through from the range // of input columns selected, copy it const TypeInfo &ty = out().getColumn(oc).getType(); switch (ty.getSQLTypeSubClass()) { case TypeInfo::FIXED_CHAR_TYPE: case TypeInfo::VAR_CHAR_TYPE: case TypeInfo::DATE_TYPE: case TypeInfo::TIME_TYPE: case TypeInfo::TIMESTAMP_TYPE: case TypeInfo::LOB_SUB_CLASS: { int strLen = 0; const char *str = in(it).getRaw(ic, strLen); if (in(it).wasNull()) out().setNull(oc); else out().setString(oc, str, strLen); } break; case TypeInfo::EXACT_NUMERIC_TYPE: case TypeInfo::YEAR_MONTH_INTERVAL_TYPE: case TypeInfo::DAY_SECOND_INTERVAL_TYPE: { long l = in(it).getLong(ic); if (in(it).wasNull()) out().setNull(oc); else out().setLong(oc, l); } break; case TypeInfo::APPROXIMATE_NUMERIC_TYPE: { double d = in(it).getDouble(ic); if (in(it).wasNull()) out().setNull(oc); else out().setDouble(oc, d); } break; case TypeInfo::UNDEFINED_TYPE_SUB_CLASS: default: throw UDRException( 38900, "Invalid or unsupported type subclass in 
UDRInvocationInfo::copyPassThruData: %d", (int) ty.getSQLTypeSubClass()); } } } } /** * Get the number of parallel instances working on this UDR invocation. * * Use this method to find out how many parallel instances are * executing this UDR. * * This method can only be called from within UDR::processData(). * * @see getMyInstanceNum() * @return Number of parallel instances for this UDR invocation. * @throws UDRException */ int UDRInvocationInfo::getNumParallelInstances() const { validateCallPhase(RUNTIME_WORK_CALL, RUNTIME_WORK_CALL, "UDRInvocationInfo::getNumParallelInstances()"); return totalNumInstances_; } /** * Get the instance number of this runtime process. * * Use this method to find out which of the parallel instances * executing a UDR this process is. * * This method can only be called from within UDR::processData(). * * @see getNumParallelInstances() * @return A number between 0 and getNumParallelInstances() - 1. * @throws UDRException */ int UDRInvocationInfo::getMyInstanceNum() const { validateCallPhase(RUNTIME_WORK_CALL, RUNTIME_WORK_CALL, "UDRInvocationInfo::getMyInstanceNum()"); return myInstanceNum_; } /** * Print the object, for use in debugging. * * @see UDR::debugLoop() * @see UDRInvocationInfo::PRINT_INVOCATION_INFO_AT_RUN_TIME */ void UDRInvocationInfo::print() { printf("\nUDRInvocationInfo\n-----------------\n"); printf("UDR Name : %s\n", getUDRName().c_str()); printf("Num of table-valued inputs : %d\n", getNumTableInputs()); printf("Call phase : %s\n", callPhaseToString(callPhase_)); printf("Debug flags : 0x%x\n", getDebugFlags()); printf("Function type : %s\n", (funcType_ == GENERIC ? "GENERIC" : (funcType_ == MAPPER ? "MAPPER" : (funcType_ == REDUCER ? "REDUCER" : "Invalid function type")))); printf("User id : %s\n", getCurrentUser().c_str()); printf("Session user id : %s\n", getSessionUser().c_str()); printf("User role : %s\n", getCurrentRole().c_str()); if (isRunTime()) printf("Query id : %s\n", getQueryId().c_str()); bool needsComma = false; if (!isRunTime()) { printf("Formal parameters : ("); for (int p=0; p<getFormalParameters().getNumColumns(); p++) { std::string buf; if (needsComma) printf(", "); getFormalParameters().getColumn(p).toString(buf); printf("%s", buf.c_str()); needsComma = true; } printf(")\n"); } printf("Actual parameters : ("); needsComma = false; const ParameterListInfo &pli = par(); for (int p=0; p < pli.getNumColumns(); p++) { if (needsComma) printf(", "); if (pli.isAvailable(p)) { std::string strVal = pli.getString(p); if (pli.wasNull()) printf("NULL"); else printf("'%s'", strVal.c_str()); } else { // no value available, print name and type std::string buf; pli.getColumn(p).toString(buf, true); printf("\n "); printf(buf.c_str()); } needsComma = true; } printf(")\n"); if (udrWriterCompileTimeData_) { printf("UDR Writer comp. 
time data : "); udrWriterCompileTimeData_->print(); printf("\n"); } if (isRunTime()) printf("Instance number (0-based) : %d of %d\n", getMyInstanceNum(), getNumParallelInstances()); for (int c=0; c<getNumTableInputs(); c++) { printf("\nInput TableInfo %d\n-----------------\n", c); const_cast<TableInfo &>(in(c)).print(); } printf("\nOutput TableInfo\n----------------\n"); outputTableInfo_.print(); if (predicates_.size() > 0) { printf("\nPredicates\n----------\n"); for (int p=0; p<getNumPredicates(); p++) { std::string predString; getPredicate(p).toString(predString, out()); switch (getPredicate(p).getEvaluationCode()) { case PredicateInfo::UNKNOWN_EVAL: break; case PredicateInfo::EVALUATE_ON_RESULT: predString += " (evaluated on result)"; break; case PredicateInfo::EVALUATE_IN_UDF: predString += " (evaluated by the UDF)"; break; case PredicateInfo::EVALUATE_IN_CHILD: predString += " (evaluated in the child)"; break; default: predString += " -- invalid evaluation code!"; break; } printf(" %s\n", predString.c_str()); } } } int UDRInvocationInfo::serializedLength() { // Format: base class + name + sqlAccessType + sqlTransactionType_ + // sqlRights + isolationType + debugFlags + type + callPhase + // numTableInputs + n*TableInfo + TableInfo(outputTableInfo_) + // formal params + actual params + num preds + preds int result = TMUDRSerializableObject::serializedLength() + serializedLengthOfString(name_) + serializedLengthOfString(currentUser_) + serializedLengthOfString(sessionUser_) + serializedLengthOfString(currentRole_) + serializedLengthOfString(queryId_) + 9*serializedLengthOfInt(); int i; for (i=0; i<numTableInputs_; i++) result += inputTableInfo_[i].serializedLength(); result += outputTableInfo_.serializedLength(); result += formalParameterInfo_.serializedLength(); result += actualParameterInfo_.serializedLength(); for (std::vector<PredicateInfo *>::iterator it = predicates_.begin(); it != predicates_.end(); it++) { result += (*it)->serializedLength(); } return result; } // more convenient methods for external callers, // without side-effecting parameters void UDRInvocationInfo::serializeObj(Bytes outputBuffer, int outputBufferLength) { Bytes tempBuf = outputBuffer; int tempLen = outputBufferLength; serialize(tempBuf, tempLen); } void UDRInvocationInfo::deserializeObj(ConstBytes inputBuffer, int inputBufferLength) { ConstBytes tempBuf = inputBuffer; int tempLen = inputBufferLength; deserialize(tempBuf, tempLen); } int UDRInvocationInfo::serialize(Bytes &outputBuffer, int &outputBufferLength) { int result = TMUDRSerializableObject::serialize(outputBuffer, outputBufferLength); int i; result += serializeString(name_, outputBuffer, outputBufferLength); result += serializeInt(static_cast<int>(sqlAccessType_), outputBuffer, outputBufferLength); result += serializeInt(static_cast<int>(sqlTransactionType_), outputBuffer, outputBufferLength); result += serializeInt(static_cast<int>(sqlRights_), outputBuffer, outputBufferLength); result += serializeInt(static_cast<int>(isolationType_), outputBuffer, outputBufferLength); result += serializeInt(debugFlags_, outputBuffer, outputBufferLength); result += serializeInt(static_cast<int>(funcType_), outputBuffer, outputBufferLength); result += serializeInt(static_cast<int>(callPhase_), outputBuffer, outputBufferLength); result += serializeString(currentUser_, outputBuffer, outputBufferLength); result += serializeString(sessionUser_, outputBuffer, outputBufferLength); result += serializeString(currentRole_, outputBuffer, outputBufferLength); result += 
serializeString(queryId_, outputBuffer, outputBufferLength); result += serializeInt(numTableInputs_, outputBuffer, outputBufferLength); for (i=0; i<numTableInputs_; i++) result += inputTableInfo_[i].serialize(outputBuffer, outputBufferLength); result += outputTableInfo_.serialize(outputBuffer, outputBufferLength); result += formalParameterInfo_.serialize(outputBuffer, outputBufferLength); result += actualParameterInfo_.serialize(outputBuffer, outputBufferLength); result += serializeInt(predicates_.size(), outputBuffer, outputBufferLength); for (std::vector<PredicateInfo *>::iterator it = predicates_.begin(); it != predicates_.end(); it++) { result += (*it)->serialize(outputBuffer, outputBufferLength); } validateSerializedLength(result); return result; } int UDRInvocationInfo::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { int tempInt = 0; int i; int result = TMUDRSerializableObject::deserialize(inputBuffer, inputBufferLength); validateObjectType(UDR_INVOCATION_INFO_OBJ); result += deserializeString(name_, inputBuffer, inputBufferLength); result += deserializeInt(tempInt, inputBuffer, inputBufferLength); sqlAccessType_ = static_cast<SQLAccessType>(tempInt); result += deserializeInt(tempInt, inputBuffer, inputBufferLength); sqlTransactionType_ = static_cast<SQLTransactionType>(tempInt); result += deserializeInt(tempInt, inputBuffer, inputBufferLength); sqlRights_ = static_cast<SQLRightsType>(tempInt); result += deserializeInt(tempInt, inputBuffer, inputBufferLength); isolationType_ = static_cast<IsolationType>(tempInt); result += deserializeInt(debugFlags_, inputBuffer, inputBufferLength); result += deserializeInt(tempInt, inputBuffer, inputBufferLength); funcType_ = static_cast<FuncType>(tempInt); result += deserializeInt(tempInt, inputBuffer, inputBufferLength); callPhase_ = static_cast<CallPhase>(tempInt); result += deserializeString(currentUser_, inputBuffer, inputBufferLength); result += deserializeString(sessionUser_, inputBuffer, inputBufferLength); result += deserializeString(currentRole_, inputBuffer, inputBufferLength); result += deserializeString(queryId_, inputBuffer, inputBufferLength); result += deserializeInt(numTableInputs_, inputBuffer, inputBufferLength); for (i=0; i<numTableInputs_; i++) result += inputTableInfo_[i].deserialize(inputBuffer, inputBufferLength); result += outputTableInfo_.deserialize(inputBuffer, inputBufferLength); result += formalParameterInfo_.deserialize(inputBuffer, inputBufferLength); result += actualParameterInfo_.deserialize(inputBuffer, inputBufferLength); // delete all predicates for (std::vector<PredicateInfo *>::iterator p = predicates_.begin(); p != predicates_.end(); p++) delete *p; predicates_.clear(); result += deserializeInt(tempInt, inputBuffer, inputBufferLength); for (int p=0; p<tempInt; p++) { switch (getNextObjectType(inputBuffer,inputBufferLength)) { case COMP_PREDICATE_INFO_OBJ: { ComparisonPredicateInfo *p = new ComparisonPredicateInfo; result += p->deserialize(inputBuffer, inputBufferLength); predicates_.push_back(p); } break; default: throw UDRException( 38900, "Found invalid predicate object of type %d", static_cast<int>(getNextObjectType(inputBuffer,inputBufferLength))); } } // The UDR writer compile time data stays in place and is not affected // by deserialization. // totalNumInstances_ and myInstanceNum_ are currently not serialized, // since they are set only at runtime. 
validateDeserializedLength(result); return result; } void UDRInvocationInfo::validateCallPhase(CallPhase start, CallPhase end, const char *callee) const { if (callPhase_ < start && callPhase_ != UNKNOWN_CALL_PHASE) throw UDRException( 38900, "Method %s cannot be called before the %s phase", callee, callPhaseToString(start)); if (callPhase_ > end) throw UDRException( 38900, "Method %s cannot be called after the %s phase", callee, callPhaseToString(end)); } const char *UDRInvocationInfo::callPhaseToString(CallPhase c) { switch(c) { case UNKNOWN_CALL_PHASE: return "unknown"; break; case COMPILER_INITIAL_CALL: return "describeParamsAndColumns()"; break; case COMPILER_DATAFLOW_CALL: return "describeDataflowAndPredicates()"; break; case COMPILER_CONSTRAINTS_CALL: return "describeConstraints()"; break; case COMPILER_STATISTICS_CALL: return "describeStatistics()"; break; case COMPILER_DOP_CALL: return "describeDesiredDegreeOfParallelism()"; break; case COMPILER_PLAN_CALL: return "describePlanProperties()"; break; case COMPILER_COMPLETION_CALL: return "completeDescription()"; break; case RUNTIME_WORK_CALL: return "runtime work call"; break; default: return "invalid call phase!"; break; } } void UDRInvocationInfo::setQueryId(const char *qid) { queryId_ = qid; } void UDRInvocationInfo::setTotalNumInstances(int i) { totalNumInstances_ = i; } void UDRInvocationInfo::setMyInstanceNum(int i) { myInstanceNum_ = i; } // ------------------------------------------------------------------------ // Member functions for class UDRPlanInfo // ------------------------------------------------------------------------ UDRPlanInfo::UDRPlanInfo(UDRInvocationInfo *invocationInfo, int planNum) : TMUDRSerializableObject(UDR_PLAN_INFO_OBJ, getCurrentVersion()), invocationInfo_(invocationInfo), planNum_(planNum), costPerRow_(-1), degreeOfParallelism_(ANY_DEGREE_OF_PARALLELISM), udrWriterCompileTimeData_(NULL), planData_(NULL), planDataLength_(0) {} UDRPlanInfo::~UDRPlanInfo() { if (udrWriterCompileTimeData_) delete udrWriterCompileTimeData_; if (planData_) delete planData_; } /** * Get a unique id for a given plan within a UDR invocation. * * @return Plan number for this object, relative to the invocation. */ int UDRPlanInfo::getPlanNum() const { return planNum_; } /** * Get the cost of the UDR per row, approximately in nanoseconds. * * @see setCostPerRow() * @return Cost of the UDR per row, in nanoseconds, for optimization purposes. */ long UDRPlanInfo::getCostPerRow() const { return costPerRow_; } /** * Return the desired degree of parallelism for this plan. * * @see setDesiredDegreeOfParallelism() * @return Degree of parallelism to be used for this plan alternative * (positive) or one of the enum values in * UDRPlanInfo::SpecialDegreeOfParallelism (zero or negative). */ int UDRPlanInfo::getDesiredDegreeOfParallelism() const { return degreeOfParallelism_; } /** * Set the desired degree of parallelism. * * Only use this method from within the * UDR::describeDesiredDegreeOfParallelism() method. * * Here are some special values that can be set, in * addition to positive numbers. These are defined in * class UDRPlanInfo. * * @li @c ANY_DEGREE_OF_PARALLELISM: * This will allow the optimizer to choose any degree * of parallelism, including 1 (serial execution) * @li @c DEFAULT_DEGREE_OF_PARALLELISM: * Currently the same as ANY_DEGREE_OF_PARALLELISM. * The optimizer will use a heuristic based on * the estimated cardinality. * @li @c MAX_DEGREE_OF_PARALLELISM: * Choose the highest possible degree of parallelism. 
* @li @c ONE_INSTANCE_PER_NODE: * Start one parallel instance on every Trafodion node. * This is mostly meant for internal TMUDFs, e.g. a * TMUDF to read the log files on every node. * * @see getDesiredDegreeOfParallelism() * @param dop desired degree of parallelism (a positive number or * one of the enum values in * UDRPlanInfo::SpecialDegreeOfParallelism). * @throws UDRException */ void UDRPlanInfo::setDesiredDegreeOfParallelism(int dop) { invocationInfo_->validateCallPhase(UDRInvocationInfo::COMPILER_DOP_CALL, UDRInvocationInfo::COMPILER_DOP_CALL, "UDRPlanInfo::setDesiredDegreeOfParallelism()"); degreeOfParallelism_ = dop; } /** * Set the cost of the UDR per row, approximately in nanoseconds. * * Specifying a cost can help with query plan issues. Note that the * operator cost ("EST_OPER_COST") in EXPLAIN is not directly related * to the nanosecond value specified here: * <ul> * <li>For parallel plans (those under an ESP_EXCHANGE), the cost * is calculated for one parallel instance only. * <li>The cost in nanoseconds is converted to internal units * (see CQD NCM_UDR_NANOSEC_FACTOR). * <li>The EXPLAIN cost contains additional factors, accounting * for the cost to send input data to the process that executes * the UDR and for sending back the result. * </ul> * * The default implementation estimates the cost to be approximately * 100 * sqrt(out().getRecordLength()). Therefore, a value of * 1000 might be a good starting point for a cost per row estimate, * assuming an output row length of about 1 KB. Increase this for * more complex UDFs or for wider result rows, decrease it for * simpler UDFs or shorter result rows. * * Only use this method from within the * UDR::describeDesiredDegreeOfParallelism() method. * * @see UDR::describeDesiredDegreeOfParallelism() * @see getCostPerRow() * @see UDR::TupleInfo::getRecordLength() * @param nanoseconds Cost of the UDR per row, in nanoseconds, for * optimization purposes. */ void UDRPlanInfo::setCostPerRow(long nanoseconds) { invocationInfo_->validateCallPhase(UDRInvocationInfo::COMPILER_DOP_CALL, UDRInvocationInfo::COMPILER_PLAN_CALL, "UDRPlanInfo::setCostPerRow()"); costPerRow_ = nanoseconds; } /** * Get data to persist between calls of the optimizer interface * * @see setUDRWriterCompileTimeData() * @return UDR writer-specific data that was previously attached or NULL. * @throws UDRException */ UDRWriterCompileTimeData *UDRPlanInfo::getUDRWriterCompileTimeData() { invocationInfo_->validateCallPhase(UDRInvocationInfo::COMPILER_DATAFLOW_CALL, UDRInvocationInfo::COMPILER_COMPLETION_CALL, "UDRPlanInfo::getUDRWriterCompileTimeData()"); return udrWriterCompileTimeData_; } /** * Set data to persist between calls of the optimizer interface * * This call can be used to attach an object derived from class * UDRWriterCompileTimeData to the UDRPlanInfo object. Once * attached, the data will be carried between the stages of the * optimizer interface and can be used to keep state. Note that * this data will be deleted at the end of the optimizer phase and * will not persist until runtime. * * Use this method to keep data that is specific to a query plan * alternative, represented by the UDRPlanInfo object. Use * UDRInvocationInfo::setUDRWriterCompileTimeData() to keep data * that is common for the entire UDR invocation. * * @see UDRInvocationInfo::setUDRWriterCompileTimeData() * @see getUDRWriterCompileTimeData() * @param compileTimeData UDR writer-defined compile-time data to attach. 
* @throws UDRException */ void UDRPlanInfo::setUDRWriterCompileTimeData( UDRWriterCompileTimeData *compileTimeData) { invocationInfo_->validateCallPhase(UDRInvocationInfo::COMPILER_DATAFLOW_CALL, UDRInvocationInfo::COMPILER_COMPLETION_CALL, "UDRPlanInfo::setUDRWriterCompileTimeData()"); // for now we can't allow this, since we would call the destructor of // this object after we unloaded the DLL containing the code // Todo: Cache DLL opens, at least until after the // UDRInvocationInfo objects get deleted. throw UDRException( 38912, "UDRPlanInfo::setUDRWriterCompileTimeData() not yet supported"); if (udrWriterCompileTimeData_) delete udrWriterCompileTimeData_; udrWriterCompileTimeData_ = compileTimeData; } /** * Attach a byte array to the plan to be sent to the runtime instances. * * Compile time and runtime interfaces of the UDR can be called from * different processes, since UDRs can be executed in parallel and on * different nodes. If the UDR writer would like to carry state from * the compiler interface calls to runtime calls, the best way to achieve * this to attach it using this call and to retrieve the state at runtime * using the getPlanData() call. * * The best place to use this method is from within * UDR::completeDescription() method, since this method is * called on the optimal plan that will be used at runtime. It can * also be called from other methods, and the plan data will be * discarded if the plan is not chosen. * * @see getPlanData() * * @param planData A byte array, content defined by the UDR writer, to be * sent to all runtime instances executing the UDR. The buffer * can and should be deleted by the caller after calling this method. * @param planDataLength Length, in bytes, of the planData. * @throws UDRException */ void UDRPlanInfo::addPlanData(const char *planData, int planDataLength) { invocationInfo_->validateCallPhase(UDRInvocationInfo::COMPILER_DOP_CALL, UDRInvocationInfo::COMPILER_COMPLETION_CALL, "UDRPlanInfo::addPlanData()"); if (planDataLength > 0 && planData == NULL) throw UDRException(38900, "UDRWriterCompileTimeData::addPlanData() with no plan data and length >0"); if (planDataLength_) delete planData_; planData_ = NULL; planDataLength_ = 0; if (planDataLength) { // make a new copy of the input data planData_ = new char[planDataLength]; memcpy(const_cast<char *>(planData_), const_cast<char *>(planData), planDataLength); planDataLength_ = planDataLength; } } /** * Retrieve plan data attached to the UDR invocation and plan. * * This method can be called at runtime to get state generated at compile time. * * @see setPlanData() * * @param planDataLength (out) Length of returned plan data. * @return Pointer to a byte array with plan data generated by the UDR writer * at compile time. */ const char *UDRPlanInfo::getPlanData(int &planDataLength) { planDataLength = planDataLength_; return planData_; } /** * Print the object, for use in debugging. * * @see UDRInvocationInfo::PRINT_INVOCATION_INFO_AT_RUN_TIME */ void UDRPlanInfo::print() { printf("\nUDRPlanInfo\n-----------------------\n"); printf("Plan number : %d\n", planNum_); printf("Cost per row : %ld\n", costPerRow_); printf("Degree of parallelism : %d\n", degreeOfParallelism_); if (udrWriterCompileTimeData_) { printf("UDR Writer comp. 
time data : "); udrWriterCompileTimeData_->print(); printf("\n"); } printf("UDF Writer plan data length: "); printf("%d\n", planDataLength_); } int UDRPlanInfo::serializedLength() { // Format: base class + long(cost) + int(DoP) + UDR Writer data int result = TMUDRSerializableObject::serializedLength() + serializedLengthOfLong() + serializedLengthOfInt(); int udrWriterPlanDataLen = 0; result += serializedLengthOfBinary(planDataLength_); return result; } // more convenient methods for external callers, // without side-effecting parameters void UDRPlanInfo::serializeObj(Bytes outputBuffer, int outputBufferLength) { Bytes tempBuf = outputBuffer; int tempLen = outputBufferLength; serialize(tempBuf, tempLen); } void UDRPlanInfo::deserializeObj(ConstBytes inputBuffer, int inputBufferLength) { ConstBytes tempBuf = inputBuffer; int tempLen = inputBufferLength; deserialize(tempBuf, tempLen); } int UDRPlanInfo::serialize(Bytes &outputBuffer, int &outputBufferLength) { int result = TMUDRSerializableObject::serialize(outputBuffer, outputBufferLength); result += serializeLong(costPerRow_, outputBuffer, outputBufferLength); result += serializeInt(degreeOfParallelism_, outputBuffer, outputBufferLength); int udrWriterPlanDataLen = 0; char *udrWriterPlanData = NULL; result += serializeBinary(planData_, planDataLength_, outputBuffer, outputBufferLength); validateSerializedLength(result); return result; } int UDRPlanInfo::deserialize(ConstBytes &inputBuffer, int &inputBufferLength) { int result = TMUDRSerializableObject::deserialize(inputBuffer, inputBufferLength); validateObjectType(UDR_PLAN_INFO_OBJ); result += deserializeLong(costPerRow_, inputBuffer, inputBufferLength); result += deserializeInt(degreeOfParallelism_, inputBuffer, inputBufferLength); result += deserializeBinary((const void **) &planData_, planDataLength_, true, inputBuffer, inputBufferLength); validateDeserializedLength(result); return result; } // ------------------------------------------------------------------------ // Member functions for class UDR // ------------------------------------------------------------------------ /** * Default constructor. * * Use this in the constructor of a derived class. */ UDR::UDR() : getNextRowPtr_(NULL), emitRowPtr_(NULL) {} /** * Virtual Destructor. * * Override this destructor and deallocate any resources of a derived * class, if necessary. Note that a UDR object may be used * for several UDR invocations, sometimes at the same time, in one * or more queries. Therefore, this class is for storing resources that * can be shared among multiple invocations. Note also that compile time * and run time may happen in different processes, so it is not possible * to carry state from compile time to run time calls for invocations * with this class. See below for how to carry invocation-related information * between the different phases. * * @see UDRInvocationInfo::setUDRWriterCompileTimeData() * @see UDRPlanInfo::setUDRWriterCompileTimeData() * @see UDRPlanInfo::addPlanData() * @throws UDRException */ UDR::~UDR() {} /** * First method of the compiler interface (optional). * * Describe the output columns of a TMUDF, based on a description of * its parameters (including parameter values that are specified as a * constant) and the description of the table-valued input columns. * When the compiler calls this, it will have set up the formal and * actual parameter descriptions as well as an output column * description containing all the output parameters defined in the * CREATE FUNCTION DDL (if any). 
* * This method should do a general check of things it expects * that can be validated at this time. Things to check: * @li Number, types and values of actual parameters. * @li Number of table-valued inputs and columns of these inputs. * @li PARTITION BY and ORDER BY clause specified for input tables. * @li Other things like user ids, etc. * * Setting the function type with the UDRInvocationInfo::setFuncType() * method will help the compiler generate more efficient code, * * The method should then generate a description of the table-valued * output columns, if applicable and if the columns provided at DDL * time are not sufficient. The "See also" section points to methods * to set these values. * * Columns of the table-valued output can be declard as "pass-thru" * columns to make many optimizations simpler. * * This method must also add to or alter the formal parameter list * to match the list of actual parameters. * * The default implementation does nothing. * * @see UDRInvocationInfo::par() * @see UDRInvocationInfo::getNumTableInputs() * @see UDRInvocationInfo::in() * @see UDRInvocationInfo::setFuncType() * @see UDRInvocationInfo::addFormalParameter() * @see UDRInvocationInfo::addPassThruColumns() * @see TupleInfo::addColumn() * @see TupleInfo::addIntegerColumn() * @see TupleInfo::addLongColumn() * @see TupleInfo::addCharColumn() * @see TupleInfo::addVarCharColumn() * @see TupleInfo::addColumns() * @see TupleInfo::addColumnAt() * @see TupleInfo::deleteColumn(int) * @see TupleInfo::deleteColumn(const std::string &) * * @param info A description of the UDR invocation. * @throws UDRException */ void UDR::describeParamsAndColumns(UDRInvocationInfo &info) { } /** * Second method of the compiler interface (optional). * * Eliminate unneeded columns and decide where to execute predicates. * * This is the second call in the compiler interface, after * describeParamsAndColumns(). When the compiler calls this, it will * have marked the UDF result columns with a usage code, indicating * any output columns that are not required for this particular query. * It will also have created a list of predicates that need to be * evaluated. * * This method should do three things: * @li Mark columns of the table-valued inputs as not used, based on * the result column usage and internal needs of the UDF. Such * input columns will later be eliminated. * @li Mark output columns that are not used and that can be * easily suppressed by the UDF as NOT_PRODUCED. Such columns * will be eliminated as well. * @li Decide where to evaluate each predicate, a) on the UDF result * (default), b) inside the UDF by code written by the UDF writer, * or c) in the table-valued inputs. * * The default implementation does not mark any of the table-valued input * columns as NOT_USED. It also does not mark any output columns as * NOT_PRODUCED. Predicate handling in the default implementation * depends on the function type: * @li UDRInvocationInfo::GENERIC: * No predicates are pushed down, because the compiler * does not know whether any of the eliminated rows might * have altered the output of the UDF. One example is the * "sessionize" UDF, where eliminated rows can lead to * differences in session ids. * @li UDRInvocationInfo::MAPPER: * All predicates on pass-thru columns are pushed down to * table-valued inputs. Since the UDF carries no state between * the input rows it sees, eliminating any input rows will * not alter any results for other rows. 
* @li UDRInvocationInfo::REDUCER: * Only predicates on the PARTITION BY columns will be * pushed to table-valued inputs. These predicates may * eliminate entire groups of rows (partitions), and since * no state is carried between such groups that is valid. * * NOTE: When eliminating columns from the table-valued inputs or * the table-valued result, column numbers may change in the * next call, as these columns are actually removed from the * lists. If the UDF carries state between calls and if that * state refers to column numbers, they will need to be * updated. This is best done in this describeDataflowAndPredicates() * call. * * @see ColumnInfo::getUsage() * @see ColumnInfo::setUsage() (to mark output columns as NOT_PRODUCED) * @see UDRInvocationInfo::setFuncType() * @see UDRInvocationInfo::setChildColumnUsage() (to mark unused input columns) * @see UDRInvocationInfo::setUnusedPassthruColumns() * @see UDRInvocationInfo::pushPredicatesOnPassthruColumns() * @see UDRInvocationInfo::setPredicateEvaluationCode() * * @param info A description of the UDR invocation. * @throws UDRException */ void UDR::describeDataflowAndPredicates(UDRInvocationInfo &info) { switch (info.getFuncType()) { case UDRInvocationInfo::GENERIC: break; case UDRInvocationInfo::MAPPER: // push as many predicates as possible to the children info.pushPredicatesOnPassthruColumns(); break; case UDRInvocationInfo::REDUCER: { int partitionedChild = -1; // find a child that uses a PARTITION BY for (int c=0; c<info.getNumTableInputs(); c++) if (info.in(c).getQueryPartitioning().getType() == PartitionInfo::PARTITION) { partitionedChild = c; break; } if (partitionedChild >= 0) { const PartitionInfo &partInfo = info.in(partitionedChild).getQueryPartitioning(); int numPredicates = info.getNumPredicates(); // walk through all comparison predicates for (int p=0; p<numPredicates; p++) if (info.isAComparisonPredicate(p)) { // a predicate on column "predCol" int predCol = info.getComparisonPredicate(p).getColumnNumber(); const ColumnInfo &colInfo = info.out().getColumn(predCol); const ProvenanceInfo &prov = colInfo.getProvenance(); // find the corresponding child table and child column # if (prov.getInputTableNum() == partitionedChild) { int inputColNum = prov.getInputColumnNum(); // check whether inputColNum appears in the PARTITION BY clause for (int pbColIx=0; pbColIx<partInfo.getNumEntries(); pbColIx++) if (partInfo.getColumnNum(pbColIx) == inputColNum) { // yes, this is a predicate on a partitioning column, // push it down if possible info.pushPredicatesOnPassthruColumns(p, p); break; } } // column is from the partitioned input table } // is a comparison predicate } // found a partitioned child table } // REDUCER break; default: throw UDRException( 38900, "Invalid UDR Function type: %d", static_cast<int>(info.getFuncType())); } } /** * Third method of the compiler interface (optional). * * Set up logical constraints on the UDF result table. * * When the compiler calls this method, it will have synthesized * constraints on the table-valued inputs, if any. The UDR writer * can now indicate constraints on the table-valued result. * * The default implementation does nothing. * * @see TableInfo::getNumConstraints() * @see TableInfo::getConstraint() * @see TableInfo::addCardinalityConstraint() * @see TableInfo::addUniquenessConstraint() * @see UDRInvocationInfo::propagateConstraintsFor1To1UDFs() * @param info A description of the UDR invocation. 
* @throws UDRException */ void UDR::describeConstraints(UDRInvocationInfo &info) { } /** * Fourth method of the compiler interface (optional). * * Set up statistics for the table-valued result. * * When the optimizer calls this method, it will have synthesized * some statistics for the table-valued inputs, if any. The UDR * writer can now indicate the estimated row count for the table-valued * result and estimated number of unique values for the output columns. * * The default implementation does nothing. If no estimated cardinality * is set for the output table and no estimated number of unique values * is set for output columns, the optimizer will make default assumptions. * Here are some of these default assumptions: * <ul> * <li>UDRs of type UDRInvocationInfo::MAPPER return one output row for * each row in their largest input table. * <li>UDRs of type UDRInvocationInfo::REDUCER return one output row for * every partition in their largest partitioned input table. * <li>For output columns that are passthru columns, the estimated * unique entries are the same as for the underlying column in the * table-valued input. * <li>Other default cardinality and unique entry counts can be influenced * with defaults (CONTROL QUERY DEFAULT) in Trafodion SQL. * </ul> * * @see UDRInvocationInfo::setFuncType() * @see ColumnInfo::getEstimatedUniqueEntries() * @see ColumnInfo::setEstimatedUniqueEntries() * @see TableInfo::getEstimatedNumRows() * @see TableInfo::setEstimatedNumRows() * @see TableInfo::getEstimatedNumPartitions() * * @param info A description of the UDR invocation. * @throws UDRException */ void UDR::describeStatistics(UDRInvocationInfo &info) { // do nothing } /** * Fifth method of the compiler interface (optional). * * Describe the desired parallelism of a UDR. * * This method can be used to specify a desired degree of * parallelism, either in absolute or relative terms. * * The default behavior is to allow any degree of parallelism for * TMUDFs of function type UDRInvocationInfo::MAPPER or * UDRInvocationInfo::REDUCER that have exactly one table-valued * input. The default behavior forces serial execution * in all other cases. The reason is that for a single table-valued * input, there is a natural way to parallelize the function by * parallelizing its input a la MapReduce. In all other cases, * parallel execution requires active participation by the UDF, * which is why the UDF needs to signal explicitly that it can * handle such flavors of parallelism. * * Default implementation: * @code * if (info.getNumTableInputs() == 1 && * (info.getFuncType() == UDRInvocationInfo::MAPPER || * info.getFuncType() == UDRInvocationInfo::REDUCER)) * plan.setDesiredDegreeOfParallelism(UDRPlanInfo::ANY_DEGREE_OF_PARALLELISM); * else * plan.setDesiredDegreeOfParallelism(1); // serial execution * @endcode * * @see UDRPlanInfo::setDesiredDegreeOfParallelism() * @see UDRInvocationInfo::setFuncType() * * @param info A description of the UDR invocation. * @param plan Plan-related description of the UDR invocation. * @throws UDRException */ void UDR::describeDesiredDegreeOfParallelism(UDRInvocationInfo &info, UDRPlanInfo &plan) { if (info.getNumTableInputs() == 1 && (info.getFuncType() == UDRInvocationInfo::MAPPER || info.getFuncType() == UDRInvocationInfo::REDUCER)) plan.setDesiredDegreeOfParallelism(UDRPlanInfo::ANY_DEGREE_OF_PARALLELISM); else plan.setDesiredDegreeOfParallelism(1); // serial execution } /** * Sixth method of the compiler interface (optional). 
* * The query optimizer calls this method once for every plan alternative * considered for a UDR invocation. It provides the required partitioning * and ordering of the result. The UDR writer can decide whether these * requirements are acceptable to the UDR and whether any partitioning * or ordering of the table-valued inputs is required to produce the required * result properties. * * TBD: Default behavior. * * @param info A description of the UDR invocation. * @param plan Plan-related description of the UDR invocation. * @throws UDRException */ void UDR::describePlanProperties(UDRInvocationInfo &info, UDRPlanInfo &plan) { // TBD } /** * Seventh and final method of the compiler interface for TMUDFs (optional). * * This final compile time call gives the UDF writer the opportunity * to examine the chosen query plan, to pass information on to the * runtime method, using UDRPlanInfo::addPlanData(), and to clean up * any resources related to the compile phase of a particular TMUDF * invocation. * * The default implementation does nothing. * * @see UDRPlanInfo::addPlanData() * @see UDRPlanInfo::getUDRWriterCompileTimeData() * @see UDRInvocationInfo::getUDRWriterCompileTimeData() * * @param info A description of the UDR invocation. * @param plan Plan-related description of the UDR invocation. * @throws UDRException */ void UDR::completeDescription(UDRInvocationInfo &info, UDRPlanInfo &plan) { } /** * Runtime code for UDRs (required). * * This is the only method that is mandatory in the implementation * of a UDR (in addition to the factory method). * * This method needs to set the output column values and emit * rows by calling the emitRows() method. It can read rows from * table-valued inputs, using the getNextRow() method. * * @see TupleInfo::setInt() * @see TupleInfo::setString() * @see emitRow() * @see getNextRow() * @see TupleInfo::getInt() * @see TupleInfo::getString() * @see UDRInvocationInfo::copyPassThruData() * * @param info A description of the UDR invocation. * @param plan Plan-related description of the UDR invocation. * @throws UDRException */ void UDR::processData(UDRInvocationInfo &info, UDRPlanInfo &plan) { throw UDRException(38900,"UDR::processData() must be overridden by the UDF"); } /** * Debugging hook for UDRs. * * This method is called in debug Trafodion builds when certain * flags are set in the UDR_DEBUG_FLAGS CQD (CONTROL QUERY DEFAULT). * See https://wiki.trafodion.org/wiki/index.php/Tutorial:_The_object-oriented_UDF_interface#Debugging_UDF_code * for details. * * The default implementation prints out the process id and then * goes into an endless loop. The UDF writer can then attach a * debugger, set breakpoints and force the execution out of the loop. * * Note that the printout of the pid may not always be displayed on * a terminal, for example if the process is executing on a different node. */ void UDR::debugLoop() { int debugLoop = 1; int myPid = static_cast<int>(getpid()); printf("Process %d entered a loop to be able to debug it\n", myPid); // go into a loop to allow the user to attach a debugger, // if requested, set debugLoop = 2 in the debugger to get out while (debugLoop < 2) debugLoop = 1-debugLoop; } /** * Read a row of a table-value input. * * This method can only be called from within processData(). * * @param info A description of the UDR invocation. * @param tableIndex Indicator for which table-valued input to read data. * @return true if another row could be read, false if it reached end of data. 
* @throws UDRException */ bool UDR::getNextRow(UDRInvocationInfo &info, int tableIndex) { SQLUDR_Q_STATE qstate = SQLUDR_Q_MORE; (*getNextRowPtr_)(info.in(tableIndex).getRowPtr(), tableIndex, &qstate); if (info.getDebugFlags() & UDRInvocationInfo::TRACE_ROWS) switch (qstate) { case SQLUDR_Q_MORE: { std::string row; info.in(tableIndex).getDelimitedRow(row,'|',true); // replace any control characters with escape sequences for (int c=row.size()-1; c>=0; c--) if (row[c] < 32) { char buf[5]; // print \x0a for an ASCII line feed (decimal 10) snprintf(buf, sizeof(buf), "\\x%02hhx", row[c]); row.replace(c, 1, buf); } printf("(%d) Input row from table %d: %s\n", info.getMyInstanceNum(), tableIndex, row.c_str()); } break; case SQLUDR_Q_EOD: printf("(%d) Input table %d reached EOD\n", info.getMyInstanceNum(), tableIndex); break; case SQLUDR_Q_CANCEL: printf("(%d) Cancel request from input table %d\n", info.getMyInstanceNum(), tableIndex); break; default: printf("(%d) Invalid queue state %d from input table %d\n", info.getMyInstanceNum(), qstate, tableIndex); } return (qstate == SQLUDR_Q_MORE); } /** * Emit a row of the table-valued result. * * This method can only be called from within processData(). * * @param info A description of the UDR invocation. * @throws UDRException */ void UDR::emitRow(UDRInvocationInfo &info) { SQLUDR_Q_STATE qstate = SQLUDR_Q_MORE; if (info.getDebugFlags() & UDRInvocationInfo::TRACE_ROWS) { std::string row; info.out().getDelimitedRow(row,'|',true); // replace any control characters with escape sequences for (int c=row.size()-1; c>=0; c--) if (row[c] < 32) { char buf[5]; // print \x0a for an ASCII line feed (decimal 10) snprintf(buf, sizeof(buf), "\\x%02hhx", row[c]); row.replace(c, 1, buf); } printf("(%d) Emitting row: %s\n", info.getMyInstanceNum(), row.c_str()); } (*emitRowPtr_)(info.out().getRowPtr(), 0, &qstate); } /** * For versioning, return features supported by the UDR writer. * * This method can be used in the future to facilitate changes in * the UDR interface. UDR writers will be able to indicte through this * method whether they support new features. * * The default implementation returns 0 (no extra features are supported). * * @return A yet to be determined set of bit flags or codes for * supported features. */ int UDR::getFeaturesSupportedByUDF() { return 0; }
1
9,028
Is the tutorial going to move to the new Trafodion website or will it stay on the Confluence wiki? (Check with Gunnar.)
apache-trafodion
cpp
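The oldf field of the record above documents the Trafodion TMUDF C++ entry points: describeParamsAndColumns() at compile time, processData() with getNextRow()/emitRow() at run time, plus helpers such as addPassThruColumns(), copyPassThruData() and the TupleInfo setters. As a reading aid, here is a minimal, hedged sketch of how those calls fit together in a mapper-style UDF. The sqludr.h header, the tmudr namespace, TupleInfo::getNumColumns(), the default arguments, and the extern "C" factory convention are assumptions drawn from the wider Trafodion API rather than from this record, so exact signatures may differ.

    // Minimal mapper-style TMUDF sketch using the interface documented above.
    // Names not shown in the record (header, namespace, factory convention) are assumptions.
    #include "sqludr.h"

    using namespace tmudr;

    class AddRowNumUDF : public UDR
    {
    public:
      // Compile time: pass the input columns through and append one integer column.
      void describeParamsAndColumns(UDRInvocationInfo &info)
      {
        info.addPassThruColumns();                    // forward all input columns to the output
        info.out().addIntegerColumn("ROW_NUM");       // extra column produced by this UDF
        info.setFuncType(UDRInvocationInfo::MAPPER);  // enables predicate pushdown and parallelism
      }

      // Run time: emit one output row per input row.
      void processData(UDRInvocationInfo &info, UDRPlanInfo &plan)
      {
        int rowNum = 0;
        while (getNextRow(info, 0))                   // read the next row of table-valued input 0
        {
          info.copyPassThruData();                    // fill the pass-thru output columns
          info.out().setInt(info.out().getNumColumns() - 1, rowNum++);
          emitRow(info);                              // emit the completed output row
        }
      }
    };

    // Factory function named in the CREATE FUNCTION DDL (assumed convention).
    extern "C" UDR * ADDROWNUM()
    {
      return new AddRowNumUDF();
    }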
@@ -55,10 +55,8 @@ func FromStatus(st *status.Status) error { switch errDetails := errDetails.(type) { case *errordetails.ShardOwnershipLostFailure: return newShardOwnershipLost(st, errDetails) - case *errordetails.RetryTaskFailure: - return newRetryTask(st, errDetails) case *errordetails.RetryTaskV2Failure: - return newRetryTaskV2(st, errDetails) + return convertRetryReplication(st, errDetails) } }
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package serviceerror import ( "github.com/gogo/status" "go.temporal.io/api/serviceerror" "google.golang.org/grpc/codes" "go.temporal.io/server/api/errordetails/v1" ) // FromStatus converts gogo gRPC status to service error. func FromStatus(st *status.Status) error { if st == nil || st.Code() == codes.OK { return nil } errDetails := extractErrorDetails(st) switch st.Code() { case codes.InvalidArgument: switch errDetails := errDetails.(type) { case *errordetails.CurrentBranchChangedFailure: return newCurrentBranchChanged(st, errDetails) } case codes.AlreadyExists: switch errDetails.(type) { case *errordetails.TaskAlreadyStartedFailure: return newTaskAlreadyStarted(st) } case codes.Aborted: switch errDetails := errDetails.(type) { case *errordetails.ShardOwnershipLostFailure: return newShardOwnershipLost(st, errDetails) case *errordetails.RetryTaskFailure: return newRetryTask(st, errDetails) case *errordetails.RetryTaskV2Failure: return newRetryTaskV2(st, errDetails) } } return serviceerror.FromStatus(st) } func extractErrorDetails(st *status.Status) interface{} { details := st.Details() if len(details) > 0 { return details[0] } return nil }
1
10,693
maybe call it `fromRetryTaskV2Failure`
temporalio-temporal
go
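The msg of the record above suggests calling the helper introduced by this patch fromRetryTaskV2Failure rather than convertRetryReplication, which matches the From*/from* naming already used around FromStatus. Below is a sketch of the Aborted branch with that suggestion applied; it is only an excerpt of the switch in FromStatus, and the helper (whose body is not shown in the record) is assumed to keep the behavior of convertRetryReplication.

    case codes.Aborted:
        switch errDetails := errDetails.(type) {
        case *errordetails.ShardOwnershipLostFailure:
            return newShardOwnershipLost(st, errDetails)
        case *errordetails.RetryTaskV2Failure:
            // reviewer-suggested name; same conversion as convertRetryReplication in the patch
            return fromRetryTaskV2Failure(st, errDetails)
        }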
@@ -16,11 +16,13 @@ package com.google.api.codegen.transformer.php; import com.google.api.codegen.InterfaceView; import com.google.api.codegen.config.FieldConfig; +import com.google.api.codegen.config.FlatteningConfig; import com.google.api.codegen.config.GapicProductConfig; import com.google.api.codegen.config.GrpcStreamingConfig.GrpcStreamingType; import com.google.api.codegen.metacode.InitCodeContext; import com.google.api.codegen.metacode.InitCodeContext.InitCodeOutputType; import com.google.api.codegen.php.PhpGapicCodePathMapper; +import com.google.api.codegen.transformer.DynamicLangApiMethodTransformer; import com.google.api.codegen.transformer.FileHeaderTransformer; import com.google.api.codegen.transformer.GapicInterfaceContext; import com.google.api.codegen.transformer.GapicMethodContext;
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.transformer.php; import com.google.api.codegen.InterfaceView; import com.google.api.codegen.config.FieldConfig; import com.google.api.codegen.config.GapicProductConfig; import com.google.api.codegen.config.GrpcStreamingConfig.GrpcStreamingType; import com.google.api.codegen.metacode.InitCodeContext; import com.google.api.codegen.metacode.InitCodeContext.InitCodeOutputType; import com.google.api.codegen.php.PhpGapicCodePathMapper; import com.google.api.codegen.transformer.FileHeaderTransformer; import com.google.api.codegen.transformer.GapicInterfaceContext; import com.google.api.codegen.transformer.GapicMethodContext; import com.google.api.codegen.transformer.InitCodeTransformer; import com.google.api.codegen.transformer.MockServiceTransformer; import com.google.api.codegen.transformer.ModelToViewTransformer; import com.google.api.codegen.transformer.ModelTypeTable; import com.google.api.codegen.transformer.SurfaceNamer; import com.google.api.codegen.transformer.TestCaseTransformer; import com.google.api.codegen.util.Name; import com.google.api.codegen.util.SymbolTable; import com.google.api.codegen.util.php.PhpTypeTable; import com.google.api.codegen.util.testing.StandardValueProducer; import com.google.api.codegen.util.testing.TestValueGenerator; import com.google.api.codegen.util.testing.ValueProducer; import com.google.api.codegen.viewmodel.ClientMethodType; import com.google.api.codegen.viewmodel.ImportSectionView; import com.google.api.codegen.viewmodel.ViewModel; import com.google.api.codegen.viewmodel.testing.ClientTestClassView; import com.google.api.codegen.viewmodel.testing.ClientTestFileView; import com.google.api.codegen.viewmodel.testing.MockGrpcMethodView; import com.google.api.codegen.viewmodel.testing.MockServiceImplFileView; import com.google.api.codegen.viewmodel.testing.MockServiceImplView; import com.google.api.codegen.viewmodel.testing.MockServiceUsageView; import com.google.api.codegen.viewmodel.testing.TestCaseView; import com.google.api.tools.framework.model.Interface; import com.google.api.tools.framework.model.Method; import com.google.api.tools.framework.model.Model; import com.google.common.collect.ImmutableList; import java.io.File; import java.util.ArrayList; import java.util.List; /** Responsible for producing testing related views for PHP */ public class PhpGapicSurfaceTestTransformer implements ModelToViewTransformer { private static final String TEST_TEMPLATE_FILE = "php/test.snip"; private static final String MOCK_SERVICE_TEMPLATE_FILE = "php/mock_service.snip"; private final ValueProducer valueProducer = new StandardValueProducer(); private final PhpImportSectionTransformer importSectionTransformer = new PhpImportSectionTransformer(); private final FileHeaderTransformer fileHeaderTransformer = new FileHeaderTransformer(importSectionTransformer); private final TestValueGenerator valueGenerator = new TestValueGenerator(valueProducer); private final 
TestCaseTransformer testCaseTransformer = new TestCaseTransformer(valueProducer); private final MockServiceTransformer mockServiceTransformer = new MockServiceTransformer(); private final PhpFeatureConfig featureConfig = new PhpFeatureConfig(); private final PhpGapicCodePathMapper pathMapper = PhpGapicCodePathMapper.newBuilder().setPrefix("tests/unit").build(); @Override public List<String> getTemplateFileNames() { return ImmutableList.<String>of(TEST_TEMPLATE_FILE, MOCK_SERVICE_TEMPLATE_FILE); } @Override public List<ViewModel> transform(Model model, GapicProductConfig productConfig) { List<ViewModel> models = new ArrayList<ViewModel>(); PhpSurfaceNamer surfacePackageNamer = new PhpSurfaceNamer(productConfig.getPackageName()); PhpSurfaceNamer testPackageNamer = new PhpSurfaceNamer(surfacePackageNamer.getTestPackageName()); models.addAll(generateTestViews(model, productConfig, surfacePackageNamer, testPackageNamer)); models.addAll( generateMockServiceViews(model, productConfig, surfacePackageNamer, testPackageNamer)); return models; } private static ModelTypeTable createTypeTable(String packageName) { return new ModelTypeTable( new PhpTypeTable(packageName), new PhpModelTypeNameConverter(packageName)); } private List<MockServiceImplFileView> generateMockServiceViews( Model model, GapicProductConfig productConfig, SurfaceNamer surfacePackageNamer, SurfaceNamer testPackageNamer) { List<MockServiceImplFileView> mockFiles = new ArrayList<>(); for (Interface grpcInterface : mockServiceTransformer.getGrpcInterfacesToMock(model, productConfig)) { ModelTypeTable typeTable = createTypeTable(surfacePackageNamer.getTestPackageName()); String name = surfacePackageNamer.getMockGrpcServiceImplName(grpcInterface); String grpcClassName = typeTable.getAndSaveNicknameFor(surfacePackageNamer.getGrpcClientTypeName(grpcInterface)); MockServiceImplView mockImpl = MockServiceImplView.newBuilder() .name(name) .grpcClassName(grpcClassName) .grpcMethods(new ArrayList<MockGrpcMethodView>()) .build(); String outputPath = pathMapper.getOutputPath(grpcInterface, productConfig); addUnitTestImports(typeTable); ImportSectionView importSection = importSectionTransformer.generateImportSection(typeTable.getImports()); mockFiles.add( MockServiceImplFileView.newBuilder() .outputPath(outputPath + File.separator + name + ".php") .templateFileName(MOCK_SERVICE_TEMPLATE_FILE) .fileHeader( fileHeaderTransformer.generateFileHeader( productConfig, importSection, testPackageNamer)) .serviceImpl(mockImpl) .build()); } return mockFiles; } private List<ClientTestFileView> generateTestViews( Model model, GapicProductConfig productConfig, SurfaceNamer surfacePackageNamer, SurfaceNamer testPackageNamer) { List<ClientTestFileView> testViews = new ArrayList<>(); for (Interface apiInterface : new InterfaceView().getElementIterable(model)) { ModelTypeTable typeTable = createTypeTable(surfacePackageNamer.getTestPackageName()); List<MockServiceImplView> impls = new ArrayList<>(); GapicInterfaceContext context = GapicInterfaceContext.create( apiInterface, productConfig, typeTable, surfacePackageNamer, featureConfig); List<MockServiceUsageView> mockServiceList = new ArrayList<>(); for (Interface grpcInterface : mockServiceTransformer .getGrpcInterfacesForService(model, productConfig, apiInterface) .values()) { String name = surfacePackageNamer.getMockGrpcServiceImplName(grpcInterface); String varName = surfacePackageNamer.getMockServiceVarName(grpcInterface); String grpcClassName = typeTable.getAndSaveNicknameFor( 
surfacePackageNamer.getGrpcClientTypeName(grpcInterface)); mockServiceList.add( MockServiceUsageView.newBuilder() .className(name) .varName(varName) .implName(name) .build()); impls.add( MockServiceImplView.newBuilder() .name(name) .grpcClassName(grpcClassName) .grpcMethods(new ArrayList<MockGrpcMethodView>()) .build()); } String testClassName = surfacePackageNamer.getUnitTestClassName(context.getInterfaceConfig()); ClientTestClassView testClassView = ClientTestClassView.newBuilder() .apiSettingsClassName( surfacePackageNamer.getNotImplementedString( "PhpGapicSurfaceTestTransformer.generateTestView - apiSettingsClassName")) .apiClassName( surfacePackageNamer.getApiWrapperClassName(context.getInterfaceConfig())) .name(testClassName) .apiName( PhpPackageMetadataNamer.getApiNameFromPackageName( surfacePackageNamer.getPackageName()) .toLowerUnderscore()) .testCases(createTestCaseViews(context)) .apiHasLongRunningMethods(context.getInterfaceConfig().hasLongRunningOperations()) .missingDefaultServiceAddress( !context.getInterfaceConfig().hasDefaultServiceAddress()) .missingDefaultServiceScopes(!context.getInterfaceConfig().hasDefaultServiceScopes()) .mockServices(mockServiceList) .build(); addUnitTestImports(typeTable); String outputPath = pathMapper.getOutputPath(context.getInterface(), productConfig); ImportSectionView importSection = importSectionTransformer.generateImportSection(typeTable.getImports()); testViews.add( ClientTestFileView.newBuilder() .outputPath(outputPath + File.separator + testClassName + ".php") .testClass(testClassView) .templateFileName(TEST_TEMPLATE_FILE) .fileHeader( fileHeaderTransformer.generateFileHeader( productConfig, importSection, testPackageNamer)) .build()); } return testViews; } private List<TestCaseView> createTestCaseViews(GapicInterfaceContext context) { ArrayList<TestCaseView> testCaseViews = new ArrayList<>(); SymbolTable testNameTable = new SymbolTable(); for (Method method : context.getSupportedMethods()) { GapicMethodContext methodContext = context.asRequestMethodContext(method); if (methodContext.getMethodConfig().getGrpcStreamingType() == GrpcStreamingType.ClientStreaming) { // TODO: Add unit test generation for ClientStreaming methods // Issue: https://github.com/googleapis/toolkit/issues/946 continue; } InitCodeOutputType initCodeOutputType = InitCodeOutputType.FieldList; if (methodContext.getMethodConfig().getGrpcStreamingType() == GrpcStreamingType.BidiStreaming) { initCodeOutputType = InitCodeOutputType.SingleObject; } ClientMethodType clientMethodType = ClientMethodType.OptionalArrayMethod; if (methodContext.getMethodConfig().isLongRunningOperation()) { clientMethodType = ClientMethodType.OperationOptionalArrayMethod; } else if (methodContext.getMethodConfig().isPageStreaming()) { clientMethodType = ClientMethodType.PagedOptionalArrayMethod; } Iterable<FieldConfig> fieldConfigs = methodContext.getMethodConfig().getRequiredFieldConfigs(); InitCodeContext initCodeContext = InitCodeContext.newBuilder() .initObjectType(methodContext.getMethod().getInputType()) .suggestedName(Name.from("request")) .initFieldConfigStrings(methodContext.getMethodConfig().getSampleCodeInitFields()) .initValueConfigMap(InitCodeTransformer.createCollectionMap(methodContext)) .initFields(FieldConfig.toFieldIterable(fieldConfigs)) .outputType(initCodeOutputType) .fieldConfigMap(FieldConfig.toFieldConfigMap(fieldConfigs)) .valueGenerator(valueGenerator) .build(); testCaseViews.add( testCaseTransformer.createTestCaseView( methodContext, testNameTable, initCodeContext, 
clientMethodType)); } return testCaseViews; } private void addUnitTestImports(ModelTypeTable typeTable) { typeTable.saveNicknameFor("\\Google\\GAX\\ApiException"); typeTable.saveNicknameFor("\\Google\\GAX\\BidiStream"); typeTable.saveNicknameFor("\\Google\\GAX\\ServerStream"); typeTable.saveNicknameFor("\\Google\\GAX\\GrpcCredentialsHelper"); typeTable.saveNicknameFor("\\Google\\GAX\\LongRunning\\OperationsClient"); typeTable.saveNicknameFor("\\Google\\GAX\\Testing\\MockStubTrait"); typeTable.saveNicknameFor("\\Google\\GAX\\Testing\\LongRunning\\MockOperationsImpl"); typeTable.saveNicknameFor("\\Google\\GAX\\Testing\\GeneratedTest"); typeTable.saveNicknameFor("\\PHPUnit_Framework_TestCase"); typeTable.saveNicknameFor("\\Google\\Protobuf\\Any"); typeTable.saveNicknameFor("\\Google\\Protobuf\\GPBEmpty"); typeTable.saveNicknameFor("\\Google\\Longrunning\\GetOperationRequest"); typeTable.saveNicknameFor("\\Grpc"); typeTable.saveNicknameFor("\\stdClass"); } }
1
23,958
The diff for this class is difficult to walk through because I reorganized it to be much more clear. There are improvements throughout, but the most important are in the new `createSmokeTest.*` methods.
googleapis-gapic-generator
java
@@ -164,6 +164,12 @@ class BaseDetector(BaseModule, metaclass=ABCMeta): should be double nested (i.e. List[Tensor], List[List[dict]]), with the outer list indicating test time augmentations. """ + img_norm_cfg = kwargs.get('img_norm_cfg', None) + if img_norm_cfg: + mean = torch.tensor(img_norm_cfg['mean'])[None, ..., None, None] + std = torch.tensor(img_norm_cfg['std'])[None, ..., None, None] + img = (img - mean) / std + if torch.onnx.is_in_onnx_export(): assert len(img_metas) == 1 return self.onnx_export(img[0], img_metas[0])
1
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from collections import OrderedDict import mmcv import numpy as np import torch import torch.distributed as dist from mmcv.runner import BaseModule, auto_fp16 from mmdet.core.visualization import imshow_det_bboxes class BaseDetector(BaseModule, metaclass=ABCMeta): """Base class for detectors.""" def __init__(self, init_cfg=None): super(BaseDetector, self).__init__(init_cfg) self.fp16_enabled = False @property def with_neck(self): """bool: whether the detector has a neck""" return hasattr(self, 'neck') and self.neck is not None # TODO: these properties need to be carefully handled # for both single stage & two stage detectors @property def with_shared_head(self): """bool: whether the detector has a shared head in the RoI Head""" return hasattr(self, 'roi_head') and self.roi_head.with_shared_head @property def with_bbox(self): """bool: whether the detector has a bbox head""" return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox) or (hasattr(self, 'bbox_head') and self.bbox_head is not None)) @property def with_mask(self): """bool: whether the detector has a mask head""" return ((hasattr(self, 'roi_head') and self.roi_head.with_mask) or (hasattr(self, 'mask_head') and self.mask_head is not None)) @abstractmethod def extract_feat(self, imgs): """Extract features from images.""" pass def extract_feats(self, imgs): """Extract features from multiple images. Args: imgs (list[torch.Tensor]): A list of images. The images are augmented from the same image but in different ways. Returns: list[torch.Tensor]: Features of different images """ assert isinstance(imgs, list) return [self.extract_feat(img) for img in imgs] def forward_train(self, imgs, img_metas, **kwargs): """ Args: img (list[Tensor]): List of tensors of shape (1, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. kwargs (keyword arguments): Specific to concrete implementation. """ # NOTE the batched image size information may be useful, e.g. # in DETR, this is needed for the construction of masks, which is # then used for the transformer_head. 
batch_input_shape = tuple(imgs[0].size()[-2:]) for img_meta in img_metas: img_meta['batch_input_shape'] = batch_input_shape async def async_simple_test(self, img, img_metas, **kwargs): raise NotImplementedError @abstractmethod def simple_test(self, img, img_metas, **kwargs): pass @abstractmethod def aug_test(self, imgs, img_metas, **kwargs): """Test function with test time augmentation.""" pass async def aforward_test(self, *, img, img_metas, **kwargs): for var, name in [(img, 'img'), (img_metas, 'img_metas')]: if not isinstance(var, list): raise TypeError(f'{name} must be a list, but got {type(var)}') num_augs = len(img) if num_augs != len(img_metas): raise ValueError(f'num of augmentations ({len(img)}) ' f'!= num of image metas ({len(img_metas)})') # TODO: remove the restriction of samples_per_gpu == 1 when prepared samples_per_gpu = img[0].size(0) assert samples_per_gpu == 1 if num_augs == 1: return await self.async_simple_test(img[0], img_metas[0], **kwargs) else: raise NotImplementedError def forward_test(self, imgs, img_metas, **kwargs): """ Args: imgs (List[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains all images in the batch. img_metas (List[List[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. """ for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: if not isinstance(var, list): raise TypeError(f'{name} must be a list, but got {type(var)}') num_augs = len(imgs) if num_augs != len(img_metas): raise ValueError(f'num of augmentations ({len(imgs)}) ' f'!= num of image meta ({len(img_metas)})') # NOTE the batched image size information may be useful, e.g. # in DETR, this is needed for the construction of masks, which is # then used for the transformer_head. for img, img_meta in zip(imgs, img_metas): batch_size = len(img_meta) for img_id in range(batch_size): img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:]) if num_augs == 1: # proposals (List[List[Tensor]]): the outer list indicates # test-time augs (multiscale, flip, etc.) and the inner list # indicates images in a batch. # The Tensor should have a shape Px4, where P is the number of # proposals. if 'proposals' in kwargs: kwargs['proposals'] = kwargs['proposals'][0] return self.simple_test(imgs[0], img_metas[0], **kwargs) else: assert imgs[0].size(0) == 1, 'aug test does not support ' \ 'inference with batch size ' \ f'{imgs[0].size(0)}' # TODO: support test augmentation for predefined proposals assert 'proposals' not in kwargs return self.aug_test(imgs, img_metas, **kwargs) @auto_fp16(apply_to=('img', )) def forward(self, img, img_metas, return_loss=True, **kwargs): """Calls either :func:`forward_train` or :func:`forward_test` depending on whether ``return_loss`` is ``True``. Note this setting will change the expected inputs. When ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor and List[dict]), and when ``resturn_loss=False``, img and img_meta should be double nested (i.e. List[Tensor], List[List[dict]]), with the outer list indicating test time augmentations. """ if torch.onnx.is_in_onnx_export(): assert len(img_metas) == 1 return self.onnx_export(img[0], img_metas[0]) if return_loss: return self.forward_train(img, img_metas, **kwargs) else: return self.forward_test(img, img_metas, **kwargs) def _parse_losses(self, losses): """Parse the raw outputs (losses) of the network. 
Args: losses (dict): Raw output of the network, which usually contain losses and other necessary infomation. Returns: tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \ which may be a weighted sum of all losses, log_vars contains \ all the variables to be sent to the logger. """ log_vars = OrderedDict() for loss_name, loss_value in losses.items(): if isinstance(loss_value, torch.Tensor): log_vars[loss_name] = loss_value.mean() elif isinstance(loss_value, list): log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) else: raise TypeError( f'{loss_name} is not a tensor or list of tensors') loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key) log_vars['loss'] = loss for loss_name, loss_value in log_vars.items(): # reduce loss when distributed training if dist.is_available() and dist.is_initialized(): loss_value = loss_value.data.clone() dist.all_reduce(loss_value.div_(dist.get_world_size())) log_vars[loss_name] = loss_value.item() return loss, log_vars def train_step(self, data, optimizer): """The iteration step during training. This method defines an iteration step during training, except for the back propagation and optimizer updating, which are done in an optimizer hook. Note that in some complicated cases or models, the whole process including back propagation and optimizer updating is also defined in this method, such as GAN. Args: data (dict): The output of dataloader. optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of runner is passed to ``train_step()``. This argument is unused and reserved. Returns: dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \ ``num_samples``. - ``loss`` is a tensor for back propagation, which can be a weighted sum of multiple losses. - ``log_vars`` contains all the variables to be sent to the logger. - ``num_samples`` indicates the batch size (when the model is DDP, it means the batch size on each GPU), which is used for averaging the logs. """ losses = self(**data) loss, log_vars = self._parse_losses(losses) outputs = dict( loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) return outputs def val_step(self, data, optimizer=None): """The iteration step during validation. This method shares the same signature as :func:`train_step`, but used during val epochs. Note that the evaluation after training epochs is not implemented with this method, but an evaluation hook. """ losses = self(**data) loss, log_vars = self._parse_losses(losses) outputs = dict( loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) return outputs def show_result(self, img, result, score_thr=0.3, bbox_color=(72, 101, 241), text_color=(72, 101, 241), mask_color=None, thickness=2, font_size=13, win_name='', show=False, wait_time=0, out_file=None): """Draw `result` over `img`. Args: img (str or Tensor): The image to be displayed. result (Tensor or tuple): The results to draw over `img` bbox_result or (bbox_result, segm_result). score_thr (float, optional): Minimum score of bboxes to be shown. Default: 0.3. bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. The tuple of color should be in BGR order. Default: 'green' text_color (str or tuple(int) or :obj:`Color`):Color of texts. The tuple of color should be in BGR order. Default: 'green' mask_color (None or str or tuple(int) or :obj:`Color`): Color of masks. The tuple of color should be in BGR order. Default: None thickness (int): Thickness of lines. Default: 2 font_size (int): Font size of texts. 
Default: 13 win_name (str): The window name. Default: '' wait_time (float): Value of waitKey param. Default: 0. show (bool): Whether to show the image. Default: False. out_file (str or None): The filename to write the image. Default: None. Returns: img (Tensor): Only if not `show` or `out_file` """ img = mmcv.imread(img) img = img.copy() if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] # ms rcnn else: bbox_result, segm_result = result, None bboxes = np.vstack(bbox_result) labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) # draw segmentation masks segms = None if segm_result is not None and len(labels) > 0: # non empty segms = mmcv.concat_list(segm_result) if isinstance(segms[0], torch.Tensor): segms = torch.stack(segms, dim=0).detach().cpu().numpy() else: segms = np.stack(segms, axis=0) # if out_file specified, do not show image in window if out_file is not None: show = False # draw bounding boxes img = imshow_det_bboxes( img, bboxes, labels, segms, class_names=self.CLASSES, score_thr=score_thr, bbox_color=bbox_color, text_color=text_color, mask_color=mask_color, thickness=thickness, font_size=font_size, win_name=win_name, show=show, wait_time=wait_time, out_file=out_file) if not (show or out_file): return img def onnx_export(self, img, img_metas): raise NotImplementedError(f'{self.__class__.__name__} does ' f'not support ONNX EXPORT')
1
25,610
We suggest keeping this logic within lines 173-175 to restrict its influence.
open-mmlab-mmdetection
py
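The patch of the record above normalizes img with the mean/std from img_norm_cfg at the top of BaseDetector.forward(), and the msg asks for that logic to stay inside the ONNX-export branch so it cannot affect normal training or testing. Below is a hedged sketch of that placement: the _normalize_from_cfg helper name is hypothetical, the tensor arithmetic is copied from the patch, and the commented lines show where it would sit inside forward().

    import torch

    def _normalize_from_cfg(img, img_norm_cfg):
        """Hypothetical helper applying the mean/std normalization from the patch."""
        mean = torch.tensor(img_norm_cfg['mean'])[None, ..., None, None]
        std = torch.tensor(img_norm_cfg['std'])[None, ..., None, None]
        return (img - mean) / std

    # Placement suggested by the review comment, inside BaseDetector.forward():
    #
    #     if torch.onnx.is_in_onnx_export():
    #         img_norm_cfg = kwargs.get('img_norm_cfg', None)
    #         if img_norm_cfg:
    #             img = _normalize_from_cfg(img, img_norm_cfg)
    #         assert len(img_metas) == 1
    #         return self.onnx_export(img[0], img_metas[0])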
@@ -88,7 +88,7 @@ class GridView * @param string $name * @param array $parameters * @param bool $echo - * @return string|null + * @return string|null|void */ public function renderBlock($name, array $parameters = [], $echo = true) {
1
<?php namespace Shopsys\FrameworkBundle\Component\Grid; use Symfony\Component\Form\FormView; use Symfony\Component\HttpFoundation\RequestStack; use Symfony\Component\Routing\Generator\UrlGeneratorInterface; use Symfony\Component\Routing\RouterInterface; use Twig_Environment; class GridView { /** * @var \Shopsys\FrameworkBundle\Component\Grid\Grid */ protected $grid; /** * @var array */ protected $templateParameters; /** * @var \Twig_TemplateWrapper[] */ protected $templates; /** * @var string|string[]|null */ protected $theme; /** * @var \Symfony\Component\HttpFoundation\RequestStack */ protected $requestStack; /** * @var \Symfony\Component\Routing\RouterInterface */ protected $router; /** * @var \Twig_Environment */ protected $twig; /** * @param \Shopsys\FrameworkBundle\Component\Grid\Grid $grid * @param \Symfony\Component\HttpFoundation\RequestStack $requestStack * @param \Symfony\Component\Routing\RouterInterface $router * @param \Twig_Environment $twig * @param string|string[] $theme * @param array $templateParameters */ public function __construct( Grid $grid, RequestStack $requestStack, RouterInterface $router, Twig_Environment $twig, $theme, array $templateParameters = [] ) { $this->grid = $grid; $this->requestStack = $requestStack; $this->router = $router; $this->twig = $twig; $this->setTheme($theme, $templateParameters); } public function render() { $this->renderBlock('grid'); } /** * @param array|null $removeParameters */ public function renderHiddenInputs($removeParameters = null) { $this->renderBlock('grid_hidden_inputs', [ 'parameter' => $this->grid->getUrlGridParameters(null, $removeParameters), ]); } /** * @param string $name * @param array $parameters * @param bool $echo * @return string|null */ public function renderBlock($name, array $parameters = [], $echo = true) { foreach ($this->getTemplates() as $template) { if ($template->hasBlock($name)) { $parameters = array_merge( $parameters, $this->templateParameters, [ 'gridView' => $this, 'grid' => $this->grid, ] ); $templateParameters = $this->twig->mergeGlobals($parameters); if ($echo) { echo $template->renderBlock($name, $templateParameters); return; } else { return $template->renderBlock($name, $templateParameters); } } } throw new \InvalidArgumentException(sprintf('Block "%s" doesn\'t exist in grid template "%s".', $name, $this->theme)); } /** * @param \Shopsys\FrameworkBundle\Component\Grid\Column $column * @param array|null $row * @param \Symfony\Component\Form\FormView|null $formView * @param \Symfony\Component\Form\FormView */ public function renderCell(Column $column, array $row = null, FormView $formView = null) { if ($row !== null) { $value = $this->getCellValue($column, $row); } else { $value = null; } $blockParameters = [ 'value' => $value, 'row' => $row, 'column' => $column, 'form' => $formView, ]; if ($formView === null) { $possibleBlocks = [ 'grid_value_cell_id_' . $column->getId(), 'grid_value_cell_type_' . $this->getVariableType($value), 'grid_value_cell', ]; } else { $possibleBlocks = [ 'grid_value_cell_edit_id_' . $column->getId(), 'grid_value_cell_edit_type_' . $this->getVariableType($value), 'grid_value_cell_edit', ]; } foreach ($possibleBlocks as $blockName) { if ($this->blockExists($blockName)) { $this->renderBlock($blockName, $blockParameters); break; } } } /** * @param \Shopsys\FrameworkBundle\Component\Grid\ActionColumn $actionColumn * @param array $row */ public function renderActionCell(ActionColumn $actionColumn, array $row) { $posibleBlocks = [ 'grid_action_cell_type_' . 
$actionColumn->getType(), 'grid_action_cell', ]; foreach ($posibleBlocks as $blockName) { if ($this->blockExists($blockName)) { $this->renderBlock($blockName, ['actionColumn' => $actionColumn, 'row' => $row]); break; } } } /** * @param \Shopsys\FrameworkBundle\Component\Grid\Column $column */ public function renderTitleCell(Column $column) { $posibleBlocks = [ 'grid_title_cell_id_' . $column->getId(), 'grid_title_cell', ]; foreach ($posibleBlocks as $blockName) { if ($this->blockExists($blockName)) { $this->renderBlock($blockName, ['column' => $column]); break; } } } /** * @param array $parameters * @param array|string|null $removeParameters * @return string */ public function getUrl(array $parameters = null, $removeParameters = null) { $masterRequest = $this->requestStack->getMasterRequest(); $routeParameters = $this->grid->getUrlParameters($parameters, $removeParameters); return $this->router->generate( $masterRequest->attributes->get('_route'), $routeParameters, UrlGeneratorInterface::ABSOLUTE_URL ); } /** * @param string $name * @return bool */ protected function blockExists($name) { foreach ($this->getTemplates() as $template) { if ($template->hasBlock($name)) { return true; } } return false; } /** * @return string|array */ public function getTheme() { return $this->theme; } /** * @param string|string[] $theme * @param array $parameters */ protected function setTheme($theme, array $parameters = []) { $this->theme = $theme; $this->templateParameters = $parameters; } /** * @return \Twig_TemplateWrapper[] */ protected function getTemplates() { if (empty($this->templates)) { $this->templates = []; if (is_array($this->theme)) { foreach ($this->theme as $theme) { $this->templates[] = $this->getTemplateFromString($theme); } } else { $this->templates[] = $this->getTemplateFromString($this->theme); } } return $this->templates; } /** * @param string $theme * @return \Twig_TemplateWrapper */ protected function getTemplateFromString($theme) { return $this->twig->load($theme); } /** * @param \Shopsys\FrameworkBundle\Component\Grid\Column $column * @param array $row * @return mixed */ protected function getCellValue(Column $column, $row) { return Grid::getValueFromRowBySourceColumnName($row, $column->getSourceColumnName()); } /** * @param mixed $variable * @return string */ protected function getVariableType($variable) { switch (gettype($variable)) { case 'boolean': return 'boolean'; case 'integer': case 'double': return 'number'; case 'object': return str_replace('\\', '_', get_class($variable)); case 'string': return 'string'; case 'NULL': return 'null'; default: return 'unknown'; } } }
1
16,304
why not? maybe another phpstan plugin
shopsys-shopsys
php
@@ -37,14 +37,16 @@ namespace Nethermind.JsonRpc.Modules.Trace private readonly ITracer _tracer; private readonly IBlockFinder _blockFinder; private readonly TransactionDecoder _txDecoder = new TransactionDecoder(); - private readonly CancellationToken _cancellationToken; + private readonly IJsonRpcConfig _jsonRpcConfig; + private readonly TimeSpan _cancellationTokenTimeout; - public TraceModule(IReceiptFinder receiptFinder, ITracer tracer, IBlockFinder blockFinder, CancellationToken cancellationToken = default(CancellationToken)) + public TraceModule(IReceiptFinder receiptFinder, ITracer tracer, IBlockFinder blockFinder, IJsonRpcConfig jsonRpcConfig) { - _cancellationToken = cancellationToken; _receiptFinder = receiptFinder ?? throw new ArgumentNullException(nameof(receiptFinder)); _tracer = tracer ?? throw new ArgumentNullException(nameof(tracer)); _blockFinder = blockFinder ?? throw new ArgumentNullException(nameof(blockFinder)); + _jsonRpcConfig = jsonRpcConfig ?? throw new ArgumentNullException(nameof(jsonRpcConfig)); + _cancellationTokenTimeout = TimeSpan.FromMilliseconds(_jsonRpcConfig.TracerTimeout); } private static ParityTraceTypes GetParityTypes(string[] types)
1
// Copyright (c) 2018 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using System.Collections.Generic; using System.Linq; using System.Threading; using System.Threading.Tasks; using Nethermind.Blockchain; using Nethermind.Blockchain.Find; using Nethermind.Blockchain.Receipts; using Nethermind.Blockchain.Tracing; using Nethermind.Core; using Nethermind.Core.Crypto; using Nethermind.Evm.Tracing.ParityStyle; using Nethermind.JsonRpc.Data; using Nethermind.Serialization.Rlp; namespace Nethermind.JsonRpc.Modules.Trace { public class TraceModule : ITraceModule { private readonly IReceiptFinder _receiptFinder; private readonly ITracer _tracer; private readonly IBlockFinder _blockFinder; private readonly TransactionDecoder _txDecoder = new TransactionDecoder(); private readonly CancellationToken _cancellationToken; public TraceModule(IReceiptFinder receiptFinder, ITracer tracer, IBlockFinder blockFinder, CancellationToken cancellationToken = default(CancellationToken)) { _cancellationToken = cancellationToken; _receiptFinder = receiptFinder ?? throw new ArgumentNullException(nameof(receiptFinder)); _tracer = tracer ?? throw new ArgumentNullException(nameof(tracer)); _blockFinder = blockFinder ?? 
throw new ArgumentNullException(nameof(blockFinder)); } private static ParityTraceTypes GetParityTypes(string[] types) { return types.Select(s => (ParityTraceTypes) Enum.Parse(typeof(ParityTraceTypes), s, true)).Aggregate((t1, t2) => t1 | t2); } public ResultWrapper<ParityTxTraceFromReplay> trace_call(TransactionForRpc message, string[] traceTypes, BlockParameter blockParameter) { Transaction tx = message.ToTransaction(); return TraceTx(tx, traceTypes, blockParameter); } public ResultWrapper<ParityTxTraceFromReplay[]> trace_callMany((TransactionForRpc message, string[] traceTypes, BlockParameter numberOrTag)[] a) { throw new NotImplementedException(); } public ResultWrapper<ParityTxTraceFromReplay> trace_rawTransaction(byte[] data, string[] traceTypes) { Transaction tx = _txDecoder.Decode(new RlpStream(data)); return TraceTx(tx, traceTypes, BlockParameter.Latest); } private ResultWrapper<ParityTxTraceFromReplay> TraceTx(Transaction tx, string[] traceTypes, BlockParameter blockParameter) { SearchResult<BlockHeader> headerSearch = _blockFinder.SearchForHeader(blockParameter); if (headerSearch.IsError) { return ResultWrapper<ParityTxTraceFromReplay>.Fail(headerSearch); } BlockHeader header = headerSearch.Object; if (header.IsGenesis) { header = new BlockHeader( header.Hash, Keccak.OfAnEmptySequenceRlp, Address.Zero, header.Difficulty, header.Number + 1, header.GasLimit, header.Timestamp + 1, header.ExtraData); header.TotalDifficulty = 2 * header.Difficulty; } Block block = new Block(header, new[] {tx}, Enumerable.Empty<BlockHeader>()); IReadOnlyCollection<ParityLikeTxTrace> result = TraceBlock(block, GetParityTypes(traceTypes)); return ResultWrapper<ParityTxTraceFromReplay>.Success(new ParityTxTraceFromReplay(result.SingleOrDefault())); } public ResultWrapper<ParityTxTraceFromReplay> trace_replayTransaction(Keccak txHash, string[] traceTypes) { SearchResult<Keccak> blockHashSearch = _receiptFinder.SearchForReceiptBlockHash(txHash); if (blockHashSearch.IsError) { return ResultWrapper<ParityTxTraceFromReplay>.Fail(blockHashSearch); } SearchResult<Block> blockSearch = _blockFinder.SearchForBlock(new BlockParameter(blockHashSearch.Object)); if (blockSearch.IsError) { return ResultWrapper<ParityTxTraceFromReplay>.Fail(blockSearch); } Block block = blockSearch.Object; ParityLikeTxTrace txTrace = TraceTx(block, txHash, GetParityTypes(traceTypes)); return ResultWrapper<ParityTxTraceFromReplay>.Success(new ParityTxTraceFromReplay(txTrace)); } public ResultWrapper<ParityTxTraceFromReplay[]> trace_replayBlockTransactions(BlockParameter blockParameter, string[] traceTypes) { SearchResult<Block> blockSearch = _blockFinder.SearchForBlock(blockParameter); if (blockSearch.IsError) { return ResultWrapper<ParityTxTraceFromReplay[]>.Fail(blockSearch); } Block block = blockSearch.Object; IReadOnlyCollection<ParityLikeTxTrace> txTraces = TraceBlock(block, GetParityTypes(traceTypes)); // ReSharper disable once CoVariantArrayConversion return ResultWrapper<ParityTxTraceFromReplay[]>.Success(txTraces.Select(t => new ParityTxTraceFromReplay(t, true)).ToArray()); } public ResultWrapper<ParityTxTraceFromStore[]> trace_filter(BlockParameter fromBlock, BlockParameter toBlock, Address toAddress, int after, int count) { throw new NotImplementedException(); } public ResultWrapper<ParityTxTraceFromStore[]> trace_block(BlockParameter blockParameter) { SearchResult<Block> blockSearch = _blockFinder.SearchForBlock(blockParameter); if (blockSearch.IsError) { return ResultWrapper<ParityTxTraceFromStore[]>.Fail(blockSearch); } 
Block block = blockSearch.Object; IReadOnlyCollection<ParityLikeTxTrace> txTraces = TraceBlock(block, ParityTraceTypes.Trace | ParityTraceTypes.Rewards); return ResultWrapper<ParityTxTraceFromStore[]>.Success(txTraces.SelectMany(ParityTxTraceFromStore.FromTxTrace).ToArray()); } public ResultWrapper<ParityTxTraceFromStore[]> trace_get(Keccak txHash, int[] positions) { throw new NotImplementedException(); } public ResultWrapper<ParityTxTraceFromStore[]> trace_transaction(Keccak txHash) { SearchResult<Keccak> blockHashSearch = _receiptFinder.SearchForReceiptBlockHash(txHash); if (blockHashSearch.IsError) { return ResultWrapper<ParityTxTraceFromStore[]>.Fail(blockHashSearch); } SearchResult<Block> blockSearch = _blockFinder.SearchForBlock(new BlockParameter(blockHashSearch.Object)); if (blockSearch.IsError) { return ResultWrapper<ParityTxTraceFromStore[]>.Fail(blockSearch); } Block block = blockSearch.Object; ParityLikeTxTrace txTrace = TraceTx(block, txHash, ParityTraceTypes.Trace | ParityTraceTypes.Rewards); return ResultWrapper<ParityTxTraceFromStore[]>.Success(ParityTxTraceFromStore.FromTxTrace(txTrace)); } private IReadOnlyCollection<ParityLikeTxTrace> TraceBlock(Block block, ParityTraceTypes traceTypes) { ParityLikeBlockTracer listener = new ParityLikeBlockTracer(traceTypes, _cancellationToken); _tracer.Trace(block, listener); return listener.BuildResult(); } private ParityLikeTxTrace TraceTx(Block block, Keccak txHash, ParityTraceTypes traceTypes) { ParityLikeBlockTracer listener = new ParityLikeBlockTracer(txHash, traceTypes, _cancellationToken); _tracer.Trace(block, listener); return listener.BuildResult().SingleOrDefault(); } } }
1
24,232
That was a really bad CR from me before, if I did not spot it.
NethermindEth-nethermind
.cs
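The GetParityTypes helper in the C# above folds the caller's traceTypes strings into one flags value: each name is parsed case-insensitively into the ParityTraceTypes enum and the results are OR-ed together. A minimal Python stand-in for that fold follows; Trace and Rewards appear in the source, while the other member names are assumed for illustration only.

```python
# Minimal sketch (Python stand-in, not the Nethermind C# API) of how
# GetParityTypes combines trace-type names: parse each name case-insensitively
# into a flags enum, then OR the flags together.
from enum import Flag, auto
from functools import reduce
from operator import or_

class ParityTraceTypes(Flag):
    NONE = 0
    TRACE = auto()       # present in the source
    VMTRACE = auto()     # assumed member for illustration
    STATEDIFF = auto()   # assumed member for illustration
    REWARDS = auto()     # present in the source

def get_parity_types(names):
    # Mirrors Enum.Parse(..., ignoreCase: true) followed by Aggregate((t1, t2) => t1 | t2).
    return reduce(or_, (ParityTraceTypes[n.upper()] for n in names), ParityTraceTypes.NONE)

combined = get_parity_types(["trace", "rewards"])
print(ParityTraceTypes.TRACE in combined, ParityTraceTypes.VMTRACE in combined)  # True False
```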
@@ -203,7 +203,15 @@ EOF echo "$FORSETI_ENV" > $USER_HOME/forseti_env.sh USER=ubuntu -(echo "{run_frequency} $FORSETI_HOME/setup/gcp/scripts/run_forseti.sh") | crontab -u $USER - + +# Use flock to prevent rerun of the same cron job when the previous job is still running. +# If the lock file does not exist under the tmp directory, it will create the file and put a lock on top of the file. +# When the previous cron job is not finished and the new one is trying to run, it will attempt to acquire the lock +# to the lock file and fail because the file is already locked by the previous process. +# The -n flag in flock will fail the process right away when the process is not able to acquire the lock so we won't +# queue up the jobs. + +(echo "{run_frequency} /usr/bin/flock -n /tmp/forseti_cron_runner.lock $FORSETI_HOME/setup/gcp/scripts/run_forseti.sh") | crontab -u $USER - echo "Added the run_forseti.sh to crontab under user $USER" echo "Execution of startup script finished"
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Creates a GCE instance template for Forseti Security.""" import random def GenerateConfig(context): """Generate configuration.""" USE_BRANCH = context.properties.get('branch-name') FORSETI_HOME = '$USER_HOME/forseti-security' if USE_BRANCH: DOWNLOAD_FORSETI = """ git clone {src_path}.git --branch {branch_name} --single-branch forseti-security """.format( src_path=context.properties['src-path'], branch_name=context.properties['branch-name']) else: DOWNLOAD_FORSETI = """ wget -qO- {src_path}/archive/v{release_version}.tar.gz | tar xvz mv forseti-security-{release_version} forseti-security """.format( src_path=context.properties['src-path'], release_version=context.properties['release-version']) CLOUDSQL_CONN_STRING = '{}:{}:{}'.format( context.env['project'], '$(ref.cloudsql-instance.region)', '$(ref.cloudsql-instance.name)') SCANNER_BUCKET = context.properties['scanner-bucket'] FORSETI_DB_NAME = context.properties['database-name'] SERVICE_ACCOUNT_SCOPES = context.properties['service-account-scopes'] FORSETI_SERVER_CONF = '{}/configs/forseti_conf_server.yaml'.format(FORSETI_HOME) EXPORT_INITIALIZE_VARS = ( 'export SQL_PORT={0}\n' 'export SQL_INSTANCE_CONN_STRING="{1}"\n' 'export FORSETI_DB_NAME="{2}"\n') EXPORT_INITIALIZE_VARS = EXPORT_INITIALIZE_VARS.format( context.properties['db-port'], CLOUDSQL_CONN_STRING, FORSETI_DB_NAME) EXPORT_FORSETI_VARS = ( 'export FORSETI_HOME={forseti_home}\n' 'export FORSETI_SERVER_CONF={forseti_server_conf}\n' ).format(forseti_home=FORSETI_HOME, forseti_server_conf=FORSETI_SERVER_CONF) RUN_FREQUENCY = context.properties['run-frequency'] resources = [] deployment_name_splitted = context.env['deployment'].split('-') deployment_name_splitted.insert(len(deployment_name_splitted)-1, 'vm') instance_name = '-'.join(deployment_name_splitted) resources.append({ 'name': instance_name, 'type': 'compute.v1.instance', 'properties': { 'zone': context.properties['zone'], 'machineType': ( 'https://www.googleapis.com/compute/v1/projects/{}' '/zones/{}/machineTypes/{}'.format( context.env['project'], context.properties['zone'], context.properties['instance-type'])), 'disks': [{ 'deviceName': 'boot', 'type': 'PERSISTENT', 'boot': True, 'autoDelete': True, 'initializeParams': { 'sourceImage': ( 'https://www.googleapis.com/compute/v1' '/projects/{}/global/images/family/{}'.format( context.properties['image-project'], context.properties['image-family'] ) ) } }], 'networkInterfaces': [{ 'network': ( 'https://www.googleapis.com/compute/v1/' 'projects/{}/global/networks/default'.format( context.env['project'])), 'accessConfigs': [{ 'name': 'External NAT', 'type': 'ONE_TO_ONE_NAT' }] }], 'serviceAccounts': [{ 'email': context.properties['service-account'], 'scopes': SERVICE_ACCOUNT_SCOPES, }], 'metadata': { 'items': [{ 'key': 'startup-script', 'value': """#!/bin/bash exec > /tmp/deployment.log exec 2>&1 # Ubuntu update. 
sudo apt-get update -y sudo apt-get upgrade -y sudo apt-get update && sudo apt-get --assume-yes install google-cloud-sdk USER_HOME=/home/ubuntu # Install fluentd if necessary. FLUENTD=$(ls /usr/sbin/google-fluentd) if [ -z "$FLUENTD" ]; then cd $USER_HOME curl -sSO https://dl.google.com/cloudagents/install-logging-agent.sh bash install-logging-agent.sh fi # Check whether Cloud SQL proxy is installed. CLOUD_SQL_PROXY=$(which cloud_sql_proxy) if [ -z "$CLOUD_SQL_PROXY" ]; then cd $USER_HOME wget https://dl.google.com/cloudsql/cloud_sql_proxy.{cloudsql_arch} sudo mv cloud_sql_proxy.{cloudsql_arch} /usr/local/bin/cloud_sql_proxy chmod +x /usr/local/bin/cloud_sql_proxy fi # Install Forseti Security. cd $USER_HOME rm -rf *forseti* # Download Forseti source code {download_forseti} cd forseti-security # Forseti Host Setup sudo apt-get install -y git unzip # Forseti host dependencies sudo apt-get install -y $(cat setup/dependencies/apt_packages.txt | grep -v "#" | xargs) # Forseti dependencies pip install --upgrade pip==9.0.3 pip install -q --upgrade setuptools wheel pip install -q --upgrade -r requirements.txt # Change the access level of configs/ rules/ and run_forseti.sh chmod -R ug+rwx {forseti_home}/configs {forseti_home}/rules {forseti_home}/setup/gcp/scripts/run_forseti.sh # Install Forseti python setup.py install # Export variables required by initialize_forseti_services.sh. {export_initialize_vars} # Export variables required by run_forseti.sh {export_forseti_vars} # Store the variables in /etc/profile.d/forseti_environment.sh # so all the users will have access to them echo "echo '{export_forseti_vars}' >> /etc/profile.d/forseti_environment.sh" | sudo sh # Download server configuration from GCS gsutil cp gs://{scanner_bucket}/configs/forseti_conf_server.yaml {forseti_server_conf} gsutil cp -r gs://{scanner_bucket}/rules {forseti_home}/ # Start Forseti service depends on vars defined above. bash ./setup/gcp/scripts/initialize_forseti_services.sh echo "Starting services." systemctl start cloudsqlproxy sleep 5 systemctl start forseti echo "Success! The Forseti API server has been started." # Create a Forseti env script FORSETI_ENV="$(cat <<EOF #!/bin/bash export PATH=$PATH:/usr/local/bin # Forseti environment variables export FORSETI_HOME=/home/ubuntu/forseti-security export FORSETI_SERVER_CONF=$FORSETI_HOME/configs/forseti_conf_server.yaml export SCANNER_BUCKET={scanner_bucket} EOF )" echo "$FORSETI_ENV" > $USER_HOME/forseti_env.sh USER=ubuntu (echo "{run_frequency} $FORSETI_HOME/setup/gcp/scripts/run_forseti.sh") | crontab -u $USER - echo "Added the run_forseti.sh to crontab under user $USER" echo "Execution of startup script finished" """.format( # Cloud SQL properties cloudsql_arch = context.properties['cloudsqlproxy-os-arch'], # Install Forseti. download_forseti=DOWNLOAD_FORSETI, # Set ownership for Forseti conf and rules dirs forseti_home=FORSETI_HOME, # Download the Forseti conf and rules. scanner_bucket=SCANNER_BUCKET, forseti_server_conf=FORSETI_SERVER_CONF, # Env variables for Explain export_initialize_vars=EXPORT_INITIALIZE_VARS, # Env variables for Forseti export_forseti_vars=EXPORT_FORSETI_VARS, # Forseti run frequency run_frequency=RUN_FREQUENCY, ) }] } } }) return {'resources': resources}
1
30,131
Does this resolve the scenario where a user-triggered forseti process is running and would be killed by the cron job restarting the server?
forseti-security-forseti-security
py
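The comment block in the patch above explains the guard: `flock -n` takes a non-blocking exclusive lock on /tmp/forseti_cron_runner.lock, so a new cron invocation exits immediately instead of queueing behind a run that has not finished. Below is a minimal Python sketch of the same pattern; only the lock path comes from the patch, and the wrapped command is illustrative.

```python
# Sketch of the overlap guard the patch adds with `flock -n`: take a
# non-blocking exclusive lock on the lock file and bail out at once if a
# previous run still holds it.
import fcntl
import subprocess
import sys

LOCK_PATH = "/tmp/forseti_cron_runner.lock"

def run_exclusively(cmd):
    with open(LOCK_PATH, "w") as lock_file:
        try:
            # LOCK_NB mirrors flock's -n flag: fail immediately instead of queueing.
            fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except BlockingIOError:
            print("previous run still active, skipping", file=sys.stderr)
            return 1
        # The lock is released when lock_file is closed at the end of the block.
        return subprocess.call(cmd)

if __name__ == "__main__":
    sys.exit(run_exclusively(["/bin/echo", "run_forseti.sh would run here"]))
```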
@@ -157,7 +157,6 @@ func (ops *OpStream) Intc(constIndex uint) { } else { ops.trace("intc %d %d", constIndex, ops.intc[constIndex]) } - ops.tpush(StackUint64) } // Uint writes opcodes for loading a uint literal
1
// Copyright (C) 2019-2021 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package logic import ( "bufio" "bytes" "encoding/base32" "encoding/base64" "encoding/binary" "encoding/hex" "errors" "fmt" "io" "os" "strconv" "strings" "github.com/algorand/go-algorand/data/basics" ) // Writer is what we want here. Satisfied by bufio.Buffer type Writer interface { Write([]byte) (int, error) WriteByte(c byte) error } type labelReference struct { sourceLine int // position of the opcode start that refers to the label position int label string } // OpStream is destination for program and scratch space type OpStream struct { Version uint64 Trace io.Writer Warnings []error // informational warnings, shouldn't stop assembly Errors []*lineError // errors that should prevent final assembly Program []byte // Final program bytes. Will stay nil if any errors // Running bytes as they are assembled. jumps must be resolved // and cblocks added before these bytes become a legal program. pending bytes.Buffer intc []uint64 // observed ints in code. We'll put them into a intcblock noIntcBlock bool // prevent prepending intcblock because asm has one bytec [][]byte // observed bytes in code. We'll put them into a bytecblock noBytecBlock bool // prevent prepending bytecblock because asm has one // Keep a stack of the types of what we would push and pop to typecheck a program typeStack []StackType // current sourceLine during assembly sourceLine int // map label string to position within pending buffer labels map[string]int // track references in order to patch in jump offsets labelReferences []labelReference // map opcode offsets to source line OffsetToLine map[int]int } // GetVersion returns the LogicSigVersion we're building to func (ops *OpStream) GetVersion() uint64 { if ops.Version == 0 { ops.Version = AssemblerDefaultVersion } return ops.Version } // createLabel inserts a label reference to point to the next // instruction, reporting an error for a duplicate. func (ops *OpStream) createLabel(label string) { if ops.labels == nil { ops.labels = make(map[string]int) } if _, ok := ops.labels[label]; ok { ops.errorf("duplicate label %s", label) } ops.labels[label] = ops.pending.Len() } // RecordSourceLine adds an entry to pc to line mapping func (ops *OpStream) RecordSourceLine() { if ops.OffsetToLine == nil { ops.OffsetToLine = make(map[int]int) } ops.OffsetToLine[ops.pending.Len()] = ops.sourceLine - 1 } // ReferToLabel records an opcode label refence to resolve later func (ops *OpStream) ReferToLabel(pc int, label string) { ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label}) } func (ops *OpStream) tpush(argType StackType) { ops.typeStack = append(ops.typeStack, argType) } func (ops *OpStream) tpusha(argType []StackType) { ops.typeStack = append(ops.typeStack, argType...) 
} func (ops *OpStream) tpop() (argType StackType) { if len(ops.typeStack) == 0 { argType = StackNone return } last := len(ops.typeStack) - 1 argType = ops.typeStack[last] ops.typeStack = ops.typeStack[:last] return } // Intc writes opcodes for loading a uint64 constant onto the stack. func (ops *OpStream) Intc(constIndex uint) { switch constIndex { case 0: ops.pending.WriteByte(0x22) // intc_0 case 1: ops.pending.WriteByte(0x23) // intc_1 case 2: ops.pending.WriteByte(0x24) // intc_2 case 3: ops.pending.WriteByte(0x25) // intc_3 default: if constIndex > 0xff { ops.error("cannot have more than 256 int constants") } ops.pending.WriteByte(0x21) // intc ops.pending.WriteByte(uint8(constIndex)) } if constIndex >= uint(len(ops.intc)) { ops.errorf("intc %d is not defined", constIndex) } else { ops.trace("intc %d %d", constIndex, ops.intc[constIndex]) } ops.tpush(StackUint64) } // Uint writes opcodes for loading a uint literal func (ops *OpStream) Uint(val uint64) { found := false var constIndex uint for i, cv := range ops.intc { if cv == val { constIndex = uint(i) found = true break } } if !found { constIndex = uint(len(ops.intc)) ops.intc = append(ops.intc, val) } ops.Intc(constIndex) } // Bytec writes opcodes for loading a []byte constant onto the stack. func (ops *OpStream) Bytec(constIndex uint) { switch constIndex { case 0: ops.pending.WriteByte(0x28) // bytec_0 case 1: ops.pending.WriteByte(0x29) // bytec_1 case 2: ops.pending.WriteByte(0x2a) // bytec_2 case 3: ops.pending.WriteByte(0x2b) // bytec_3 default: if constIndex > 0xff { ops.error("cannot have more than 256 byte constants") } ops.pending.WriteByte(0x27) // bytec ops.pending.WriteByte(uint8(constIndex)) } if constIndex >= uint(len(ops.bytec)) { ops.errorf("bytec %d is not defined", constIndex) } else { ops.trace("bytec %d %s", constIndex, hex.EncodeToString(ops.bytec[constIndex])) } ops.tpush(StackBytes) } // ByteLiteral writes opcodes and data for loading a []byte literal // Values are accumulated so that they can be put into a bytecblock func (ops *OpStream) ByteLiteral(val []byte) { found := false var constIndex uint for i, cv := range ops.bytec { if bytes.Compare(cv, val) == 0 { found = true constIndex = uint(i) break } } if !found { constIndex = uint(len(ops.bytec)) ops.bytec = append(ops.bytec, val) } ops.Bytec(constIndex) } // Arg writes opcodes for loading from Lsig.Args func (ops *OpStream) Arg(val uint64) error { switch val { case 0: ops.pending.WriteByte(0x2d) // arg_0 case 1: ops.pending.WriteByte(0x2e) // arg_1 case 2: ops.pending.WriteByte(0x2f) // arg_2 case 3: ops.pending.WriteByte(0x30) // arg_3 default: if val > 0xff { return ops.error("cannot have more than 256 args") } ops.pending.WriteByte(0x2c) ops.pending.WriteByte(uint8(val)) } ops.tpush(StackBytes) return nil } // Txn writes opcodes for loading a field from the current transaction func (ops *OpStream) Txn(val uint64) { if val >= uint64(len(TxnFieldNames)) { ops.errorf("invalid txn field: %d", val) } ops.pending.WriteByte(0x31) ops.pending.WriteByte(uint8(val)) ops.tpush(TxnFieldTypes[val]) } // Txna writes opcodes for loading array field from the current transaction func (ops *OpStream) Txna(fieldNum uint64, arrayFieldIdx uint64) { if fieldNum >= uint64(len(TxnFieldNames)) { ops.errorf("invalid txn field: %d", fieldNum) fieldNum = 0 // avoid further error in tpush as we forge ahead } if arrayFieldIdx > 255 { ops.errorf("txna array index beyond 255: %d", arrayFieldIdx) } ops.pending.WriteByte(0x36) ops.pending.WriteByte(uint8(fieldNum)) 
ops.pending.WriteByte(uint8(arrayFieldIdx)) ops.tpush(TxnFieldTypes[fieldNum]) } // Gtxn writes opcodes for loading a field from the current transaction func (ops *OpStream) Gtxn(gid, val uint64) { if val >= uint64(len(TxnFieldNames)) { ops.errorf("invalid gtxn field: %d", val) val = 0 // avoid further error in tpush as we forge ahead } if gid > 255 { ops.errorf("gtxn transaction index beyond 255: %d", gid) } ops.pending.WriteByte(0x33) ops.pending.WriteByte(uint8(gid)) ops.pending.WriteByte(uint8(val)) ops.tpush(TxnFieldTypes[val]) } // Gtxna writes opcodes for loading an array field from the current transaction func (ops *OpStream) Gtxna(gid, fieldNum uint64, arrayFieldIdx uint64) { if fieldNum >= uint64(len(TxnFieldNames)) { ops.errorf("invalid txn field: %d", fieldNum) fieldNum = 0 // avoid further error in tpush as we forge ahead } if gid > 255 { ops.errorf("gtxna group index beyond 255: %d", gid) } if arrayFieldIdx > 255 { ops.errorf("gtxna array index beyond 255: %d", arrayFieldIdx) } ops.pending.WriteByte(0x37) ops.pending.WriteByte(uint8(gid)) ops.pending.WriteByte(uint8(fieldNum)) ops.pending.WriteByte(uint8(arrayFieldIdx)) ops.tpush(TxnFieldTypes[fieldNum]) } // Global writes opcodes for loading an evaluator-global field func (ops *OpStream) Global(val GlobalField) { ops.pending.WriteByte(0x32) ops.pending.WriteByte(uint8(val)) ops.trace("%s (%s)", GlobalFieldNames[val], GlobalFieldTypes[val].String()) ops.tpush(GlobalFieldTypes[val]) } // AssetHolding writes opcodes for accessing data from AssetHolding func (ops *OpStream) AssetHolding(val uint64) { if val >= uint64(len(AssetHoldingFieldNames)) { ops.errorf("invalid asset holding field: %d", val) val = 0 // avoid further error in tpush as we forge ahead } ops.pending.WriteByte(opsByName[ops.Version]["asset_holding_get"].Opcode) ops.pending.WriteByte(uint8(val)) ops.tpush(AssetHoldingFieldTypes[val]) ops.tpush(StackUint64) } // AssetParams writes opcodes for accessing data from AssetParams func (ops *OpStream) AssetParams(val uint64) { if val >= uint64(len(AssetParamsFieldNames)) { ops.errorf("invalid asset params field: %d", val) val = 0 // avoid further error in tpush as we forge ahead } ops.pending.WriteByte(opsByName[ops.Version]["asset_params_get"].Opcode) ops.pending.WriteByte(uint8(val)) ops.tpush(AssetParamsFieldTypes[val]) ops.tpush(StackUint64) } func assembleInt(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { ops.error("int needs one argument") args = []string{"0"} // By continuing, Uint will maintain type stack. } // check friendly TypeEnum constants te, isTypeEnum := txnTypeConstToUint64[args[0]] if isTypeEnum { ops.Uint(te) return nil } // check raw transaction type strings tt, isTypeStr := txnTypeIndexes[args[0]] if isTypeStr { ops.Uint(tt) return nil } // check OnCompetion constants oc, isOCStr := onCompletionConstToUint64[args[0]] if isOCStr { ops.Uint(oc) return nil } val, err := strconv.ParseUint(args[0], 0, 64) if err != nil { ops.error(err) val = 0 // By continuing, Uint will maintain type stack. } ops.Uint(val) return nil } // Explicit invocation of const lookup and push func assembleIntC(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { ops.error("intc operation needs one argument") args = []string{"0"} // By continuing, Intc will maintain type stack. } constIndex, err := strconv.ParseUint(args[0], 0, 64) if err != nil { ops.error(err) constIndex = 0 // By continuing, Intc will maintain type stack. 
} ops.Intc(uint(constIndex)) return nil } func assembleByteC(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { ops.error("bytec operation needs one argument") args = []string{"0"} // By continuing, Bytec will maintain type stack. } constIndex, err := strconv.ParseUint(args[0], 0, 64) if err != nil { ops.error(err) constIndex = 0 // By continuing, Bytec will maintain type stack. } ops.Bytec(uint(constIndex)) return nil } func base32DecdodeAnyPadding(x string) (val []byte, err error) { val, err = base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(x) if err != nil { // try again with standard padding var e2 error val, e2 = base32.StdEncoding.DecodeString(x) if e2 == nil { err = nil } } return } func parseBinaryArgs(args []string) (val []byte, consumed int, err error) { arg := args[0] if strings.HasPrefix(arg, "base32(") || strings.HasPrefix(arg, "b32(") { open := strings.IndexRune(arg, '(') close := strings.IndexRune(arg, ')') if close == -1 { err = errors.New("byte base32 arg lacks close paren") return } val, err = base32DecdodeAnyPadding(arg[open+1 : close]) if err != nil { return } consumed = 1 } else if strings.HasPrefix(arg, "base64(") || strings.HasPrefix(arg, "b64(") { open := strings.IndexRune(arg, '(') close := strings.IndexRune(arg, ')') if close == -1 { err = errors.New("byte base64 arg lacks close paren") return } val, err = base64.StdEncoding.DecodeString(arg[open+1 : close]) if err != nil { return } consumed = 1 } else if strings.HasPrefix(arg, "0x") { val, err = hex.DecodeString(arg[2:]) if err != nil { return } consumed = 1 } else if arg == "base32" || arg == "b32" { if len(args) < 2 { err = fmt.Errorf("need literal after 'byte %s'", arg) return } val, err = base32DecdodeAnyPadding(args[1]) if err != nil { return } consumed = 2 } else if arg == "base64" || arg == "b64" { if len(args) < 2 { err = fmt.Errorf("need literal after 'byte %s'", arg) return } val, err = base64.StdEncoding.DecodeString(args[1]) if err != nil { return } consumed = 2 } else if len(arg) > 1 && arg[0] == '"' && arg[len(arg)-1] == '"' { val, err = parseStringLiteral(arg) consumed = 1 } else { err = fmt.Errorf("byte arg did not parse: %v", arg) return } return } func parseStringLiteral(input string) (result []byte, err error) { start := 0 end := len(input) - 1 if input[start] != '"' || input[end] != '"' { return nil, fmt.Errorf("no quotes") } start++ escapeSeq := false hexSeq := false result = make([]byte, 0, end-start+1) // skip first and last quotes pos := start for pos < end { char := input[pos] if char == '\\' && !escapeSeq { if hexSeq { return nil, fmt.Errorf("escape seq inside hex number") } escapeSeq = true pos++ continue } if escapeSeq { escapeSeq = false switch char { case 'n': char = '\n' case 'r': char = '\r' case 't': char = '\t' case '\\': char = '\\' case '"': char = '"' case 'x': hexSeq = true pos++ continue default: return nil, fmt.Errorf("invalid escape seq \\%c", char) } } if hexSeq { hexSeq = false if pos >= len(input)-2 { // count a closing quote return nil, fmt.Errorf("non-terminated hex seq") } num, err := strconv.ParseUint(input[pos:pos+2], 16, 8) if err != nil { return nil, err } char = uint8(num) pos++ } result = append(result, char) pos++ } if escapeSeq || hexSeq { return nil, fmt.Errorf("non-terminated escape seq") } return } // byte {base64,b64,base32,b32}(...) // byte {base64,b64,base32,b32} ... // byte 0x.... 
// byte "this is a string\n" func assembleByte(ops *OpStream, spec *OpSpec, args []string) error { var val []byte var err error if len(args) == 0 { ops.error("byte operation needs byte literal argument") args = []string{"0x00"} // By continuing, ByteLiteral will maintain type stack. } val, _, err = parseBinaryArgs(args) if err != nil { ops.error(err) val = []byte{} // By continuing, ByteLiteral will maintain type stack. } ops.ByteLiteral(val) return nil } func assembleIntCBlock(ops *OpStream, spec *OpSpec, args []string) error { ops.pending.WriteByte(0x20) // intcblock var scratch [binary.MaxVarintLen64]byte l := binary.PutUvarint(scratch[:], uint64(len(args))) ops.pending.Write(scratch[:l]) ops.intc = make([]uint64, len(args)) for i, xs := range args { cu, err := strconv.ParseUint(xs, 0, 64) if err != nil { ops.error(err) } l = binary.PutUvarint(scratch[:], cu) ops.pending.Write(scratch[:l]) ops.intc[i] = cu } ops.noIntcBlock = true return nil } func assembleByteCBlock(ops *OpStream, spec *OpSpec, args []string) error { ops.pending.WriteByte(0x26) // bytecblock bvals := make([][]byte, 0, len(args)) rest := args for len(rest) > 0 { val, consumed, err := parseBinaryArgs(rest) if err != nil { // Would be nice to keep going, as in // intcblock, but parseBinaryArgs would have // to return a useful consumed value even in // the face of errors. Hard. ops.error(err) return nil } bvals = append(bvals, val) rest = rest[consumed:] } var scratch [binary.MaxVarintLen64]byte l := binary.PutUvarint(scratch[:], uint64(len(bvals))) ops.pending.Write(scratch[:l]) for _, bv := range bvals { l := binary.PutUvarint(scratch[:], uint64(len(bv))) ops.pending.Write(scratch[:l]) ops.pending.Write(bv) } ops.bytec = bvals ops.noBytecBlock = true return nil } // addr A1EU... // parses base32-with-checksum account address strings into a byte literal func assembleAddr(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { ops.error("addr operation needs one argument") // By continuing, ByteLiteral will maintain type stack. args = []string{"7777777777777777777777777777777777777777777777777774MSJUVU"} } addr, err := basics.UnmarshalChecksumAddress(args[0]) if err != nil { ops.error(err) addr = basics.Address{} // By continuing, ByteLiteral will maintain type stack. } ops.ByteLiteral(addr[:]) return nil } func assembleArg(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { ops.error("arg operation needs one argument") args = []string{"0"} } val, err := strconv.ParseUint(args[0], 0, 64) if err != nil { ops.error(err) val = 0 // Let ops.Arg maintain type stack } ops.Arg(val) return nil } func assembleBranch(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { ops.error("branch operation needs label argument") // proceeding so checkArgs runs } else { ops.ReferToLabel(ops.pending.Len(), args[0]) } ops.checkArgs(*spec) ops.pending.WriteByte(spec.Opcode) // zero bytes will get replaced with actual offset in resolveLabels() ops.pending.WriteByte(0) ops.pending.WriteByte(0) return nil } func assembleLoad(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { ops.error("load operation needs one argument") args = []string{"0"} // By continuing, tpush will maintain type stack. 
} val, err := strconv.ParseUint(args[0], 0, 64) if err != nil { ops.error(err) val = 0 } if val > EvalMaxScratchSize { ops.errorf("load outside 0..255: %d", val) val = 0 } ops.pending.WriteByte(0x34) ops.pending.WriteByte(byte(val)) ops.tpush(StackAny) return nil } func assembleStore(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { ops.error("store operation needs one argument") args = []string{"0"} // By continuing, checkArgs, tpush will maintain type stack. } val, err := strconv.ParseUint(args[0], 0, 64) if err != nil { ops.error(err) val = 0 } if val > EvalMaxScratchSize { ops.errorf("store outside 0..255: %d", val) val = 0 } ops.checkArgs(*spec) ops.pending.WriteByte(spec.Opcode) ops.pending.WriteByte(byte(val)) return nil } func assembleSubstring(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 2 { ops.error("substring expects 2 args") args = []string{"0", "0"} // By continuing, checkArgs, tpush will maintain type stack. } start, err := strconv.ParseUint(args[0], 0, 64) if err != nil { ops.error(err) start = 0 } if start > EvalMaxScratchSize { ops.error("substring limited to 0..255") start = 0 } end, err := strconv.ParseUint(args[1], 0, 64) if err != nil { ops.error(err) end = start } if end > EvalMaxScratchSize { ops.error("substring limited to 0..255") end = start } if end < start { ops.error("substring end is before start") end = start } opcode := byte(0x51) ops.checkArgs(*spec) ops.pending.WriteByte(opcode) ops.pending.WriteByte(byte(start)) ops.pending.WriteByte(byte(end)) ops.trace(" pushes([]byte)") ops.tpush(StackBytes) return nil } func disSubstring(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 2 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } start := uint(dis.program[dis.pc+1]) end := uint(dis.program[dis.pc+2]) dis.nextpc = dis.pc + 3 _, dis.err = fmt.Fprintf(dis.out, "substring %d %d\n", start, end) } func assembleTxn(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { return ops.error("txn expects one argument") } fs, ok := txnFieldSpecByName[args[0]] if !ok { return ops.errorf("txn unknown arg: %v", args[0]) } _, ok = txnaFieldSpecByField[fs.field] if ok { return ops.errorf("found txna field %v in txn op", args[0]) } if fs.version > ops.Version { return ops.errorf("txn %s available in version %d. Missed #pragma version?", args[0], fs.version) } val := fs.field ops.Txn(uint64(val)) return nil } // assembleTxn2 delegates to assembleTxn or assembleTxna depending on number of operands func assembleTxn2(ops *OpStream, spec *OpSpec, args []string) error { if len(args) == 1 { return assembleTxn(ops, spec, args) } if len(args) == 2 { return assembleTxna(ops, spec, args) } return ops.error("txn expects one or two arguments") } func assembleTxna(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 2 { return ops.error("txna expects two arguments") } fs, ok := txnFieldSpecByName[args[0]] if !ok { return ops.errorf("txna unknown arg: %v", args[0]) } _, ok = txnaFieldSpecByField[fs.field] if !ok { return ops.errorf("txna unknown arg: %v", args[0]) } if fs.version > ops.Version { return ops.errorf("txna %s available in version %d. 
Missed #pragma version?", args[0], fs.version) } arrayFieldIdx, err := strconv.ParseUint(args[1], 0, 64) if err != nil { return ops.error(err) } fieldNum := fs.field ops.Txna(uint64(fieldNum), uint64(arrayFieldIdx)) return nil } func assembleGtxn(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 2 { return ops.error("gtxn expects two arguments") } gtid, err := strconv.ParseUint(args[0], 0, 64) if err != nil { return ops.error(err) } fs, ok := txnFieldSpecByName[args[1]] if !ok { return ops.errorf("gtxn unknown arg: %v", args[1]) } _, ok = txnaFieldSpecByField[fs.field] if ok { return ops.errorf("found gtxna field %v in gtxn op", args[1]) } if fs.version > ops.Version { return ops.errorf("gtxn %s available in version %d. Missed #pragma version?", args[1], fs.version) } val := fs.field ops.Gtxn(gtid, uint64(val)) return nil } func assembleGtxn2(ops *OpStream, spec *OpSpec, args []string) error { if len(args) == 2 { return assembleGtxn(ops, spec, args) } if len(args) == 3 { return assembleGtxna(ops, spec, args) } return ops.error("gtxn expects two or three arguments") } func assembleGtxna(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 3 { return ops.error("gtxna expects three arguments") } gtid, err := strconv.ParseUint(args[0], 0, 64) if err != nil { return ops.error(err) } fs, ok := txnFieldSpecByName[args[1]] if !ok { return ops.errorf("gtxna unknown arg: %v", args[1]) } _, ok = txnaFieldSpecByField[fs.field] if !ok { return ops.errorf("gtxna unknown arg: %v", args[1]) } if fs.version > ops.Version { return ops.errorf("gtxna %s available in version %d. Missed #pragma version?", args[1], fs.version) } arrayFieldIdx, err := strconv.ParseUint(args[2], 0, 64) if err != nil { return ops.error(err) } fieldNum := fs.field ops.Gtxna(gtid, uint64(fieldNum), uint64(arrayFieldIdx)) return nil } func assembleGlobal(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { ops.error("global expects one argument") args = []string{GlobalFieldNames[0]} } fs, ok := globalFieldSpecByName[args[0]] if !ok { ops.errorf("global unknown arg: %v", args[0]) fs, _ = globalFieldSpecByName[GlobalFieldNames[0]] } if fs.version > ops.Version { ops.errorf("global %s available in version %d. 
Missed #pragma version?", args[0], fs.version) } ops.Global(fs.gfield) return nil } func assembleAssetHolding(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { ops.error("asset_holding_get expects one argument") args = []string{AssetHoldingFieldNames[0]} } val, ok := assetHoldingFields[args[0]] if !ok { ops.errorf("asset_holding_get unknown arg: %v", args[0]) val = 0 } ops.AssetHolding(val) return nil } func assembleAssetParams(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { ops.error("asset_params_get expects one argument") args = []string{AssetParamsFieldNames[0]} } val, ok := assetParamsFields[args[0]] if !ok { ops.errorf("asset_params_get unknown arg: %v", args[0]) val = 0 } ops.AssetParams(val) return nil } type assembleFunc func(*OpStream, *OpSpec, []string) error func asmDefault(ops *OpStream, spec *OpSpec, args []string) error { ops.checkArgs(*spec) if len(spec.Returns) > 0 { ops.tpusha(spec.Returns) ops.trace(" pushes(%s", spec.Returns[0].String()) if len(spec.Returns) > 1 { for _, rt := range spec.Returns[1:] { ops.trace(", %s", rt.String()) } } ops.trace(")") } ops.pending.WriteByte(spec.Opcode) return nil } // keywords handle parsing and assembling special asm language constructs like 'addr' var keywords map[string]assembleFunc func init() { // WARNING: special case op assembly by argOps functions must do their own type stack maintenance via ops.tpop() ops.tpush()/ops.tpusha() keywords = make(map[string]assembleFunc) keywords["int"] = assembleInt keywords["byte"] = assembleByte keywords["addr"] = assembleAddr // parse basics.Address, actually just another []byte constant // WARNING: special case op assembly by argOps functions must do their own type stack maintenance via ops.tpop() ops.tpush()/ops.tpusha() } type lineError struct { Line int Err error } func fmtLineError(line int, format string, args ...interface{}) error { return &lineError{Line: line, Err: fmt.Errorf(format, args...)} } func (le *lineError) Error() string { return fmt.Sprintf("%d: %s", le.Line, le.Err.Error()) } func (le *lineError) Unwrap() error { return le.Err } func typecheck(expected, got StackType) bool { // Some ops push 'any' and we wait for run time to see what it is. // Some of those 'any' are based on fields that we _could_ know now but haven't written a more detailed system of typecheck for (yet). if (expected == StackAny) || (got == StackAny) { return true } return expected == got } var spaces = [256]uint8{'\t': 1, ' ': 1} func fieldsFromLine(line string) []string { var fields []string i := 0 for i < len(line) && spaces[line[i]] != 0 { i++ } start := i inString := false inBase64 := false for i < len(line) { if spaces[line[i]] == 0 { // if not space switch line[i] { case '"': // is a string literal? if !inString { if i == 0 || i > 0 && spaces[line[i-1]] != 0 { inString = true } } else { if line[i-1] != '\\' { // if not escape symbol inString = false } } case '/': // is a comment? if i < len(line)-1 && line[i+1] == '/' && !inBase64 && !inString { if start != i { // if a comment without whitespace fields = append(fields, line[start:i]) } return fields } case '(': // is base64( seq? 
prefix := line[start:i] if prefix == "base64" || prefix == "b64" { inBase64 = true } case ')': // is ) as base64( completion if inBase64 { inBase64 = false } default: } i++ continue } if !inString { field := line[start:i] fields = append(fields, field) if field == "base64" || field == "b64" { inBase64 = true } else if inBase64 { inBase64 = false } } i++ if !inString { for i < len(line) && spaces[line[i]] != 0 { i++ } start = i } } // add rest of the string if any if start < len(line) { fields = append(fields, line[start:i]) } return fields } func (ops *OpStream) trace(format string, args ...interface{}) { if ops.Trace == nil { return } fmt.Fprintf(ops.Trace, format, args...) } // checks (and pops) arg types from arg type stack func (ops *OpStream) checkArgs(spec OpSpec) { firstPop := true for i := len(spec.Args) - 1; i >= 0; i-- { argType := spec.Args[i] stype := ops.tpop() if firstPop { firstPop = false ops.trace("pops(%s", argType.String()) } else { ops.trace(", %s", argType.String()) } if !typecheck(argType, stype) { err := fmt.Errorf("%s arg %d wanted type %s got %s", spec.Name, i, argType.String(), stype.String()) if len(ops.labelReferences) > 0 { ops.warnf("%w; but branches have happened and assembler does not precisely track types in this case", err) } else { ops.error(err) } } } if !firstPop { ops.trace(")") } } // assemble reads text from an input and accumulates the program func (ops *OpStream) assemble(fin io.Reader) error { scanner := bufio.NewScanner(fin) ops.sourceLine = 0 for scanner.Scan() { ops.sourceLine++ line := scanner.Text() if len(line) == 0 { ops.trace("%d: 0 line\n", ops.sourceLine) continue } if strings.HasPrefix(line, "//") { ops.trace("%d: // line\n", ops.sourceLine) continue } if strings.HasPrefix(line, "#pragma") { // all pragmas must be be already processed in advance ops.trace("%d: #pragma line\n", ops.sourceLine) continue } fields := fieldsFromLine(line) if len(fields) == 0 { ops.trace("%d: no fields\n", ops.sourceLine) continue } opstring := fields[0] spec, ok := opsByName[ops.Version][opstring] var asmFunc assembleFunc if ok { asmFunc = spec.asm } else { kwFunc, ok := keywords[opstring] if ok { asmFunc = kwFunc } } if asmFunc != nil { ops.trace("%3d: %s\t", ops.sourceLine, opstring) ops.RecordSourceLine() asmFunc(ops, &spec, fields[1:]) ops.trace("\n") continue } if opstring[len(opstring)-1] == ':' { ops.createLabel(opstring[:len(opstring)-1]) continue } ops.errorf("unknown opcode: %v", opstring) } // backward compatibility: do not allow jumps behind last instruction in TEAL v1 if ops.Version <= 1 { for label, dest := range ops.labels { if dest == ops.pending.Len() { ops.errorf("label %v is too far away", label) } } } // TODO: warn if expected resulting stack is not len==1 ? 
ops.resolveLabels() program := ops.prependCBlocks() if ops.Errors != nil { l := len(ops.Errors) if l == 1 { return errors.New("1 error") } return fmt.Errorf("%d errors", l) } ops.Program = program return nil } func (ops *OpStream) resolveLabels() { saved := ops.sourceLine raw := ops.pending.Bytes() reported := make(map[string]bool) for _, lr := range ops.labelReferences { ops.sourceLine = lr.sourceLine dest, ok := ops.labels[lr.label] if !ok { if !reported[lr.label] { ops.errorf("reference to undefined label %v", lr.label) } reported[lr.label] = true continue } // all branch instructions (currently) are opcode byte and 2 offset bytes, and the destination is relative to the next pc as if the branch was a no-op naturalPc := lr.position + 3 if dest < naturalPc { ops.errorf("label %v is before reference but only forward jumps are allowed", lr.label) continue } jump := dest - naturalPc if jump > 0x7fff { ops.errorf("label %v is too far away", lr.label) continue } raw[lr.position+1] = uint8(jump >> 8) raw[lr.position+2] = uint8(jump & 0x0ff) } ops.pending.Reset() ops.pending.Write(raw) ops.sourceLine = saved } // AssemblerDefaultVersion what version of code do we emit by default // AssemblerDefaultVersion is set to 1 on puprose // to prevent accidental building of v1 official templates with version 2 // because these templates are not aware of rekeying. const AssemblerDefaultVersion = 1 // AssemblerMaxVersion is a maximum supported assembler version const AssemblerMaxVersion = LogicVersion const assemblerNoVersion = (^uint64(0)) // prependCBlocks completes the assembly by inserting cblocks if needed. func (ops *OpStream) prependCBlocks() []byte { var scratch [binary.MaxVarintLen64]byte prebytes := bytes.Buffer{} vlen := binary.PutUvarint(scratch[:], ops.GetVersion()) prebytes.Write(scratch[:vlen]) if len(ops.intc) > 0 && !ops.noIntcBlock { prebytes.WriteByte(0x20) // intcblock vlen := binary.PutUvarint(scratch[:], uint64(len(ops.intc))) prebytes.Write(scratch[:vlen]) for _, iv := range ops.intc { vlen = binary.PutUvarint(scratch[:], iv) prebytes.Write(scratch[:vlen]) } } if len(ops.bytec) > 0 && !ops.noBytecBlock { prebytes.WriteByte(0x26) // bytecblock vlen := binary.PutUvarint(scratch[:], uint64(len(ops.bytec))) prebytes.Write(scratch[:vlen]) for _, bv := range ops.bytec { vlen = binary.PutUvarint(scratch[:], uint64(len(bv))) prebytes.Write(scratch[:vlen]) prebytes.Write(bv) } } pbl := prebytes.Len() outl := ops.pending.Len() out := make([]byte, pbl+outl) pl, err := prebytes.Read(out) if pl != pbl || err != nil { ops.errorf("wat: %d prebytes, %d to buffer? err=%w", pbl, pl, err) return nil } ol, err := ops.pending.Read(out[pl:]) if ol != outl || err != nil { ops.errorf("%d program bytes but %d to buffer. 
err=%w", outl, ol, err) return nil } // fixup offset to line mapping newOffsetToLine := make(map[int]int, len(ops.OffsetToLine)) for o, l := range ops.OffsetToLine { newOffsetToLine[o+pbl] = l } ops.OffsetToLine = newOffsetToLine return out } func (ops *OpStream) error(problem interface{}) error { var le *lineError switch p := problem.(type) { case string: le = &lineError{Line: ops.sourceLine, Err: errors.New(p)} case error: le = &lineError{Line: ops.sourceLine, Err: p} default: le = &lineError{Line: ops.sourceLine, Err: fmt.Errorf("%#v", p)} } ops.Errors = append(ops.Errors, le) return le } func (ops *OpStream) errorf(format string, a ...interface{}) error { return ops.error(fmt.Errorf(format, a...)) } func (ops *OpStream) warn(problem interface{}) error { var le *lineError switch p := problem.(type) { case string: le = &lineError{Line: ops.sourceLine, Err: errors.New(p)} case error: le = &lineError{Line: ops.sourceLine, Err: p} default: le = &lineError{Line: ops.sourceLine, Err: fmt.Errorf("%#v", p)} } warning := fmt.Errorf("warning: %w", le) ops.Warnings = append(ops.Warnings, warning) return warning } func (ops *OpStream) warnf(format string, a ...interface{}) error { return ops.warn(fmt.Errorf(format, a...)) } // ReportProblems issues accumulated warnings and errors to stderr. func (ops *OpStream) ReportProblems(fname string) { for i, e := range ops.Errors { if i > 9 { break } fmt.Fprintf(os.Stderr, "%s: %s\n", fname, e) } for i, w := range ops.Warnings { if i > 9 { break } fmt.Fprintf(os.Stderr, "%s: %s\n", fname, w) } } // AssembleString takes an entire program in a string and assembles it to bytecode using AssemblerDefaultVersion func AssembleString(text string) (*OpStream, error) { return AssembleStringWithVersion(text, assemblerNoVersion) } // AssembleStringWithVersion takes an entire program in a string and // assembles it to bytecode using the assembler version specified. If // version is assemblerNoVersion it uses #pragma version or fallsback // to AssemblerDefaultVersion. OpStream is returned to allow access // to warnings, (multiple) errors, or the PC to source line mapping. func AssembleStringWithVersion(text string, version uint64) (*OpStream, error) { sr := strings.NewReader(text) ps := PragmaStream{} err := ps.Process(sr) if err != nil { return nil, err } // If version not set yet then set either default or #pragma version. 
// We have to use assemblerNoVersion as a marker for non-specified version // because version 0 is valid version for TEAL v1 if version == assemblerNoVersion { if ps.Version != 0 { version = ps.Version } else { version = AssemblerDefaultVersion } } else if ps.Version != 0 && version != ps.Version { err = fmt.Errorf("version mismatch: assembling v%d with v%d assembler", ps.Version, version) return nil, err } else { // otherwise the passed version matches the pragma and we are ok } sr = strings.NewReader(text) ops := OpStream{Version: version} err = ops.assemble(sr) return &ops, err } // PragmaStream represents all parsed pragmas from the program type PragmaStream struct { Version uint64 } // Process all pragmas in the input stream func (ps *PragmaStream) Process(fin io.Reader) (err error) { scanner := bufio.NewScanner(fin) sourceLine := 0 for scanner.Scan() { sourceLine++ line := scanner.Text() if len(line) == 0 || !strings.HasPrefix(line, "#pragma") { continue } fields := strings.Split(line, " ") if fields[0] != "#pragma" { return fmtLineError(sourceLine, "invalid syntax: %s", fields[0]) } if len(fields) < 2 { return fmtLineError(sourceLine, "empty pragma") } key := fields[1] switch key { case "version": if len(fields) < 3 { return fmtLineError(sourceLine, "no version value") } value := fields[2] var ver uint64 if sourceLine != 1 { return fmtLineError(sourceLine, "#pragma version is only allowed on 1st line") } ver, err = strconv.ParseUint(value, 0, 64) if err != nil { return &lineError{Line: sourceLine, Err: err} } if ver < 1 || ver > AssemblerMaxVersion { return fmtLineError(sourceLine, "unsupported version: %d", ver) } ps.Version = ver default: return fmtLineError(sourceLine, "unsupported pragma directive: %s", key) } } return } type disassembleState struct { program []byte pc int out io.Writer labelCount int pendingLabels map[int]string nextpc int err error } func (dis *disassembleState) putLabel(label string, target int) { if dis.pendingLabels == nil { dis.pendingLabels = make(map[int]string) } dis.pendingLabels[target] = label } func (dis *disassembleState) outputLabelIfNeeded() (err error) { if label, hasLabel := dis.pendingLabels[dis.pc]; hasLabel { _, err = fmt.Fprintf(dis.out, "%s:\n", label) } return } type disassembleFunc func(dis *disassembleState, spec *OpSpec) func disDefault(dis *disassembleState, spec *OpSpec) { dis.nextpc = dis.pc + 1 _, dis.err = fmt.Fprintf(dis.out, "%s\n", spec.Name) } var errShortIntcblock = errors.New("intcblock ran past end of program") var errTooManyIntc = errors.New("intcblock with too many items") func parseIntcblock(program []byte, pc int) (intc []uint64, nextpc int, err error) { pos := pc + 1 numInts, bytesUsed := binary.Uvarint(program[pos:]) if bytesUsed <= 0 { err = fmt.Errorf("could not decode int const block size at pc=%d", pos) return } pos += bytesUsed if numInts > uint64(len(program)) { err = errTooManyIntc return } intc = make([]uint64, numInts) for i := uint64(0); i < numInts; i++ { if pos >= len(program) { err = errShortIntcblock return } intc[i], bytesUsed = binary.Uvarint(program[pos:]) if bytesUsed <= 0 { err = fmt.Errorf("could not decode int const[%d] at pc=%d", i, pos) return } pos += bytesUsed } nextpc = pos return } func checkIntConstBlock(cx *evalContext) int { pos := cx.pc + 1 numInts, bytesUsed := binary.Uvarint(cx.program[pos:]) if bytesUsed <= 0 { cx.err = fmt.Errorf("could not decode int const block size at pc=%d", pos) return 1 } pos += bytesUsed if numInts > uint64(len(cx.program)) { cx.err = errTooManyIntc return 0 } 
//intc = make([]uint64, numInts) for i := uint64(0); i < numInts; i++ { if pos >= len(cx.program) { cx.err = errShortIntcblock return 0 } _, bytesUsed = binary.Uvarint(cx.program[pos:]) if bytesUsed <= 0 { cx.err = fmt.Errorf("could not decode int const[%d] at pc=%d", i, pos) return 1 } pos += bytesUsed } cx.nextpc = pos return 1 } var errShortBytecblock = errors.New("bytecblock ran past end of program") var errTooManyItems = errors.New("bytecblock with too many items") func parseBytecBlock(program []byte, pc int) (bytec [][]byte, nextpc int, err error) { pos := pc + 1 numItems, bytesUsed := binary.Uvarint(program[pos:]) if bytesUsed <= 0 { err = fmt.Errorf("could not decode []byte const block size at pc=%d", pos) return } pos += bytesUsed if numItems > uint64(len(program)) { err = errTooManyItems return } bytec = make([][]byte, numItems) for i := uint64(0); i < numItems; i++ { if pos >= len(program) { err = errShortBytecblock return } itemLen, bytesUsed := binary.Uvarint(program[pos:]) if bytesUsed <= 0 { err = fmt.Errorf("could not decode []byte const[%d] at pc=%d", i, pos) return } pos += bytesUsed if pos >= len(program) { err = errShortBytecblock return } end := uint64(pos) + itemLen if end > uint64(len(program)) || end < uint64(pos) { err = errShortBytecblock return } bytec[i] = program[pos : pos+int(itemLen)] pos += int(itemLen) } nextpc = pos return } func checkByteConstBlock(cx *evalContext) int { pos := cx.pc + 1 numItems, bytesUsed := binary.Uvarint(cx.program[pos:]) if bytesUsed <= 0 { cx.err = fmt.Errorf("could not decode []byte const block size at pc=%d", pos) return 1 } pos += bytesUsed if numItems > uint64(len(cx.program)) { cx.err = errTooManyItems return 0 } //bytec = make([][]byte, numItems) for i := uint64(0); i < numItems; i++ { if pos >= len(cx.program) { cx.err = errShortBytecblock return 0 } itemLen, bytesUsed := binary.Uvarint(cx.program[pos:]) if bytesUsed <= 0 { cx.err = fmt.Errorf("could not decode []byte const[%d] at pc=%d", i, pos) return 1 } pos += bytesUsed if pos >= len(cx.program) { cx.err = errShortBytecblock return 0 } end := uint64(pos) + itemLen if end > uint64(len(cx.program)) || end < uint64(pos) { cx.err = errShortBytecblock return 0 } //bytec[i] = program[pos : pos+int(itemLen)] pos += int(itemLen) } cx.nextpc = pos return 1 } func disIntcblock(dis *disassembleState, spec *OpSpec) { var intc []uint64 intc, dis.nextpc, dis.err = parseIntcblock(dis.program, dis.pc) if dis.err != nil { return } _, dis.err = fmt.Fprintf(dis.out, "intcblock") if dis.err != nil { return } for _, iv := range intc { _, dis.err = fmt.Fprintf(dis.out, " %d", iv) if dis.err != nil { return } } _, dis.err = dis.out.Write([]byte("\n")) } func disIntc(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } dis.nextpc = dis.pc + 2 _, dis.err = fmt.Fprintf(dis.out, "intc %d\n", dis.program[dis.pc+1]) } func disBytecblock(dis *disassembleState, spec *OpSpec) { var bytec [][]byte bytec, dis.nextpc, dis.err = parseBytecBlock(dis.program, dis.pc) if dis.err != nil { return } _, dis.err = fmt.Fprintf(dis.out, "bytecblock") if dis.err != nil { return } for _, bv := range bytec { _, dis.err = fmt.Fprintf(dis.out, " 0x%s", hex.EncodeToString(bv)) if dis.err != nil { return } } _, dis.err = dis.out.Write([]byte("\n")) } func disBytec(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 1 if len(dis.program) <= 
lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } dis.nextpc = dis.pc + 2 _, dis.err = fmt.Fprintf(dis.out, "bytec %d\n", dis.program[dis.pc+1]) } func disArg(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } dis.nextpc = dis.pc + 2 _, dis.err = fmt.Fprintf(dis.out, "arg %d\n", dis.program[dis.pc+1]) } func disTxn(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } dis.nextpc = dis.pc + 2 txarg := dis.program[dis.pc+1] if int(txarg) >= len(TxnFieldNames) { dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) return } _, dis.err = fmt.Fprintf(dis.out, "txn %s\n", TxnFieldNames[txarg]) } func disTxna(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 2 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } dis.nextpc = dis.pc + 3 txarg := dis.program[dis.pc+1] if int(txarg) >= len(TxnFieldNames) { dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) return } arrayFieldIdx := dis.program[dis.pc+2] _, dis.err = fmt.Fprintf(dis.out, "txna %s %d\n", TxnFieldNames[txarg], arrayFieldIdx) } func disGtxn(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 2 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } dis.nextpc = dis.pc + 3 gi := dis.program[dis.pc+1] txarg := dis.program[dis.pc+2] if int(txarg) >= len(TxnFieldNames) { dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) return } _, dis.err = fmt.Fprintf(dis.out, "gtxn %d %s\n", gi, TxnFieldNames[txarg]) } func disGtxna(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 3 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } dis.nextpc = dis.pc + 4 gi := dis.program[dis.pc+1] txarg := dis.program[dis.pc+2] if int(txarg) >= len(TxnFieldNames) { dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) return } arrayFieldIdx := dis.program[dis.pc+3] _, dis.err = fmt.Fprintf(dis.out, "gtxna %d %s %d\n", gi, TxnFieldNames[txarg], arrayFieldIdx) } func disGlobal(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } dis.nextpc = dis.pc + 2 garg := dis.program[dis.pc+1] if int(garg) >= len(GlobalFieldNames) { dis.err = fmt.Errorf("invalid global arg index %d at pc=%d", garg, dis.pc) return } _, dis.err = fmt.Fprintf(dis.out, "global %s\n", GlobalFieldNames[garg]) } func disBranch(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 2 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } dis.nextpc = dis.pc + 3 offset := (uint(dis.program[dis.pc+1]) << 8) | 
uint(dis.program[dis.pc+2]) target := int(offset) + dis.pc + 3 label, labelExists := dis.pendingLabels[target] if !labelExists { dis.labelCount++ label = fmt.Sprintf("label%d", dis.labelCount) dis.putLabel(label, target) } _, dis.err = fmt.Fprintf(dis.out, "%s %s\n", spec.Name, label) } func disLoad(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } n := uint(dis.program[dis.pc+1]) dis.nextpc = dis.pc + 2 _, dis.err = fmt.Fprintf(dis.out, "load %d\n", n) } func disStore(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } n := uint(dis.program[dis.pc+1]) dis.nextpc = dis.pc + 2 _, dis.err = fmt.Fprintf(dis.out, "store %d\n", n) } func disAssetHolding(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } dis.nextpc = dis.pc + 2 arg := dis.program[dis.pc+1] if int(arg) >= len(AssetHoldingFieldNames) { dis.err = fmt.Errorf("invalid asset holding arg index %d at pc=%d", arg, dis.pc) return } _, dis.err = fmt.Fprintf(dis.out, "asset_holding_get %s\n", AssetHoldingFieldNames[arg]) } func disAssetParams(dis *disassembleState, spec *OpSpec) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) return } dis.nextpc = dis.pc + 2 arg := dis.program[dis.pc+1] if int(arg) >= len(AssetParamsFieldNames) { dis.err = fmt.Errorf("invalid asset params arg index %d at pc=%d", arg, dis.pc) return } _, dis.err = fmt.Fprintf(dis.out, "asset_params_get %s\n", AssetParamsFieldNames[arg]) } type disInfo struct { pcOffset []PCOffset hasStatefulOps bool } // disassembleInstrumented is like Disassemble, but additionally returns where // each program counter value maps in the disassembly func disassembleInstrumented(program []byte) (text string, ds disInfo, err error) { out := strings.Builder{} dis := disassembleState{program: program, out: &out} version, vlen := binary.Uvarint(program) if vlen <= 0 { fmt.Fprintf(dis.out, "// invalid version\n") text = out.String() return } if version > LogicVersion { fmt.Fprintf(dis.out, "// unsupported version %d\n", version) text = out.String() return } fmt.Fprintf(dis.out, "// version %d\n", version) dis.pc = vlen for dis.pc < len(program) { err = dis.outputLabelIfNeeded() if err != nil { return } op := opsByOpcode[version][program[dis.pc]] if op.Modes == runModeApplication { ds.hasStatefulOps = true } if op.Name == "" { ds.pcOffset = append(ds.pcOffset, PCOffset{dis.pc, out.Len()}) msg := fmt.Sprintf("invalid opcode %02x at pc=%d", program[dis.pc], dis.pc) out.WriteString(msg) out.WriteRune('\n') text = out.String() err = errors.New(msg) return } // ds.pcOffset tracks where in the output each opcode maps to assembly ds.pcOffset = append(ds.pcOffset, PCOffset{dis.pc, out.Len()}) // Actually do the disassembly op.dis(&dis, &op) if dis.err != nil { err = dis.err return } dis.pc = dis.nextpc } err = dis.outputLabelIfNeeded() if err != nil { return } text = out.String() return } // Disassemble produces a text form of program 
bytes. // AssembleString(Disassemble()) should result in the same program bytes. func Disassemble(program []byte) (text string, err error) { text, _, err = disassembleInstrumented(program) return } // HasStatefulOps checks if the program has stateful opcodes func HasStatefulOps(program []byte) (bool, error) { _, ds, err := disassembleInstrumented(program) return ds.hasStatefulOps, err }
1
41,748
Does this happen somewhere else now?
algorand-go-algorand
go
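The go-algorand patch above removes the ops.tpush(StackUint64) call from Intc, and the review question asks whether the uint64 result is still recorded elsewhere. For context, here is a tiny Python stand-in for the type-stack bookkeeping that tpush/tpop/typecheck implement; the names are illustrative, not the real OpStream API.

```python
# Sketch of the assembler's type-stack bookkeeping: ops that produce a value
# push its type once, ops that consume values pop and typecheck. The patch
# drops the extra tpush(StackUint64) so the pushed uint64 is not recorded twice.
STACK_NONE, STACK_ANY, STACK_UINT64, STACK_BYTES = "none", "any", "uint64", "bytes"

class TypeStack:
    def __init__(self):
        self.items = []

    def tpush(self, t):
        self.items.append(t)

    def tpop(self):
        return self.items.pop() if self.items else STACK_NONE

    def check_arg(self, expected):
        got = self.tpop()
        # Like typecheck(): 'any' on either side defers the decision to run time.
        if STACK_ANY in (expected, got) or expected == got:
            return True
        raise TypeError(f"wanted {expected}, got {got}")

ts = TypeStack()
ts.tpush(STACK_UINT64)       # what an int/intc op should record exactly once
ts.tpush(STACK_UINT64)
ts.check_arg(STACK_UINT64)   # a consuming op (e.g. '+') pops and verifies
ts.check_arg(STACK_UINT64)
```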
@@ -749,7 +749,8 @@ import browser from './browser'; } // Support H264 Level 52 (Tizen 5.0) - app only - if (browser.tizenVersion >= 5 && window.NativeShell) { + if ((browser.tizenVersion >= 5 && window.NativeShell) || + videoTestElement.canPlayType('video/mp4; codecs="avc1.640834"').replace(/no/, '')) { maxH264Level = 52; }
1
import appSettings from './settings/appSettings'; import * as userSettings from './settings/userSettings'; import browser from './browser'; /* eslint-disable indent */ function canPlayH264(videoTestElement) { return !!(videoTestElement.canPlayType && videoTestElement.canPlayType('video/mp4; codecs="avc1.42E01E, mp4a.40.2"').replace(/no/, '')); } function canPlayHevc(videoTestElement, options) { if (browser.tizen || browser.xboxOne || browser.web0s || options.supportsHevc) { return true; } if (browser.ps4) { return false; } // hevc main level 4.0 return !!videoTestElement.canPlayType && (videoTestElement.canPlayType('video/mp4; codecs="hvc1.1.L120"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.1.L120"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hvc1.1.0.L120"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.1.0.L120"').replace(/no/, '')); } let _supportsTextTracks; function supportsTextTracks() { if (browser.tizen) { return true; } if (_supportsTextTracks == null) { _supportsTextTracks = document.createElement('video').textTracks != null; } // For now, until ready return _supportsTextTracks; } let _canPlayHls; function canPlayHls() { if (_canPlayHls == null) { _canPlayHls = canPlayNativeHls() || canPlayHlsWithMSE(); } return _canPlayHls; } function canPlayNativeHls() { if (browser.tizen) { return true; } const media = document.createElement('video'); if (media.canPlayType('application/x-mpegURL').replace(/no/, '') || media.canPlayType('application/vnd.apple.mpegURL').replace(/no/, '')) { return true; } return false; } function canPlayHlsWithMSE() { // text tracks don’t work with this in firefox return window.MediaSource != null; /* eslint-disable-line compat/compat */ } function supportsAc3(videoTestElement) { if (browser.edgeUwp || browser.tizen || browser.web0s) { return true; } // iPhones 5c and older and old model iPads do not support AC-3/E-AC-3 // These models can only run iOS 10.x or lower if (browser.iOS && browser.iOSVersion < 11) { return false; } return videoTestElement.canPlayType('audio/mp4; codecs="ac-3"').replace(/no/, ''); } function supportsEac3(videoTestElement) { if (browser.tizen || browser.web0s) { return true; } // iPhones 5c and older and old model iPads do not support AC-3/E-AC-3 // These models can only run iOS 10.x or lower if (browser.iOS && browser.iOSVersion < 11) { return false; } return videoTestElement.canPlayType('audio/mp4; codecs="ec-3"').replace(/no/, ''); } function supportsAc3InHls(videoTestElement) { if (browser.tizen || browser.web0s) { return true; } if (videoTestElement.canPlayType) { return videoTestElement.canPlayType('application/x-mpegurl; codecs="avc1.42E01E, ac-3"').replace(/no/, '') || videoTestElement.canPlayType('application/vnd.apple.mpegURL; codecs="avc1.42E01E, ac-3"').replace(/no/, ''); } return false; } function canPlayAudioFormat(format) { let typeString; if (format === 'flac') { if (browser.tizen || browser.web0s || browser.edgeUwp) { return true; } } else if (format === 'wma') { if (browser.tizen || browser.edgeUwp) { return true; } } else if (format === 'asf') { if (browser.tizen || browser.web0s || browser.edgeUwp) { return true; } } else if (format === 'opus') { if (!browser.web0s) { typeString = 'audio/ogg; codecs="opus"'; return !!document.createElement('audio').canPlayType(typeString).replace(/no/, ''); } return false; } else if (format === 'alac') { if (browser.iOS || browser.osx) { return true; } } else if (format === 'mp2') { // For now 
return false; } if (format === 'webma') { typeString = 'audio/webm'; } else if (format === 'mp2') { typeString = 'audio/mpeg'; } else { typeString = 'audio/' + format; } return !!document.createElement('audio').canPlayType(typeString).replace(/no/, ''); } function testCanPlayMkv(videoTestElement) { if (browser.tizen || browser.web0s) { return true; } if (videoTestElement.canPlayType('video/x-matroska').replace(/no/, '') || videoTestElement.canPlayType('video/mkv').replace(/no/, '')) { return true; } if (browser.edgeChromium && browser.windows) { return true; } if (browser.edgeUwp) { return true; } return false; } function testCanPlayAv1(videoTestElement) { if (browser.tizenVersion >= 5.5) { return true; } else if (browser.web0sVersion >= 5 && window.outerHeight >= 2160) { return true; } return videoTestElement.canPlayType('video/webm; codecs="av01.0.15M.10"').replace(/no/, ''); } function testCanPlayTs() { return browser.tizen || browser.web0s || browser.edgeUwp; } function supportsMpeg2Video() { return browser.tizen || browser.web0s || browser.edgeUwp; } function supportsVc1(videoTestElement) { return browser.tizen || browser.web0s || browser.edgeUwp || videoTestElement.canPlayType('video/mp4; codecs="vc-1"').replace(/no/, ''); } function getDirectPlayProfileForVideoContainer(container, videoAudioCodecs, videoTestElement, options) { let supported = false; let profileContainer = container; const videoCodecs = []; switch (container) { case 'asf': supported = browser.tizen || browser.web0s || browser.edgeUwp; videoAudioCodecs = []; break; case 'avi': supported = browser.tizen || browser.web0s || browser.edgeUwp; // New Samsung TV don't support XviD/DivX // Explicitly add supported codecs to make other codecs be transcoded if (browser.tizenVersion >= 4) { videoCodecs.push('h264'); if (canPlayHevc(videoTestElement, options)) { videoCodecs.push('hevc'); } } break; case 'mpg': case 'mpeg': supported = browser.tizen || browser.web0s || browser.edgeUwp; break; case 'flv': supported = browser.tizen; break; case '3gp': case 'mts': case 'trp': case 'vob': case 'vro': supported = browser.tizen; break; case 'mov': supported = browser.safari || browser.tizen || browser.web0s || browser.chrome || browser.edgeChromium || browser.edgeUwp; videoCodecs.push('h264'); break; case 'm2ts': supported = browser.tizen || browser.web0s || browser.edgeUwp; videoCodecs.push('h264'); if (supportsVc1(videoTestElement)) { videoCodecs.push('vc1'); } if (supportsMpeg2Video()) { videoCodecs.push('mpeg2video'); } break; case 'wmv': supported = browser.tizen || browser.web0s || browser.edgeUwp; videoAudioCodecs = []; break; case 'ts': supported = testCanPlayTs(); videoCodecs.push('h264'); // safari doesn't support hevc in TS-HLS if ((browser.tizen || browser.web0s) && canPlayHevc(videoTestElement, options)) { videoCodecs.push('hevc'); } if (supportsVc1(videoTestElement)) { videoCodecs.push('vc1'); } if (supportsMpeg2Video()) { videoCodecs.push('mpeg2video'); } profileContainer = 'ts,mpegts'; break; default: break; } return supported ? 
{ Container: profileContainer, Type: 'Video', VideoCodec: videoCodecs.join(','), AudioCodec: videoAudioCodecs.join(',') } : null; } function getMaxBitrate() { return 120000000; } function getGlobalMaxVideoBitrate() { let isTizenFhd = false; if (browser.tizen) { try { const isTizenUhd = webapis.productinfo.isUdPanelSupported(); isTizenFhd = !isTizenUhd; console.debug('isTizenFhd = ' + isTizenFhd); } catch (error) { console.error('isUdPanelSupported() error code = ' + error.code); } } return browser.ps4 ? 8000000 : (browser.xboxOne ? 12000000 : (browser.edgeUwp ? null : (browser.tizen && isTizenFhd ? 20000000 : null))); } export default function (options) { options = options || {}; const isSurroundSoundSupportedBrowser = browser.safari || browser.chrome || browser.edgeChromium || browser.firefox; const allowedAudioChannels = parseInt(userSettings.allowedAudioChannels() || '-1'); const physicalAudioChannels = (allowedAudioChannels > 0 ? allowedAudioChannels : null) || options.audioChannels || (isSurroundSoundSupportedBrowser || browser.tv || browser.ps4 || browser.xboxOne ? 6 : 2); const bitrateSetting = getMaxBitrate(); const videoTestElement = document.createElement('video'); const canPlayVp8 = videoTestElement.canPlayType('video/webm; codecs="vp8"').replace(/no/, ''); const canPlayVp9 = videoTestElement.canPlayType('video/webm; codecs="vp9"').replace(/no/, ''); const webmAudioCodecs = ['vorbis']; const canPlayMkv = testCanPlayMkv(videoTestElement); const profile = {}; profile.MaxStreamingBitrate = bitrateSetting; profile.MaxStaticBitrate = 100000000; profile.MusicStreamingTranscodingBitrate = Math.min(bitrateSetting, 384000); profile.DirectPlayProfiles = []; let videoAudioCodecs = []; let hlsInTsVideoAudioCodecs = []; let hlsInFmp4VideoAudioCodecs = []; const supportsMp3VideoAudio = videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.69"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.6B"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp3"').replace(/no/, ''); // Not sure how to test for this const supportsMp2VideoAudio = browser.edgeUwp || browser.tizen || browser.web0s; /* eslint-disable compat/compat */ let maxVideoWidth = browser.xboxOne ? (window.screen ? window.screen.width : null) : null; /* eslint-enable compat/compat */ if (options.maxVideoWidth) { maxVideoWidth = options.maxVideoWidth; } const canPlayAacVideoAudio = videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.40.2"').replace(/no/, ''); const canPlayAc3VideoAudio = supportsAc3(videoTestElement); const canPlayEac3VideoAudio = supportsEac3(videoTestElement); const canPlayAc3VideoAudioInHls = supportsAc3InHls(videoTestElement); // Transcoding codec is the first in hlsVideoAudioCodecs. // Prefer AAC, MP3 to other codecs when audio transcoding. if (canPlayAacVideoAudio) { videoAudioCodecs.push('aac'); hlsInTsVideoAudioCodecs.push('aac'); hlsInFmp4VideoAudioCodecs.push('aac'); } if (supportsMp3VideoAudio) { videoAudioCodecs.push('mp3'); // PS4 fails to load HLS with mp3 audio if (!browser.ps4) { hlsInTsVideoAudioCodecs.push('mp3'); } hlsInFmp4VideoAudioCodecs.push('mp3'); } // For AC3/EAC3 remuxing. // Do not use AC3 for audio transcoding unless AAC and MP3 are not supported. 
if (canPlayAc3VideoAudio) { videoAudioCodecs.push('ac3'); if (canPlayEac3VideoAudio) { videoAudioCodecs.push('eac3'); } if (canPlayAc3VideoAudioInHls) { hlsInTsVideoAudioCodecs.push('ac3'); hlsInFmp4VideoAudioCodecs.push('ac3'); if (canPlayEac3VideoAudio) { hlsInTsVideoAudioCodecs.push('eac3'); hlsInFmp4VideoAudioCodecs.push('eac3'); } } } if (supportsMp2VideoAudio) { videoAudioCodecs.push('mp2'); } let supportsDts = browser.tizen || browser.web0s || options.supportsDts || videoTestElement.canPlayType('video/mp4; codecs="dts-"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="dts+"').replace(/no/, ''); // DTS audio not supported in 2018 models (Tizen 4.0) if (browser.tizenVersion >= 4) { supportsDts = false; } if (supportsDts) { videoAudioCodecs.push('dca'); videoAudioCodecs.push('dts'); } if (browser.tizen || browser.web0s) { videoAudioCodecs.push('pcm_s16le'); videoAudioCodecs.push('pcm_s24le'); } if (options.supportsTrueHd) { videoAudioCodecs.push('truehd'); } if (browser.tizen) { videoAudioCodecs.push('aac_latm'); } if (canPlayAudioFormat('opus')) { videoAudioCodecs.push('opus'); webmAudioCodecs.push('opus'); if (browser.tizen) { hlsInTsVideoAudioCodecs.push('opus'); } } if (canPlayAudioFormat('flac')) { videoAudioCodecs.push('flac'); hlsInFmp4VideoAudioCodecs.push('flac'); } if (canPlayAudioFormat('alac')) { videoAudioCodecs.push('alac'); hlsInFmp4VideoAudioCodecs.push('alac'); } videoAudioCodecs = videoAudioCodecs.filter(function (c) { return (options.disableVideoAudioCodecs || []).indexOf(c) === -1; }); hlsInTsVideoAudioCodecs = hlsInTsVideoAudioCodecs.filter(function (c) { return (options.disableHlsVideoAudioCodecs || []).indexOf(c) === -1; }); hlsInFmp4VideoAudioCodecs = hlsInFmp4VideoAudioCodecs.filter(function (c) { return (options.disableHlsVideoAudioCodecs || []).indexOf(c) === -1; }); const mp4VideoCodecs = []; const webmVideoCodecs = []; const hlsInTsVideoCodecs = []; const hlsInFmp4VideoCodecs = []; if ((browser.safari || browser.tizen || browser.web0s) && canPlayHevc(videoTestElement, options)) { hlsInFmp4VideoCodecs.push('hevc'); } if (canPlayH264(videoTestElement)) { mp4VideoCodecs.push('h264'); hlsInTsVideoCodecs.push('h264'); if (browser.safari || browser.tizen || browser.web0s) { hlsInFmp4VideoCodecs.push('h264'); } } if (canPlayHevc(videoTestElement, options)) { // safari is lying on HDR and 60fps videos, use fMP4 instead if (!browser.safari) { mp4VideoCodecs.push('hevc'); } if (browser.tizen || browser.web0s) { hlsInTsVideoCodecs.push('hevc'); } } if (supportsMpeg2Video()) { mp4VideoCodecs.push('mpeg2video'); } if (supportsVc1(videoTestElement)) { mp4VideoCodecs.push('vc1'); } if (browser.tizen) { mp4VideoCodecs.push('msmpeg4v2'); } if (canPlayVp8) { mp4VideoCodecs.push('vp8'); webmVideoCodecs.push('vp8'); } if (canPlayVp9) { mp4VideoCodecs.push('vp9'); webmVideoCodecs.push('vp9'); } if (testCanPlayAv1(videoTestElement)) { mp4VideoCodecs.push('av1'); webmVideoCodecs.push('av1'); } if (canPlayVp8 || browser.tizen) { videoAudioCodecs.push('vorbis'); } if (webmVideoCodecs.length) { profile.DirectPlayProfiles.push({ Container: 'webm', Type: 'Video', VideoCodec: webmVideoCodecs.join(','), AudioCodec: webmAudioCodecs.join(',') }); } if (mp4VideoCodecs.length) { profile.DirectPlayProfiles.push({ Container: 'mp4,m4v', Type: 'Video', VideoCodec: mp4VideoCodecs.join(','), AudioCodec: videoAudioCodecs.join(',') }); } if (canPlayMkv && mp4VideoCodecs.length) { profile.DirectPlayProfiles.push({ Container: 'mkv', Type: 'Video', VideoCodec: 
mp4VideoCodecs.join(','), AudioCodec: videoAudioCodecs.join(',') }); } // These are formats we can't test for but some devices will support ['m2ts', 'wmv', 'ts', 'asf', 'avi', 'mpg', 'mpeg', 'flv', '3gp', 'mts', 'trp', 'vob', 'vro', 'mov'].map(function (container) { return getDirectPlayProfileForVideoContainer(container, videoAudioCodecs, videoTestElement, options); }).filter(function (i) { return i != null; }).forEach(function (i) { profile.DirectPlayProfiles.push(i); }); ['opus', 'mp3', 'mp2', 'aac', 'flac', 'alac', 'webma', 'wma', 'wav', 'ogg', 'oga'].filter(canPlayAudioFormat).forEach(function (audioFormat) { profile.DirectPlayProfiles.push({ Container: audioFormat, Type: 'Audio' }); // https://www.webmproject.org/about/faq/ if (audioFormat === 'opus' || audioFormat === 'webma') { profile.DirectPlayProfiles.push({ Container: 'webm', AudioCodec: audioFormat, Type: 'Audio' }); } // aac also appears in the m4a and m4b container // m4a/alac only works when using safari if (audioFormat === 'aac' || audioFormat === 'alac') { profile.DirectPlayProfiles.push({ Container: 'm4a', AudioCodec: audioFormat, Type: 'Audio' }); profile.DirectPlayProfiles.push({ Container: 'm4b', AudioCodec: audioFormat, Type: 'Audio' }); } }); profile.TranscodingProfiles = []; const hlsBreakOnNonKeyFrames = browser.iOS || browser.osx || browser.edge || !canPlayNativeHls() ? true : false; if (canPlayHls() && browser.enableHlsAudio !== false) { profile.TranscodingProfiles.push({ // hlsjs, edge, and android all seem to require ts container Container: !canPlayNativeHls() || browser.edge || browser.android ? 'ts' : 'aac', Type: 'Audio', AudioCodec: 'aac', Context: 'Streaming', Protocol: 'hls', MaxAudioChannels: physicalAudioChannels.toString(), MinSegments: browser.iOS || browser.osx ? '2' : '1', BreakOnNonKeyFrames: hlsBreakOnNonKeyFrames }); } // For streaming, prioritize opus transcoding after mp3/aac. It is too problematic with random failures // But for static (offline sync), it will be just fine. 
// Prioritize aac higher because the encoder can accept more channels than mp3 ['aac', 'mp3', 'opus', 'wav'].filter(canPlayAudioFormat).forEach(function (audioFormat) { profile.TranscodingProfiles.push({ Container: audioFormat, Type: 'Audio', AudioCodec: audioFormat, Context: 'Streaming', Protocol: 'http', MaxAudioChannels: physicalAudioChannels.toString() }); }); ['opus', 'mp3', 'aac', 'wav'].filter(canPlayAudioFormat).forEach(function (audioFormat) { profile.TranscodingProfiles.push({ Container: audioFormat, Type: 'Audio', AudioCodec: audioFormat, Context: 'Static', Protocol: 'http', MaxAudioChannels: physicalAudioChannels.toString() }); }); if (canPlayMkv && !browser.tizen && options.enableMkvProgressive !== false) { profile.TranscodingProfiles.push({ Container: 'mkv', Type: 'Video', AudioCodec: videoAudioCodecs.join(','), VideoCodec: mp4VideoCodecs.join(','), Context: 'Streaming', MaxAudioChannels: physicalAudioChannels.toString(), CopyTimestamps: true }); } if (canPlayMkv) { profile.TranscodingProfiles.push({ Container: 'mkv', Type: 'Video', AudioCodec: videoAudioCodecs.join(','), VideoCodec: mp4VideoCodecs.join(','), Context: 'Static', MaxAudioChannels: physicalAudioChannels.toString(), CopyTimestamps: true }); } if (canPlayHls() && options.enableHls !== false) { if (hlsInFmp4VideoCodecs.length && hlsInFmp4VideoAudioCodecs.length && userSettings.preferFmp4HlsContainer() && (browser.safari || browser.tizen || browser.web0s)) { profile.TranscodingProfiles.push({ Container: 'mp4', Type: 'Video', AudioCodec: hlsInFmp4VideoAudioCodecs.join(','), VideoCodec: hlsInFmp4VideoCodecs.join(','), Context: 'Streaming', Protocol: 'hls', MaxAudioChannels: physicalAudioChannels.toString(), MinSegments: browser.iOS || browser.osx ? '2' : '1', BreakOnNonKeyFrames: hlsBreakOnNonKeyFrames }); } if (hlsInTsVideoCodecs.length && hlsInTsVideoAudioCodecs.length) { profile.TranscodingProfiles.push({ Container: 'ts', Type: 'Video', AudioCodec: hlsInTsVideoAudioCodecs.join(','), VideoCodec: hlsInTsVideoCodecs.join(','), Context: 'Streaming', Protocol: 'hls', MaxAudioChannels: physicalAudioChannels.toString(), MinSegments: browser.iOS || browser.osx ? '2' : '1', BreakOnNonKeyFrames: hlsBreakOnNonKeyFrames }); } } if (webmAudioCodecs.length && webmVideoCodecs.length) { profile.TranscodingProfiles.push({ Container: 'webm', Type: 'Video', AudioCodec: webmAudioCodecs.join(','), // TODO: Remove workaround when servers migrate away from 'vpx' for transcoding profiles. VideoCodec: (canPlayVp8 ? 
webmVideoCodecs.concat('vpx') : webmVideoCodecs).join(','), Context: 'Streaming', Protocol: 'http', // If audio transcoding is needed, limit channels to number of physical audio channels // Trying to transcode to 5 channels when there are only 2 speakers generally does not sound good MaxAudioChannels: physicalAudioChannels.toString() }); } profile.TranscodingProfiles.push({ Container: 'mp4', Type: 'Video', AudioCodec: videoAudioCodecs.join(','), VideoCodec: 'h264', Context: 'Static', Protocol: 'http' }); profile.ContainerProfiles = []; profile.CodecProfiles = []; const supportsSecondaryAudio = browser.tizen || videoTestElement.audioTracks; const aacCodecProfileConditions = []; // Handle he-aac not supported if (!videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.40.5"').replace(/no/, '')) { // TODO: This needs to become part of the stream url in order to prevent stream copy aacCodecProfileConditions.push({ Condition: 'NotEquals', Property: 'AudioProfile', Value: 'HE-AAC' }); } if (!supportsSecondaryAudio) { aacCodecProfileConditions.push({ Condition: 'Equals', Property: 'IsSecondaryAudio', Value: 'false', IsRequired: false }); } if (aacCodecProfileConditions.length) { profile.CodecProfiles.push({ Type: 'VideoAudio', Codec: 'aac', Conditions: aacCodecProfileConditions }); } if (!supportsSecondaryAudio) { profile.CodecProfiles.push({ Type: 'VideoAudio', Conditions: [ { Condition: 'Equals', Property: 'IsSecondaryAudio', Value: 'false', IsRequired: false } ] }); } let maxH264Level = 42; let h264Profiles = 'high|main|baseline|constrained baseline'; if (browser.tizen || browser.web0s || videoTestElement.canPlayType('video/mp4; codecs="avc1.640833"').replace(/no/, '')) { maxH264Level = 51; } // Support H264 Level 52 (Tizen 5.0) - app only if (browser.tizenVersion >= 5 && window.NativeShell) { maxH264Level = 52; } if (browser.tizen || videoTestElement.canPlayType('video/mp4; codecs="avc1.6e0033"').replace(/no/, '')) { // These tests are passing in safari, but playback is failing if (!browser.safari && !browser.iOS && !browser.web0s && !browser.edge && !browser.mobile) { h264Profiles += '|high 10'; } } let maxHevcLevel = 120; let hevcProfiles = 'main'; // hevc main level 4.1 if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.1.4.L123"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.1.4.L123"').replace(/no/, '')) { maxHevcLevel = 123; } // hevc main10 level 4.1 if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.2.4.L123"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.2.4.L123"').replace(/no/, '')) { maxHevcLevel = 123; hevcProfiles = 'main|main 10'; } // hevc main10 level 5.1 if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.2.4.L153"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.2.4.L153"').replace(/no/, '')) { maxHevcLevel = 153; hevcProfiles = 'main|main 10'; } // hevc main10 level 6.1 if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.2.4.L183"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="hev1.2.4.L183"').replace(/no/, '')) { maxHevcLevel = 183; hevcProfiles = 'main|main 10'; } const h264CodecProfileConditions = [ { Condition: 'NotEquals', Property: 'IsAnamorphic', Value: 'true', IsRequired: false }, { Condition: 'EqualsAny', Property: 'VideoProfile', Value: h264Profiles, IsRequired: false }, { Condition: 'LessThanEqual', Property: 'VideoLevel', Value: maxH264Level.toString(), IsRequired: false } ]; const hevcCodecProfileConditions = [ 
{ Condition: 'NotEquals', Property: 'IsAnamorphic', Value: 'true', IsRequired: false }, { Condition: 'EqualsAny', Property: 'VideoProfile', Value: hevcProfiles, IsRequired: false }, { Condition: 'LessThanEqual', Property: 'VideoLevel', Value: maxHevcLevel.toString(), IsRequired: false } ]; if (!browser.edgeUwp && !browser.tizen && !browser.web0s) { h264CodecProfileConditions.push({ Condition: 'NotEquals', Property: 'IsInterlaced', Value: 'true', IsRequired: false }); hevcCodecProfileConditions.push({ Condition: 'NotEquals', Property: 'IsInterlaced', Value: 'true', IsRequired: false }); } if (maxVideoWidth) { h264CodecProfileConditions.push({ Condition: 'LessThanEqual', Property: 'Width', Value: maxVideoWidth.toString(), IsRequired: false }); hevcCodecProfileConditions.push({ Condition: 'LessThanEqual', Property: 'Width', Value: maxVideoWidth.toString(), IsRequired: false }); } const globalMaxVideoBitrate = (getGlobalMaxVideoBitrate() || '').toString(); const h264MaxVideoBitrate = globalMaxVideoBitrate; const hevcMaxVideoBitrate = globalMaxVideoBitrate; if (h264MaxVideoBitrate) { h264CodecProfileConditions.push({ Condition: 'LessThanEqual', Property: 'VideoBitrate', Value: h264MaxVideoBitrate, IsRequired: true }); } if (hevcMaxVideoBitrate) { hevcCodecProfileConditions.push({ Condition: 'LessThanEqual', Property: 'VideoBitrate', Value: hevcMaxVideoBitrate, IsRequired: true }); } // On iOS 12.x, for TS container max h264 level is 4.2 if (browser.iOS && browser.iOSVersion < 13) { const codecProfile = { Type: 'Video', Codec: 'h264', Container: 'ts', Conditions: h264CodecProfileConditions.filter((condition) => { return condition.Property !== 'VideoLevel'; }) }; codecProfile.Conditions.push({ Condition: 'LessThanEqual', Property: 'VideoLevel', Value: '42', IsRequired: false }); profile.CodecProfiles.push(codecProfile); } profile.CodecProfiles.push({ Type: 'Video', Codec: 'h264', Conditions: h264CodecProfileConditions }); profile.CodecProfiles.push({ Type: 'Video', Codec: 'hevc', Conditions: hevcCodecProfileConditions }); const globalVideoConditions = []; if (globalMaxVideoBitrate) { globalVideoConditions.push({ Condition: 'LessThanEqual', Property: 'VideoBitrate', Value: globalMaxVideoBitrate }); } if (maxVideoWidth) { globalVideoConditions.push({ Condition: 'LessThanEqual', Property: 'Width', Value: maxVideoWidth.toString(), IsRequired: false }); } if (globalVideoConditions.length) { profile.CodecProfiles.push({ Type: 'Video', Conditions: globalVideoConditions }); } // Subtitle profiles // External vtt or burn in profile.SubtitleProfiles = []; const subtitleBurninSetting = appSettings.get('subtitleburnin'); if (subtitleBurninSetting !== 'all') { if (supportsTextTracks()) { profile.SubtitleProfiles.push({ Format: 'vtt', Method: 'External' }); } if (options.enableSsaRender !== false && !options.isRetry && subtitleBurninSetting !== 'allcomplexformats') { profile.SubtitleProfiles.push({ Format: 'ass', Method: 'External' }); profile.SubtitleProfiles.push({ Format: 'ssa', Method: 'External' }); } } profile.ResponseProfiles = []; profile.ResponseProfiles.push({ Type: 'Video', Container: 'm4v', MimeType: 'video/mp4' }); return profile; } /* eslint-enable indent */
1
19,849
Remove trailing space at this line, amend the commit, and then force-push.
jellyfin-jellyfin-web
js
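Illustration for the jellyfin-web record above — a minimal sketch of the added H264 Level 52 condition with the trailing whitespace stripped, which is what the review asks for (the reviewer's remaining steps, amending the commit and force-pushing, are git workflow, not code; this is only a cleaned-up rendering of the diff, not the final committed change):

```js
// Support H264 Level 52 (Tizen 5.0) - app only, or when canPlayType reports support
if ((browser.tizenVersion >= 5 && window.NativeShell) ||
    videoTestElement.canPlayType('video/mp4; codecs="avc1.640834"').replace(/no/, '')) {
    maxH264Level = 52;
}
```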
@@ -21,6 +21,8 @@ import ( "testing" "time" + "go.opentelemetry.io/api/trace" + "google.golang.org/grpc/codes" "go.opentelemetry.io/api/core"
1
// Copyright 2019, OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stdout import ( "bytes" "context" "encoding/json" "testing" "time" "google.golang.org/grpc/codes" "go.opentelemetry.io/api/core" "go.opentelemetry.io/sdk/export" ) func TestExporter_ExportSpan(t *testing.T) { ex, err := NewExporter(Options{}) if err != nil { t.Errorf("Error constructing stdout exporter %s", err) } // override output writer for testing var b bytes.Buffer ex.outputWriter = &b // setup test span now := time.Now() traceID := core.TraceID{High: 0x0102030405060708, Low: 0x090a0b0c0d0e0f10} spanID := uint64(0x0102030405060708) keyValue := "value" doubleValue := float64(123.456) testSpan := &export.SpanData{ SpanContext: core.SpanContext{ TraceID: traceID, SpanID: spanID, }, Name: "/foo", StartTime: now, EndTime: now, Attributes: []core.KeyValue{ { Key: core.Key("key"), Value: core.Value{Type: core.STRING, String: keyValue}, }, { Key: core.Key("double"), Value: core.Value{Type: core.FLOAT64, Float64: doubleValue}, }, }, Status: codes.Unknown, } ex.ExportSpan(context.Background(), testSpan) expectedSerializedNow, _ := json.Marshal(now) got := b.String() expectedOutput := `{"SpanContext":{` + `"TraceID":{"High":72623859790382856,"Low":651345242494996240},` + `"SpanID":72623859790382856,"TraceFlags":0},` + `"ParentSpanID":0,` + `"SpanKind":0,` + `"Name":"/foo",` + `"StartTime":` + string(expectedSerializedNow) + "," + `"EndTime":` + string(expectedSerializedNow) + "," + `"Attributes":[` + `{` + `"Key":"key",` + `"Value":{"Type":8,"Bool":false,"Int64":0,"Uint64":0,"Float64":0,"String":"value","Bytes":null}` + `},` + `{` + `"Key":"double",` + `"Value":{"Type":7,"Bool":false,"Int64":0,"Uint64":0,"Float64":123.456,"String":"","Bytes":null}` + `}` + `],` + `"MessageEvents":null,` + `"Links":null,` + `"Status":2,` + `"HasRemoteParent":false,` + `"DroppedAttributeCount":0,` + `"DroppedMessageEventCount":0,` + `"DroppedLinkCount":0,` + `"ChildSpanCount":0}` + "\n" if got != expectedOutput { t.Errorf("Want: %v but got: %v", expectedOutput, got) } }
1
10,209
This should be grouped together with the import of "go.opentelemetry.io/{api/core,sdk/export}" below.
open-telemetry-opentelemetry-go
go
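Illustration for the opentelemetry-go record above — a minimal sketch of the import block grouped the way the reviewer suggests, with the newly added trace import sitting next to the other go.opentelemetry.io packages (the exact blank-line grouping shown is a goimports-style assumption, not mandated by the source):

```go
import (
	"bytes"
	"context"
	"encoding/json"
	"testing"
	"time"

	"google.golang.org/grpc/codes"

	"go.opentelemetry.io/api/core"
	"go.opentelemetry.io/api/trace"
	"go.opentelemetry.io/sdk/export"
)
```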
@@ -66,10 +66,14 @@ REQUIREMENTS_MAP = { {'module_name': 'instance_network_interface_scanner', 'class_name': 'InstanceNetworkInterfaceScanner', 'rules_filename': 'instance_network_interface_rules.yaml'}, + 'ke_scanner': + {'module_name': 'ke_scanner', + 'class_name': 'KeScanner', + 'rules_filename': 'ke_rules.yaml'}, 'ke_version_scanner': {'module_name': 'ke_version_scanner', 'class_name': 'KeVersionScanner', - 'rules_filename': 'ke_rules.yaml'}, + 'rules_filename': 'ke_version_rules.yaml'}, 'log_sink': {'module_name': 'log_sink_scanner', 'class_name': 'LogSinkScanner',
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Map of the requirements needed by the scanners.""" # TODO: Standardize the module and class names so that we can use reflection # instead of maintaining them explicitly. # Use the naming pattern foo_module.py, class FooModule REQUIREMENTS_MAP = { 'audit_logging': {'module_name': 'audit_logging_scanner', 'class_name': 'AuditLoggingScanner', 'rules_filename': 'audit_logging_rules.yaml'}, 'bigquery': {'module_name': 'bigquery_scanner', 'class_name': 'BigqueryScanner', 'rules_filename': 'bigquery_rules.yaml'}, 'blacklist': {'module_name': 'blacklist_scanner', 'class_name': 'BlacklistScanner', 'rules_filename': 'blacklist_rules.yaml'}, 'bucket_acl': {'module_name': 'bucket_rules_scanner', 'class_name': 'BucketsAclScanner', 'rules_filename': 'bucket_rules.yaml'}, 'cloudsql_acl': {'module_name': 'cloudsql_rules_scanner', 'class_name': 'CloudSqlAclScanner', 'rules_filename': 'cloudsql_rules.yaml'}, 'enabled_apis': {'module_name': 'enabled_apis_scanner', 'class_name': 'EnabledApisScanner', 'rules_filename': 'enabled_apis_rules.yaml'}, 'firewall_rule': {'module_name': 'firewall_rules_scanner', 'class_name': 'FirewallPolicyScanner', 'rules_filename': 'firewall_rules.yaml'}, 'forwarding_rule': {'module_name': 'forwarding_rule_scanner', 'class_name': 'ForwardingRuleScanner', 'rules_filename': 'forwarding_rules.yaml'}, 'group': {'module_name': 'groups_scanner', 'class_name': 'GroupsScanner', 'rules_filename': 'group_rules.yaml'}, 'iam_policy': {'module_name': 'iam_rules_scanner', 'class_name': 'IamPolicyScanner', 'rules_filename': 'iam_rules.yaml'}, 'iap': {'module_name': 'iap_scanner', 'class_name': 'IapScanner', 'rules_filename': 'iap_rules.yaml'}, 'instance_network_interface': {'module_name': 'instance_network_interface_scanner', 'class_name': 'InstanceNetworkInterfaceScanner', 'rules_filename': 'instance_network_interface_rules.yaml'}, 'ke_version_scanner': {'module_name': 'ke_version_scanner', 'class_name': 'KeVersionScanner', 'rules_filename': 'ke_rules.yaml'}, 'log_sink': {'module_name': 'log_sink_scanner', 'class_name': 'LogSinkScanner', 'rules_filename': 'log_sink_rules.yaml'}, 'service_account_key': {'module_name': 'service_account_key_scanner', 'class_name': 'ServiceAccountKeyScanner', 'rules_filename': 'service_account_key_rules.yaml'}, }
1
31,696
What's the impact to people upgrading to the new version? If they don't change their file names, they'll suddenly be broken. While I agree that your naming makes more intuitive sense, I think we need to maintain backwards compatibility. Please choose a new rules file name for the new scanner and keep ke_rules.yaml for the existing version scanner.
forseti-security-forseti-security
py
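Illustration for the forseti-security record above — a hedged sketch of the backwards-compatible layout the reviewer asks for: the existing ke_version_scanner keeps ke_rules.yaml so current deployments are not broken on upgrade, and the new scanner gets a fresh rules file. The filename 'ke_scanner_rules.yaml' below is a hypothetical placeholder, not a name taken from the source:

```python
# Sketch only: preserve the old filename for the existing version scanner and
# introduce a new filename for the new scanner, per the review comment.
REQUIREMENTS_MAP = {
    'ke_scanner':
        {'module_name': 'ke_scanner',
         'class_name': 'KeScanner',
         'rules_filename': 'ke_scanner_rules.yaml'},  # hypothetical new name
    'ke_version_scanner':
        {'module_name': 'ke_version_scanner',
         'class_name': 'KeVersionScanner',
         'rules_filename': 'ke_rules.yaml'},  # unchanged for existing users
}
```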
@@ -698,7 +698,7 @@ class InternalFrame(object): index_spark_columns = self.index_spark_columns return index_spark_columns + [ spark_column - for label, spark_column in zip(self.column_labels, self.data_spark_columns) + for spark_column in self.data_spark_columns if all(not spark_column._jc.equals(scol._jc) for scol in index_spark_columns) ]
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ An internal immutable DataFrame with some metadata to manage indexes. """ import re from typing import Dict, List, Optional, Tuple, Union, TYPE_CHECKING from itertools import accumulate from collections import OrderedDict import numpy as np import pandas as pd from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like from pyspark import sql as spark from pyspark._globals import _NoValue, _NoValueType from pyspark.sql import functions as F, Window from pyspark.sql.functions import PandasUDFType, pandas_udf from pyspark.sql.types import BooleanType, DataType, StructField, StructType, LongType try: from pyspark.sql.types import to_arrow_type except ImportError: from pyspark.sql.pandas.types import to_arrow_type from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. if TYPE_CHECKING: # This is required in old Python 3.5 to prevent circular reference. from databricks.koalas.series import Series from databricks.koalas.config import get_option from databricks.koalas.typedef import infer_pd_series_spark_type, spark_type_to_pandas_dtype from databricks.koalas.utils import ( column_labels_level, default_session, lazy_property, name_like_string, scol_for, verify_temp_column_name, ) # A function to turn given numbers to Spark columns that represent Koalas index. SPARK_INDEX_NAME_FORMAT = "__index_level_{}__".format SPARK_DEFAULT_INDEX_NAME = SPARK_INDEX_NAME_FORMAT(0) # A pattern to check if the name of a Spark column is a Koalas index name or not. SPARK_INDEX_NAME_PATTERN = re.compile(r"__index_level_[0-9]+__") NATURAL_ORDER_COLUMN_NAME = "__natural_order__" HIDDEN_COLUMNS = {NATURAL_ORDER_COLUMN_NAME} class InternalFrame(object): """ The internal immutable DataFrame which manages Spark DataFrame and column names and index information. .. note:: this is an internal class. It is not supposed to be exposed to users and users should not directly access to it. The internal immutable DataFrame represents the index information for a DataFrame it belongs to. For instance, if we have a Koalas DataFrame as below, Pandas DataFrame does not store the index as columns. >>> kdf = ks.DataFrame({ ... 'A': [1, 2, 3, 4], ... 'B': [5, 6, 7, 8], ... 'C': [9, 10, 11, 12], ... 'D': [13, 14, 15, 16], ... 'E': [17, 18, 19, 20]}, columns = ['A', 'B', 'C', 'D', 'E']) >>> kdf # doctest: +NORMALIZE_WHITESPACE A B C D E 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 However, all columns including index column are also stored in Spark DataFrame internally as below. 
>>> kdf._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ In order to fill this gap, the current metadata is used by mapping Spark's internal column to Koalas' index. See the method below: * `spark_frame` represents the internal Spark DataFrame * `data_spark_column_names` represents non-indexing Spark column names * `data_spark_columns` represents non-indexing Spark columns * `index_spark_column_names` represents internal index Spark column names * `index_spark_columns` represents internal index Spark columns * `spark_column_names` represents all columns * `index_names` represents the external index name as a label * `index_map` is zipped pairs of `index_spark_column_names` and `index_names` * `to_internal_spark_frame` represents Spark DataFrame derived by the metadata. Includes index. * `to_pandas_frame` represents pandas DataFrame derived by the metadata >>> internal = kdf._internal >>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------------+---+---+---+---+---+-----------------+ |__index_level_0__| A| B| C| D| E|__natural_order__| +-----------------+---+---+---+---+---+-----------------+ | 0| 1| 5| 9| 13| 17|...| | 1| 2| 6| 10| 14| 18|...| | 2| 3| 7| 11| 15| 19|...| | 3| 4| 8| 12| 16| 20|...| +-----------------+---+---+---+---+---+-----------------+ >>> internal.data_spark_column_names ['A', 'B', 'C', 'D', 'E'] >>> internal.index_spark_column_names ['__index_level_0__'] >>> internal.spark_column_names ['__index_level_0__', 'A', 'B', 'C', 'D', 'E'] >>> internal.index_names [None] >>> internal.index_map OrderedDict([('__index_level_0__', None)]) >>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.to_pandas_frame A B C D E 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 In case that index is set to one of the existing column as below: >>> kdf1 = kdf.set_index("A") >>> kdf1 # doctest: +NORMALIZE_WHITESPACE B C D E A 1 5 9 13 17 2 6 10 14 18 3 7 11 15 19 4 8 12 16 20 >>> kdf1._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal = kdf1._internal >>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------------+---+---+---+---+---+-----------------+ |__index_level_0__| A| B| C| D| E|__natural_order__| +-----------------+---+---+---+---+---+-----------------+ | 0| 1| 5| 9| 13| 17|...| | 1| 2| 6| 10| 14| 18|...| | 2| 3| 7| 11| 15| 19|...| | 3| 4| 8| 12| 16| 20|...| +-----------------+---+---+---+---+---+-----------------+ >>> internal.data_spark_column_names ['B', 'C', 'D', 'E'] >>> internal.index_spark_column_names ['A'] >>> internal.spark_column_names ['A', 'B', 'C', 'D', 'E'] >>> internal.index_names [('A',)] >>> internal.index_map OrderedDict([('A', ('A',))]) >>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| 
+---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE B C D E A 1 5 9 13 17 2 6 10 14 18 3 7 11 15 19 4 8 12 16 20 In case that index becomes a multi index as below: >>> kdf2 = kdf.set_index("A", append=True) >>> kdf2 # doctest: +NORMALIZE_WHITESPACE B C D E A 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 >>> kdf2._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal = kdf2._internal >>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------------+---+---+---+---+---+-----------------+ |__index_level_0__| A| B| C| D| E|__natural_order__| +-----------------+---+---+---+---+---+-----------------+ | 0| 1| 5| 9| 13| 17|...| | 1| 2| 6| 10| 14| 18|...| | 2| 3| 7| 11| 15| 19|...| | 3| 4| 8| 12| 16| 20|...| +-----------------+---+---+---+---+---+-----------------+ >>> internal.data_spark_column_names ['B', 'C', 'D', 'E'] >>> internal.index_spark_column_names ['__index_level_0__', 'A'] >>> internal.spark_column_names ['__index_level_0__', 'A', 'B', 'C', 'D', 'E'] >>> internal.index_names [None, ('A',)] >>> internal.index_map OrderedDict([('__index_level_0__', None), ('A', ('A',))]) >>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE B C D E A 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 For multi-level columns, it also holds column_labels >>> columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ... ('Y', 'C'), ('Y', 'D')]) >>> kdf3 = ks.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16], ... [17, 18, 19, 20]], columns = columns) >>> kdf3 # doctest: +NORMALIZE_WHITESPACE X Y A B C D 0 1 2 3 4 1 5 6 7 8 2 9 10 11 12 3 13 14 15 16 4 17 18 19 20 >>> internal = kdf3._internal >>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------------+------+------+------+------+-----------------+ |__index_level_0__|(X, A)|(X, B)|(Y, C)|(Y, D)|__natural_order__| +-----------------+------+------+------+------+-----------------+ | 0| 1| 2| 3| 4|...| | 1| 5| 6| 7| 8|...| | 2| 9| 10| 11| 12|...| | 3| 13| 14| 15| 16|...| | 4| 17| 18| 19| 20|...| +-----------------+------+------+------+------+-----------------+ >>> internal.data_spark_column_names ['(X, A)', '(X, B)', '(Y, C)', '(Y, D)'] >>> internal.column_labels [('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')] For series, it also holds scol to represent the column. 
>>> kseries = kdf1.B >>> kseries A 1 5 2 6 3 7 4 8 Name: B, dtype: int64 >>> internal = kseries._internal >>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------------+---+---+---+---+---+-----------------+ |__index_level_0__| A| B| C| D| E|__natural_order__| +-----------------+---+---+---+---+---+-----------------+ | 0| 1| 5| 9| 13| 17|...| | 1| 2| 6| 10| 14| 18|...| | 2| 3| 7| 11| 15| 19|...| | 3| 4| 8| 12| 16| 20|...| +-----------------+---+---+---+---+---+-----------------+ >>> internal.spark_column Column<b'B'> >>> internal.data_spark_column_names ['B'] >>> internal.index_spark_column_names ['A'] >>> internal.spark_column_names ['A', 'B'] >>> internal.index_names [('A',)] >>> internal.index_map OrderedDict([('A', ('A',))]) >>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +---+---+ | A| B| +---+---+ | 1| 5| | 2| 6| | 3| 7| | 4| 8| +---+---+ >>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE B A 1 5 2 6 3 7 4 8 """ def __init__( self, spark_frame: spark.DataFrame, index_map: Optional[Dict[str, Optional[Tuple[str, ...]]]], column_labels: Optional[List[Tuple[str, ...]]] = None, data_spark_columns: Optional[List[spark.Column]] = None, column_label_names: Optional[List[str]] = None, spark_column: Optional[spark.Column] = None, ) -> None: """ Create a new internal immutable DataFrame to manage Spark DataFrame, column fields and index fields and names. :param spark_frame: Spark DataFrame to be managed. :param index_map: dictionary of string pairs Each pair holds the index field name which exists in Spark fields, and the index name. :param column_labels: list of tuples with the same length The multi-level values in the tuples. :param data_spark_columns: list of Spark Column Spark Columns to appear as columns. If spark_column is not None, this argument is ignored, otherwise if this is None, calculated from spark_frame. :param column_label_names: Names for each of the index levels. :param spark_column: Spark Column to be managed. See the examples below to refer what each parameter means. >>> column_labels = pd.MultiIndex.from_tuples( ... [('a', 'x'), ('a', 'y'), ('b', 'z')], names=["column_labels_a", "column_labels_b"]) >>> row_index = pd.MultiIndex.from_tuples( ... [('foo', 'bar'), ('foo', 'bar'), ('zoo', 'bar')], ... names=["row_index_a", "row_index_b"]) >>> kdf = ks.DataFrame( ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=row_index, columns=column_labels) >>> kdf.set_index(('a', 'x'), append=True, inplace=True) >>> kdf # doctest: +NORMALIZE_WHITESPACE column_labels_a a b column_labels_b y z row_index_a row_index_b (a, x) foo bar 1 2 3 4 5 6 zoo bar 7 8 9 >>> internal = kdf[('a', 'y')]._internal >>> internal._sdf.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS +-----------------+-----------------+------+------+------+... |__index_level_0__|__index_level_1__|(a, x)|(a, y)|(b, z)|... +-----------------+-----------------+------+------+------+... | foo| bar| 1| 2| 3|... | foo| bar| 4| 5| 6|... | zoo| bar| 7| 8| 9|... +-----------------+-----------------+------+------+------+... 
>>> internal._index_map # doctest: +NORMALIZE_WHITESPACE OrderedDict([('__index_level_0__', ('row_index_a',)), ('__index_level_1__', ('row_index_b',)), ('(a, x)', ('a', 'x'))]) >>> internal._column_labels [('a', 'y')] >>> internal._data_spark_columns [Column<b'(a, y)'>] >>> list(internal._column_label_names) ['column_labels_a', 'column_labels_b'] >>> internal.spark_column Column<b'(a, y)'> """ assert isinstance(spark_frame, spark.DataFrame) assert not spark_frame.isStreaming, "Koalas does not support Structured Streaming." if index_map is None: assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in spark_frame.columns), ( "Index columns should not appear in columns of the Spark DataFrame. Avoid " "index column names [%s]." % SPARK_INDEX_NAME_PATTERN ) # Create default index. spark_frame = InternalFrame.attach_default_index(spark_frame) index_map = OrderedDict({SPARK_DEFAULT_INDEX_NAME: None}) if NATURAL_ORDER_COLUMN_NAME not in spark_frame.columns: spark_frame = spark_frame.withColumn( NATURAL_ORDER_COLUMN_NAME, F.monotonically_increasing_id() ) assert isinstance(index_map, OrderedDict), index_map assert all( isinstance(index_field, str) and ( index_name is None or ( isinstance(index_name, tuple) and all(isinstance(name, str) for name in index_name) ) ) for index_field, index_name in index_map.items() ), index_map assert spark_column is None or isinstance(spark_column, spark.Column) assert data_spark_columns is None or all( isinstance(scol, spark.Column) for scol in data_spark_columns ) self._sdf = spark_frame # type: spark.DataFrame self._index_map = index_map # type: Dict[str, Optional[Tuple[str, ...]]] self._spark_column = spark_column # type: Optional[spark.Column] if spark_column is not None: self._data_spark_columns = [spark_column] elif data_spark_columns is None: index_columns = set(index_column for index_column in self._index_map) self._data_spark_columns = [ scol_for(spark_frame, col) for col in spark_frame.columns if col not in index_columns and col not in HIDDEN_COLUMNS ] else: self._data_spark_columns = data_spark_columns if spark_column is not None: assert column_labels is not None and len(column_labels) == 1, column_labels assert all( label is None or (isinstance(label, tuple) and len(label) > 0) for label in column_labels ), column_labels self._column_labels = column_labels elif column_labels is None: self._column_labels = [ (spark_frame.select(scol).columns[0],) for scol in self._data_spark_columns ] else: assert len(column_labels) == len(self._data_spark_columns), ( len(column_labels), len(self._data_spark_columns), ) assert all(isinstance(i, tuple) for i in column_labels), column_labels assert len(set(len(i) for i in column_labels)) <= 1, column_labels self._column_labels = column_labels if column_label_names is not None and not is_list_like(column_label_names): raise ValueError("Column_index_names should be list-like or None for a MultiIndex") if isinstance(column_label_names, list): if all(name is None for name in column_label_names): self._column_label_names = None else: self._column_label_names = column_label_names else: self._column_label_names = column_label_names @staticmethod def attach_default_index(sdf, default_index_type=None): """ This method attaches a default index to Spark DataFrame. Spark does not have the index notion so corresponding column should be generated. There are several types of default index can be configured by `compute.default_index_type`. 
>>> spark_frame = ks.range(10).to_spark() >>> spark_frame DataFrame[id: bigint] It adds the default index column '__index_level_0__'. >>> spark_frame = InternalFrame.attach_default_index(spark_frame) >>> spark_frame DataFrame[__index_level_0__: int, id: bigint] It throws an exception if the given column name already exists. >>> InternalFrame.attach_default_index(spark_frame) ... # doctest: +ELLIPSIS Traceback (most recent call last): ... AssertionError: '__index_level_0__' already exists... """ index_column = SPARK_DEFAULT_INDEX_NAME assert ( index_column not in sdf.columns ), "'%s' already exists in the Spark column names '%s'" % (index_column, sdf.columns) if default_index_type is None: default_index_type = get_option("compute.default_index_type") scols = [scol_for(sdf, column) for column in sdf.columns] if default_index_type == "sequence": sequential_index = ( F.row_number().over(Window.orderBy(F.monotonically_increasing_id())) - 1 ) return sdf.select(sequential_index.alias(index_column), *scols) elif default_index_type == "distributed-sequence": return InternalFrame.attach_distributed_sequence_column(sdf, column_name=index_column) elif default_index_type == "distributed": return InternalFrame.attach_distributed_column(sdf, column_name=index_column) else: raise ValueError( "'compute.default_index_type' should be one of 'sequence'," " 'distributed-sequence' and 'distributed'" ) @staticmethod def attach_distributed_column(sdf, column_name): scols = [scol_for(sdf, column) for column in sdf.columns] return sdf.select(F.monotonically_increasing_id().alias(column_name), *scols) @staticmethod def attach_distributed_sequence_column(sdf, column_name): """ This method attaches a Spark column that has a sequence in a distributed manner. This is equivalent to the column assigned when default index type 'distributed-sequence'. >>> sdf = ks.DataFrame(['a', 'b', 'c']).to_spark() >>> sdf = InternalFrame.attach_distributed_sequence_column(sdf, column_name="sequence") >>> sdf.sort("sequence").show() # doctest: +NORMALIZE_WHITESPACE +--------+---+ |sequence| 0| +--------+---+ | 0| a| | 1| b| | 2| c| +--------+---+ """ scols = [scol_for(sdf, column) for column in sdf.columns] spark_partition_column = verify_temp_column_name(sdf, "__spark_partition_id__") offset_column = verify_temp_column_name(sdf, "__offset__") row_number_column = verify_temp_column_name(sdf, "__row_number__") # 1. Calculates counts per each partition ID. `counts` here is, for instance, # { # 1: 83, # 6: 83, # 3: 83, # ... # } sdf = sdf.withColumn(spark_partition_column, F.spark_partition_id()) counts = map( lambda x: (x["key"], x["count"]), sdf.groupby(sdf[spark_partition_column].alias("key")).count().collect(), ) # 2. Calculates cumulative sum in an order of partition id. # Note that it does not matter if partition id guarantees its order or not. # We just need a one-by-one sequential id. # sort by partition key. sorted_counts = sorted(counts, key=lambda x: x[0]) # get cumulative sum in an order of partition key. cumulative_counts = [0] + list(accumulate(map(lambda count: count[1], sorted_counts))) # zip it with partition key. sums = dict(zip(map(lambda count: count[0], sorted_counts), cumulative_counts)) # 3. Attach offset for each partition. @pandas_udf(LongType(), PandasUDFType.SCALAR) def offset(id): current_partition_offset = sums[id.iloc[0]] return pd.Series(current_partition_offset).repeat(len(id)) sdf = sdf.withColumn(offset_column, offset(spark_partition_column)) # 4. Calculate row_number in each partition. 
w = Window.partitionBy(spark_partition_column).orderBy(F.monotonically_increasing_id()) row_number = F.row_number().over(w) sdf = sdf.withColumn(row_number_column, row_number) # 5. Calculate the index. return sdf.select( (sdf[offset_column] + sdf[row_number_column] - 1).alias(column_name), *scols ) def spark_column_name_for(self, label: Tuple[str, ...]) -> str: """ Return the actual Spark column name for the given column label. """ return self.spark_frame.select(self.spark_column_for(label)).columns[0] def spark_column_for(self, label: Tuple[str, ...]): """ Return Spark Column for the given column label. """ column_labels_to_scol = dict(zip(self.column_labels, self.data_spark_columns)) if label in column_labels_to_scol: return column_labels_to_scol[label] # type: ignore else: raise KeyError(name_like_string(label)) def spark_type_for(self, label: Tuple[str, ...]) -> DataType: """ Return DataType for the given column label. """ return self.spark_frame.select(self.spark_column_for(label)).schema[0].dataType def spark_column_nullable_for(self, label: Tuple[str, ...]) -> bool: """ Return nullability for the given column label. """ return self.spark_frame.select(self.spark_column_for(label)).schema[0].nullable @property def spark_frame(self) -> spark.DataFrame: """ Return the managed Spark DataFrame. """ return self._sdf @property def spark_column(self) -> Optional[spark.Column]: """ Return the managed Spark Column. """ return self._spark_column @lazy_property def data_spark_column_names(self) -> List[str]: """ Return the managed column field names. """ return self.spark_frame.select(self.data_spark_columns).columns @property def data_spark_columns(self) -> List[spark.Column]: """ Return Spark Columns for the managed data columns. """ return self._data_spark_columns @lazy_property def index_spark_column_names(self) -> List[str]: """ Return the managed index field names. """ return list(self.index_map.keys()) @lazy_property def index_spark_columns(self) -> List[spark.Column]: """ Return Spark Columns for the managed index columns. """ return [scol_for(self.spark_frame, column) for column in self.index_spark_column_names] @lazy_property def spark_column_names(self) -> List[str]: """ Return all the field names including index field names. """ return self.spark_frame.select(self.spark_columns).columns @lazy_property def spark_columns(self) -> List[spark.Column]: """ Return Spark Columns for the managed columns including index columns. """ index_spark_columns = self.index_spark_columns return index_spark_columns + [ spark_column for label, spark_column in zip(self.column_labels, self.data_spark_columns) if all(not spark_column._jc.equals(scol._jc) for scol in index_spark_columns) ] @property def index_map(self) -> Dict[str, Optional[Tuple[str, ...]]]: """ Return the managed index information. """ assert len(self._index_map) > 0 return self._index_map @lazy_property def index_names(self) -> List[Optional[Tuple[str, ...]]]: """ Return the managed index names. """ return list(self.index_map.values()) @property def column_labels(self) -> List[Tuple[str, ...]]: """ Return the managed column index. """ return self._column_labels @lazy_property def column_labels_level(self) -> int: """ Return the level of the column index. """ return column_labels_level(self._column_labels) @property def column_label_names(self) -> Optional[List[str]]: """ Return names of the index levels. """ return self._column_label_names @lazy_property def to_internal_spark_frame(self) -> spark.DataFrame: """ Return as Spark DataFrame. 
This contains index columns as well and should be only used for internal purposes. """ index_spark_columns = self.index_spark_columns data_columns = [] for i, (label, spark_column, column_name) in enumerate( zip(self.column_labels, self.data_spark_columns, self.data_spark_column_names) ): if all(not spark_column._jc.equals(scol._jc) for scol in index_spark_columns): name = str(i) if label is None else name_like_string(label) if column_name != name: spark_column = spark_column.alias(name) data_columns.append(spark_column) return self.spark_frame.select(index_spark_columns + data_columns) @lazy_property def to_pandas_frame(self) -> pd.DataFrame: """ Return as pandas DataFrame. """ sdf = self.to_internal_spark_frame pdf = sdf.toPandas() if len(pdf) == 0 and len(sdf.schema) > 0: pdf = pdf.astype( {field.name: spark_type_to_pandas_dtype(field.dataType) for field in sdf.schema} ) column_names = [] for i, (label, spark_column, column_name) in enumerate( zip(self.column_labels, self.data_spark_columns, self.data_spark_column_names) ): for index_spark_column_name, index_spark_column in zip( self.index_spark_column_names, self.index_spark_columns ): if spark_column._jc.equals(index_spark_column._jc): column_names.append(index_spark_column_name) break else: name = str(i) if label is None else name_like_string(label) if column_name != name: column_name = name column_names.append(column_name) append = False for index_field in self.index_spark_column_names: drop = index_field not in column_names pdf = pdf.set_index(index_field, drop=drop, append=append) append = True pdf = pdf[column_names] if self.column_labels_level > 1: pdf.columns = pd.MultiIndex.from_tuples(self._column_labels) else: pdf.columns = [None if label is None else label[0] for label in self._column_labels] if self._column_label_names is not None: pdf.columns.names = self._column_label_names index_names = self.index_names if len(index_names) > 0: pdf.index.names = [ name if name is None or len(name) > 1 else name[0] for name in index_names ] return pdf def with_new_sdf( self, sdf: spark.DataFrame, data_columns: Optional[List[str]] = None ) -> "InternalFrame": """ Copy the immutable _InternalFrame with the updates by the specified Spark DataFrame. :param sdf: the new Spark DataFrame :param data_columns: the new column names. If None, the original one is used. :return: the copied _InternalFrame. """ assert self.spark_column is None if data_columns is None: data_columns = self.data_spark_column_names else: assert len(data_columns) == len(self.column_labels), ( len(data_columns), len(self.column_labels), ) sdf = sdf.drop(NATURAL_ORDER_COLUMN_NAME) return self.copy( spark_frame=sdf, data_spark_columns=[scol_for(sdf, col) for col in data_columns] ) def with_new_columns( self, scols_or_ksers: List[Union[spark.Column, "Series"]], column_labels: Optional[List[Tuple[str, ...]]] = None, column_label_names: Optional[Union[List[str], _NoValueType]] = _NoValue, keep_order: bool = True, ) -> "InternalFrame": """ Copy the immutable _InternalFrame with the updates by the specified Spark Columns or Series. :param scols_or_ksers: the new Spark Columns or Series. :param column_labels: the new column index. If None, the its column_labels is used when the corresponding `scols_or_ksers` is Series, otherwise the original one is used. :return: the copied _InternalFrame. 
""" from databricks.koalas.series import Series if column_labels is None: if all(isinstance(scol_or_kser, Series) for scol_or_kser in scols_or_ksers): column_labels = [kser._internal.column_labels[0] for kser in scols_or_ksers] else: assert len(scols_or_ksers) == len(self.column_labels), ( len(scols_or_ksers), len(self.column_labels), ) column_labels = [] for scol_or_kser, label in zip(scols_or_ksers, self.column_labels): if isinstance(scol_or_kser, Series): column_labels.append(scol_or_kser._internal.column_labels[0]) else: column_labels.append(label) else: assert len(scols_or_ksers) == len(column_labels), ( len(scols_or_ksers), len(column_labels), ) data_spark_columns = [] for scol_or_kser, label in zip(scols_or_ksers, column_labels): if isinstance(scol_or_kser, Series): scol = scol_or_kser._internal.spark_column else: scol = scol_or_kser data_spark_columns.append(scol) hidden_columns = [] if keep_order: hidden_columns.append(NATURAL_ORDER_COLUMN_NAME) sdf = self.spark_frame.select( self.index_spark_columns + data_spark_columns + hidden_columns ) if column_label_names is _NoValue: column_label_names = self._column_label_names return self.copy( spark_frame=sdf, column_labels=column_labels, data_spark_columns=[ scol_for(sdf, col) for col in self.spark_frame.select(data_spark_columns).columns ], column_label_names=column_label_names, spark_column=None, ) def with_filter(self, pred: Union[spark.Column, "Series"]): """ Copy the immutable _InternalFrame with the updates by the predicate. :param pred: the predicate to filter. :return: the copied _InternalFrame. """ from databricks.koalas.series import Series if isinstance(pred, Series): assert isinstance(pred.spark.data_type, BooleanType), pred.spark.data_type pred = pred.spark.column else: spark_type = self.spark_frame.select(pred).schema[0].dataType assert isinstance(spark_type, BooleanType), spark_type return self.copy(spark_frame=self.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME).filter(pred)) def copy( self, spark_frame: Union[spark.DataFrame, _NoValueType] = _NoValue, index_map: Optional[Union[Dict[str, Optional[Tuple[str, ...]]], _NoValueType]] = _NoValue, column_labels: Optional[Union[List[Tuple[str, ...]], _NoValueType]] = _NoValue, data_spark_columns: Optional[Union[List[spark.Column], _NoValueType]] = _NoValue, column_label_names: Optional[Union[List[str], _NoValueType]] = _NoValue, spark_column: Optional[Union[spark.Column, _NoValueType]] = _NoValue, ) -> "InternalFrame": """ Copy the immutable DataFrame. :param spark_frame: the new Spark DataFrame. If None, then the original one is used. :param index_map: the new index information. If None, then the original one is used. :param column_labels: the new column index. :param data_spark_columns: the new Spark Columns. If None, then the original ones are used. :param column_label_names: the new names of the index levels. :param spark_column: the new Spark Column. If None, then the original one is used. :return: the copied immutable DataFrame. 
""" if spark_frame is _NoValue: spark_frame = self.spark_frame if index_map is _NoValue: index_map = self._index_map if column_labels is _NoValue: column_labels = self._column_labels if data_spark_columns is _NoValue: data_spark_columns = self._data_spark_columns if column_label_names is _NoValue: column_label_names = self._column_label_names if spark_column is _NoValue: spark_column = self.spark_column return InternalFrame( spark_frame, index_map=index_map, column_labels=column_labels, data_spark_columns=data_spark_columns, column_label_names=column_label_names, spark_column=spark_column, ) @staticmethod def from_pandas(pdf: pd.DataFrame) -> "InternalFrame": """ Create an immutable DataFrame from pandas DataFrame. :param pdf: :class:`pd.DataFrame` :return: the created immutable DataFrame """ columns = pdf.columns data_columns = [name_like_string(col) for col in columns] if isinstance(columns, pd.MultiIndex): column_labels = columns.tolist() else: column_labels = None column_label_names = columns.names index_names = [ name if name is None or isinstance(name, tuple) else (name,) for name in pdf.index.names ] index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(index_names))] pdf = pdf.copy() pdf.index.names = index_columns reset_index = pdf.reset_index() reset_index.columns = index_columns + data_columns schema = StructType( [ StructField( name, infer_pd_series_spark_type(col), nullable=bool(col.isnull().any()), ) for name, col in reset_index.iteritems() ] ) for name, col in reset_index.iteritems(): dt = col.dtype if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt): continue reset_index[name] = col.replace({np.nan: None}) sdf = default_session().createDataFrame(reset_index, schema=schema) return InternalFrame( spark_frame=sdf, index_map=OrderedDict(zip(index_columns, index_names)), column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, )
idx: 1
id: 15,395
msg: Shall we add some docstrings to describe clearly when to use which? For example, `spark_frame` should now always be used via `df._internal.applied.spark_frame` for Spark DataFrame APIs that internally create a new query execution plan with a different output length. For expressions and/or functions, `df._internal.spark_frame` should be used together with Spark Column instances, in order to avoid operations on different DataFrames.
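For illustration, a minimal sketch of the split this comment describes, assuming a Koalas build where `DataFrame._internal` is the `InternalFrame` shown above; `applied` is only the accessor the comment proposes, and `kdf` is a hypothetical frame:

```python
# Sketch only: `applied` is the accessor proposed in the review comment and may
# not exist as such; the other attributes appear in the InternalFrame code above.
import databricks.koalas as ks

kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
internal = kdf._internal

# Expressions/functions: stay on the managed spark_frame and use its own Column
# instances, so every operation resolves against the same Spark DataFrame.
scol_a = internal.spark_column_for(("a",))
projected = internal.spark_frame.select((scol_a + 1).alias("a_plus_one"))

# Plan-changing Spark DataFrame APIs (different output length), e.g. distinct():
# the comment suggests routing these through something like
# `kdf._internal.applied.spark_frame` rather than mixing the result with columns
# of the original frame; to_internal_spark_frame stands in for that accessor here.
deduplicated = internal.to_internal_spark_frame.distinct()
```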
proj: databricks-koalas
lang: py
@@ -54,8 +54,8 @@ class DataFrame(_Frame): Dict can contain Series, arrays, constants, or list-like objects If data is a dict, argument order is maintained for Python 3.6 and later. - Note that if `data` is a Pandas DataFrame other arguments are ignored. - If data is a Spark DataFrame, all other arguments except `index` is ignored. + Note that if `data` is a Pandas DataFrame, other arguments should not be used + If `data` is a Spark DataFrame, all other arguments except `index` should not be used. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided
y: 1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A wrapper class for Spark DataFrame to behave similar to pandas DataFrame. """ import warnings from functools import partial, reduce from typing import Any, List, Tuple, Union import numpy as np import pandas as pd from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like from pyspark import sql as spark from pyspark.sql import functions as F, Column from pyspark.sql.types import BooleanType, StructField, StructType, to_arrow_type from pyspark.sql.utils import AnalysisException from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.utils import default_session, validate_arguments_and_invoke_function from databricks.koalas.exceptions import SparkPandasMergeError from databricks.koalas.generic import _Frame, max_display_count from databricks.koalas.metadata import Metadata from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame from databricks.koalas.ml import corr from databricks.koalas.selection import SparkDataFrameLocator from databricks.koalas.typedef import infer_pd_series_spark_type class DataFrame(_Frame): """ Koala DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame internally. :ivar _sdf: Spark Column instance :ivar _metadata: Metadata related to column names and index information. Parameters ---------- data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame or Spark DataFrame Dict can contain Series, arrays, constants, or list-like objects If data is a dict, argument order is maintained for Python 3.6 and later. Note that if `data` is a Pandas DataFrame other arguments are ignored. If data is a Spark DataFrame, all other arguments except `index` is ignored. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided If `data` is a Spark DataFrame, `index` is expected to be `Metadata`. columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = ks.DataFrame(data=d, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 Constructing DataFrame from Pandas DataFrame >>> df = ks.DataFrame(pd.DataFrame(data=d), columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. 
>>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = ks.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df2 # doctest: +SKIP a b c d e 0 3 1 4 9 8 1 4 8 4 8 4 2 7 6 5 6 7 3 8 7 9 1 0 4 2 5 4 3 9 """ def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if isinstance(data, pd.DataFrame): self._init_from_pandas(data) elif isinstance(data, spark.DataFrame): self._init_from_spark(data, index) else: pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy) self._init_from_pandas(pdf) def _init_from_pandas(self, pdf): metadata = Metadata.from_pandas(pdf) reset_index = pdf.reset_index() reset_index.columns = metadata.all_fields schema = StructType([StructField(name, infer_pd_series_spark_type(col), nullable=bool(col.isnull().any())) for name, col in reset_index.iteritems()]) for name, col in reset_index.iteritems(): dt = col.dtype if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt): continue reset_index[name] = col.replace({np.nan: None}) self._init_from_spark(default_session().createDataFrame(reset_index, schema=schema), metadata) def _init_from_spark(self, sdf, metadata=None): self._sdf = sdf if metadata is None: self._metadata = Metadata(column_fields=self._sdf.schema.fieldNames()) else: self._metadata = metadata @property def _index_columns(self): return [self._sdf.__getitem__(field) for field in self._metadata.index_fields] def _reduce_for_stat_function(self, sfun): """ Applies sfun to each column and returns a pd.Series where the number of rows equal the number of columns. :param sfun: either an 1-arg function that takes a Column and returns a Column, or a 2-arg function that takes a Column and its DataType and returns a Column. """ from inspect import signature exprs = [] num_args = len(signature(sfun).parameters) for col in self.columns: col_sdf = self._sdf[col] col_type = self._sdf.schema[col].dataType if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'): # Stat functions cannot be used with boolean values by default # Thus, cast to integer (true to 1 and false to 0) # Exclude the min and max methods though since those work with booleans col_sdf = col_sdf.cast('integer') if num_args == 1: # Only pass in the column if sfun accepts only one arg col_sdf = sfun(col_sdf) else: # must be 2 assert num_args == 2 # Pass in both the column and its data type if sfun accepts two args col_sdf = sfun(col_sdf, col_type) exprs.append(col_sdf.alias(col)) sdf = self._sdf.select(*exprs) pdf = sdf.toPandas() assert len(pdf) == 1, (sdf, pdf) row = pdf.iloc[0] row.name = None return row # Return first row as a Series def corr(self, method='pearson'): """ Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- y : pandas.DataFrame See Also -------- Series.corr Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.corr('pearson') dogs cats dogs 1.000000 -0.851064 cats -0.851064 1.000000 >>> df.corr('spearman') dogs cats dogs 1.000000 -0.948683 cats -0.948683 1.000000 Notes ----- There are behavior differences between Koalas and pandas. 
* the `method` argument only accepts 'pearson', 'spearman' * the data should not contain NaNs. Koalas will return an error. * Koalas doesn't support the following argument(s). * `min_periods` argument is not supported """ return corr(self, method) def iteritems(self): """ Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Returns ------- label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. Examples -------- >>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala'], ... columns=['species', 'population']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content.to_string()) ... label: species content: panda bear polar bear koala marsupial label: population content: panda 1864 polar 22000 koala 80000 """ cols = list(self.columns) return list((col_name, self[col_name]) for col_name in cols) def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False): """ Render a DataFrame as an HTML table. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' 
Character recognized as decimal separator, e.g. ',' in Europe. bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links (only works with Pandas 0.24+). Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_string : Convert DataFrame to a string. """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args) def to_string(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', line_width=None): """ Render a DataFrame to a console-friendly tabular output. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. line_width : int, optional Width to wrap a line in characters. 
Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2']) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 >>> print(df.to_string(max_rows=2)) col1 col2 0 1 4 1 2 5 """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args) def to_dict(self, orient='dict', into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). .. note:: This method should only be used if the resulting Pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2'], ... columns=['col1', 'col2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df_dict = df.to_dict() >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])] You can specify the return orientation. >>> df_dict = df.to_dict('series') >>> sorted(df_dict.items()) [('col1', row1 1 row2 2 Name: col1, dtype: int64), ('col2', row1 0.50 row2 0.75 Name: col2, dtype: float64)] >>> df_dict = df.to_dict('split') >>> sorted(df_dict.items()) # doctest: +ELLIPSIS [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])] >>> df_dict = df.to_dict('records') >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]] >>> df_dict = df.to_dict('index') >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])] You can also specify the mapping type. 
>>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \ ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS [defaultdict(<class 'list'>, {'col..., 'col...}), \ defaultdict(<class 'list'>, {'col..., 'col...})] """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() kdf = self return validate_arguments_and_invoke_function( kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args) @property def index(self): """The index (row labels) Column of the DataFrame. Currently supported only when the DataFrame has a single index. """ from databricks.koalas.series import Series if len(self._metadata.index_info) != 1: raise KeyError('Currently supported only when the DataFrame has a single index.') return Series(self._index_columns[0], anchor=self, index=[]) def set_index(self, keys, drop=True, append=False, inplace=False): """Set the DataFrame index (row labels) using one or more existing columns. By default yields a new object. :param keys: column label or list of column labels / arrays :param drop: boolean, default True Delete columns to be used as the new index :param append: boolean, default False Whether to append columns to existing index :param inplace: boolean, default False Modify the DataFrame in place (do not create a new object) :return: :class:`DataFrame` """ if isinstance(keys, str): keys = [keys] else: keys = list(keys) for key in keys: if key not in self.columns: raise KeyError(key) if drop: columns = [column for column in self._metadata.column_fields if column not in keys] else: columns = self._metadata.column_fields if append: index_info = self._metadata.index_info + [(column, column) for column in keys] else: index_info = [(column, column) for column in keys] metadata = self._metadata.copy(column_fields=columns, index_info=index_info) if inplace: self._metadata = metadata else: kdf = self.copy() kdf._metadata = metadata return kdf def reset_index(self, level=None, drop=False, inplace=False): """For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. :param level: int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default :param drop: boolean, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. 
:param inplace: boolean, default False Modify the DataFrame in place (do not create a new object) :return: :class:`DataFrame` """ if len(self._metadata.index_info) == 0: raise NotImplementedError('Can\'t reset index because there is no index.') multi_index = len(self._metadata.index_info) > 1 if multi_index: rename = lambda i: 'level_{}'.format(i) else: rename = lambda i: \ 'index' if 'index' not in self._metadata.column_fields else 'level_{}'.fomat(i) if level is None: index_columns = [(column, name if name is not None else rename(i)) for i, (column, name) in enumerate(self._metadata.index_info)] index_info = [] else: if isinstance(level, (int, str)): level = [level] level = list(level) if all(isinstance(l, int) for l in level): for l in level: if l >= len(self._metadata.index_info): raise IndexError('Too many levels: Index has only {} level, not {}' .format(len(self._metadata.index_info), l + 1)) idx = level elif all(isinstance(l, str) for l in level): idx = [] for l in level: try: i = self._metadata.index_fields.index(l) idx.append(i) except ValueError: if multi_index: raise KeyError('Level unknown not found') else: raise KeyError('Level unknown must be same as name ({})' .format(self._metadata.index_fields[0])) else: raise ValueError('Level should be all int or all string.') idx.sort() index_columns = [] index_info = self._metadata.index_info.copy() for i in idx: info = self._metadata.index_info[i] column_field, index_name = info index_columns.append((column_field, index_name if index_name is not None else rename(index_name))) index_info.remove(info) if drop: index_columns = [] metadata = self._metadata.copy( column_fields=[column for column, _ in index_columns] + self._metadata.column_fields, index_info=index_info) columns = [name for _, name in index_columns] + self._metadata.column_fields if inplace: self._metadata = metadata self.columns = columns else: kdf = self.copy() kdf._metadata = metadata kdf.columns = columns return kdf def isnull(self): """ Detects missing values for items in the current Dataframe. Return a boolean same-sized Dataframe indicating if the values are NA. NA values, such as None or numpy.NaN, gets mapped to True values. Everything else gets mapped to False values. See Also -------- Dataframe.notnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.isnull() 0 1 0 False False 1 False True 2 False True 3 False False >>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']]) >>> df.isnull() 0 1 2 0 True False True 1 False True False """ kdf = self.copy() for name, ks in kdf.iteritems(): kdf[name] = ks.isnull() return kdf isna = isnull def notnull(self): """ Detects non-missing values for items in the current Dataframe. This function takes a dataframe and indicates whether it's values are valid (not missing, which is ``NaN`` in numeric datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike). See Also -------- Dataframe.isnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.notnull() 0 1 0 True True 1 True False 2 True False 3 True True >>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df.notnull() 0 1 2 0 True True True 1 True False True """ kdf = self.copy() for name, ks in kdf.iteritems(): kdf[name] = ks.notnull() return kdf notna = notnull def to_koalas(self): """ Converts the existing DataFrame into a Koalas DataFrame. 
This method is monkey-patched into Spark's DataFrame and can be used to convert a Spark DataFrame into a Koalas DataFrame. If running on an existing Koalas DataFrame, the method returns itself. If a Koalas DataFrame is converted to a Spark DataFrame and then back to Koalas, it will lose the index information and the original index will be turned into a normal column. See Also -------- DataFrame.to_spark Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 >>> spark_df = df.to_spark() >>> spark_df DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint] >>> kdf = spark_df.to_koalas() >>> kdf __index_level_0__ col1 col2 0 0 1 3 1 1 2 4 Calling to_koalas on a Koalas DataFrame simply returns itself. >>> df.to_koalas() col1 col2 0 1 3 1 2 4 """ if isinstance(self, DataFrame): return self else: return DataFrame(self) def to_spark(self): """ Return the current DataFrame as a Spark DataFrame. See Also -------- DataFrame.to_koalas """ return self._sdf def to_pandas(self): """ Return a Pandas DataFrame. .. note:: This method should only be used if the resulting Pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.to_pandas() dogs cats 0 0.2 0.3 1 0.0 0.6 2 0.6 0.0 3 0.2 0.1 """ sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.all_fields]) pdf = sdf.toPandas() if len(pdf) == 0 and len(sdf.schema) > 0: # TODO: push to OSS pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype() for field in sdf.schema}) if len(self._metadata.index_info) > 0: append = False for index_field in self._metadata.index_fields: drop = index_field not in self._metadata.column_fields pdf = pdf.set_index(index_field, drop=drop, append=append) append = True pdf = pdf[self._metadata.column_fields] index_names = self._metadata.index_names if len(index_names) > 0: if isinstance(pdf.index, pd.MultiIndex): pdf.index.names = index_names else: pdf.index.name = index_names[0] return pdf # Alias to maintain backward compatibility with Spark toPandas = to_pandas def assign(self, **kwargs): """ Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though Koalas doesn't check it). If the values are not callable, (e.g. a Series or a literal), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Examples -------- >>> df = ks.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence and you can also create multiple columns within the same assign. >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32, ... 
temp_k=df['temp_c'] + 273.15) >>> assigned[['temp_c', 'temp_f', 'temp_k']] temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 Notes ----- Assigning multiple columns within the same ``assign`` is possible but you cannot refer to newly created or modified columns. This feature is supported in pandas for Python 3.6 and later but not in Koalas. In Koalas, all items are computed first, and then assigned. """ from databricks.koalas.series import Series for k, v in kwargs.items(): if not (isinstance(v, (Series, spark.Column)) or callable(v) or pd.api.types.is_scalar(v)): raise TypeError("Column assignment doesn't support type " "{0}".format(type(v).__name__)) if callable(v): kwargs[k] = v(self) pairs = list(kwargs.items()) sdf = self._sdf for (name, c) in pairs: if isinstance(c, Series): sdf = sdf.withColumn(name, c._scol) elif isinstance(c, Column): sdf = sdf.withColumn(name, c) else: sdf = sdf.withColumn(name, F.lit(c)) metadata = self._metadata.copy( column_fields=(self._metadata.column_fields + [name for name, _ in pairs if name not in self._metadata.column_fields])) return DataFrame(sdf, metadata) @property def loc(self): return SparkDataFrameLocator(self) def copy(self): return DataFrame(self._sdf, self._metadata.copy()) def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): """ Remove missing values. Parameters ---------- axis : {0 or 'index'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.drop : Drop specified labels from columns. DataFrame.isnull: Indicate missing values. DataFrame.notnull : Indicate existing (non-missing) values. Examples -------- >>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [None, 'Batmobile', 'Bullwhip'], ... "born": [None, "1940-04-25", None]}, ... columns=['name', 'toy', 'born']) >>> df name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. 
>>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ if axis == 0 or axis == 'index': if subset is not None: if isinstance(subset, str): columns = [subset] else: columns = list(subset) invalids = [column for column in columns if column not in self._metadata.column_fields] if len(invalids) > 0: raise KeyError(invalids) else: columns = list(self.columns) cnt = reduce(lambda x, y: x + y, [F.when(self[column].notna()._scol, 1).otherwise(0) for column in columns], F.lit(0)) if thresh is not None: pred = cnt >= F.lit(int(thresh)) elif how == 'any': pred = cnt == F.lit(len(columns)) elif how == 'all': pred = cnt > F.lit(0) else: if how is not None: raise ValueError('invalid how option: {h}'.format(h=how)) else: raise TypeError('must specify how or thresh') sdf = self._sdf.filter(pred) if inplace: self._sdf = sdf else: return DataFrame(sdf, self._metadata.copy()) else: raise NotImplementedError("dropna currently only works for axis=0 or axis='index'") def fillna(self, value=None, axis=None, inplace=False): """Fill NA/NaN values. :param value: scalar, dict, Series Value to use to fill holes. alternately a dict/Series of values specifying which value to use for each column. DataFrame is not supported. :param axis: {0 or `index`} 1 and `columns` are not supported. :param inplace: boolean, default False Fill in place (do not create a new object) :return: :class:`DataFrame` Examples -------- >>> df = ks.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0 1 3.0 4.0 0.0 1 2 0.0 0.0 0.0 5 3 0.0 3.0 1.0 4 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 2.0 1 2 0.0 1.0 2.0 5 3 0.0 3.0 1.0 4 """ if axis is None: axis = 0 if not (axis == 0 or axis == "index"): raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if value is None: raise ValueError('Currently must specify value') if not isinstance(value, (float, int, str, bool, dict, pd.Series)): raise TypeError("Unsupported type %s" % type(value)) if isinstance(value, pd.Series): value = value.to_dict() if isinstance(value, dict): for v in value.values(): if not isinstance(v, (float, int, str, bool)): raise TypeError("Unsupported type %s" % type(v)) sdf = self._sdf.fillna(value) if inplace: self._sdf = sdf else: return DataFrame(sdf, self._metadata.copy()) def head(self, n=5): """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `n` rows of the caller object. Examples -------- >>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 
'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon """ return DataFrame(self._sdf.limit(n), self._metadata.copy()) @property def columns(self): """The column labels of the DataFrame.""" return pd.Index(self._metadata.column_fields) @columns.setter def columns(self, names): old_names = self._metadata.column_fields if len(old_names) != len(names): raise ValueError( "Length mismatch: Expected axis has %d elements, new values have %d elements" % (len(old_names), len(names))) sdf = self._sdf.select(self._metadata.index_fields + [self[old_name]._scol.alias(new_name) for (old_name, new_name) in zip(old_names, names)]) self._sdf = sdf self._metadata = self._metadata.copy(column_fields=names) @property def dtypes(self): """Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the object dtype. :return: :class:`pd.Series` The data type of each column. Examples -------- >>> df = ks.DataFrame({'a': list('abc'), ... 'b': list(range(1, 4)), ... 'c': np.arange(3, 6).astype('i1'), ... 'd': np.arange(4.0, 7.0, dtype='float64'), ... 'e': [True, False, True], ... 'f': pd.date_range('20130101', periods=3)}, ... columns=['a', 'b', 'c', 'd', 'e', 'f']) >>> df.dtypes a object b int64 c int8 d float64 e bool f datetime64[ns] dtype: object """ return pd.Series([self[col].dtype for col in self._metadata.column_fields], index=self._metadata.column_fields) def count(self): """ Count non-NA cells for each column. The values `None`, `NaN` are considered NA. Returns ------- pandas.Series See Also -------- Series.count: Number of non-NA elements in a Series. DataFrame.shape: Number of DataFrame rows and columns (including NA elements). DataFrame.isna: Boolean same-sized DataFrame showing places of NA elements. Examples -------- Constructing DataFrame from a dictionary: >>> df = ks.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26], ... "Single": [False, True, True, True, False]}, ... columns=["Person", "Age", "Single"]) >>> df Person Age Single 0 John 24.0 False 1 Myla NaN True 2 Lewis 21.0 True 3 John 33.0 True 4 Myla 26.0 False Notice the uncounted NA values: >>> df.count() Person 5 Age 4 Single 5 dtype: int64 """ return self._reduce_for_stat_function(_Frame._count_expr) def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None): """ Drop specified labels from columns. Remove columns by specifying label names and axis=1 or columns. When specifying both labels and columns, only labels will be dropped. Removing rows is yet to be implemented. Parameters ---------- labels : single label or list-like Column labels to drop. axis : {1 or 'columns'}, default 1 .. dropna currently only works for axis=1 'columns' axis=0 is yet to be implemented. columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). Returns ------- dropped : DataFrame See Also -------- Series.dropna Examples -------- >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... 
columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop('x', axis=1) y z w 0 3 5 7 1 4 6 8 >>> df.drop(['y', 'z'], axis=1) x w 0 1 7 1 2 8 >>> df.drop(columns=['y', 'z']) x w 0 1 7 1 2 8 Notes ----- Currently only axis = 1 is supported in this function, axis = 0 is yet to be implemented. """ if labels is not None: axis = self._validate_axis(axis) if axis == 1: return self.drop(columns=labels) raise NotImplementedError("Drop currently only works for axis=1") elif columns is not None: if isinstance(columns, str): columns = [columns] sdf = self._sdf.drop(*columns) metadata = self._metadata.copy( column_fields=[column for column in self.columns if column not in columns] ) return DataFrame(sdf, metadata) else: raise ValueError("Need to specify at least one of 'labels' or 'columns'") def get(self, key, default=None): """ Get item from object for given key (DataFrame column, Panel slice, etc.). Returns default value if not found. Parameters ---------- key : object Returns ------- value : same type as items contained in object Examples -------- >>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']}, ... columns=['x', 'y', 'z']) >>> df x y z 0 0 a a 1 1 b b 2 2 b b >>> df.get('x') 0 0 1 1 2 2 Name: x, dtype: int64 >>> df.get(['x', 'y']) x y 0 0 a 1 1 b 2 2 b """ try: return self._pd_getitem(key) except (KeyError, ValueError, IndexError): return default def sort_values(self, by, ascending=True, inplace=False, na_position='last'): """ Sort by the values along either axis. Parameters ---------- by : str or list of str ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False if True, perform operation in-place na_position : {'first', 'last'}, default 'last' `first` puts NaNs at the beginning, `last` puts NaNs at the end Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ks.DataFrame({ ... 'col1': ['A', 'B', None, 'D', 'C'], ... 'col2': [2, 9, 8, 7, 4], ... 'col3': [0, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3']) >>> df col1 col2 col3 0 A 2 0 1 B 9 9 2 None 8 4 3 D 7 2 4 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 0 A 2 0 1 B 9 9 4 C 4 3 3 D 7 2 2 None 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 3 D 7 2 4 C 4 3 1 B 9 9 0 A 2 0 2 None 8 4 Sort by multiple columns >>> df = ks.DataFrame({ ... 'col1': ['A', 'A', 'B', None, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3']) >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 None 8 4 """ if isinstance(by, str): by = [by] if isinstance(ascending, bool): ascending = [ascending] * len(by) if len(ascending) != len(by): raise ValueError('Length of ascending ({}) != length of by ({})' .format(len(ascending), len(by))) if na_position not in ('first', 'last'): raise ValueError("invalid na_position: '{}'".format(na_position)) # Mapper: Get a spark column function for (ascending, na_position) combination # Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847. 
mapper = { (True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()), (True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()), (False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()), (False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()), } by = [mapper[(asc, na_position)](self[colname]._scol) for colname, asc in zip(by, ascending)] kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy()) if inplace: self._sdf = kdf._sdf self._metadata = kdf._metadata else: return kdf def isin(self, values): """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable or dict The sequence of values to test. If values is a dict, the keys must be the column names, which must match. Series and DataFrame are not supported. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. Examples -------- >>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog'], ... columns=['num_legs', 'num_wings']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True """ if isinstance(values, (pd.DataFrame, pd.Series)): raise NotImplementedError("DataFrame and Series are not supported") if isinstance(values, dict) and not set(values.keys()).issubset(self.columns): raise AttributeError( "'DataFrame' object has no attribute %s" % (set(values.keys()).difference(self.columns))) _select_columns = self._metadata.index_fields if isinstance(values, dict): for col in self.columns: if col in values: _select_columns.append(self[col]._scol.isin(values[col]).alias(col)) else: _select_columns.append(F.lit(False).alias(col)) elif is_list_like(values): _select_columns += [ self[col]._scol.isin(list(values)).alias(col) for col in self.columns] else: raise TypeError('Values should be iterable, Series, DataFrame or dict.') return DataFrame(self._sdf.select(_select_columns), self._metadata.copy()) def pipe(self, func, *args, **kwargs): r""" Apply func(self, \*args, \*\*kwargs). Parameters ---------- func : function function to apply to the DataFrame. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the DataFrames. args : iterable, optional positional arguments passed into ``func``. kwargs : mapping, optional a dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. For example, given >>> df = ks.DataFrame({'category': ['A', 'A', 'B'], ... 'col1': [1, 2, 3], ... 'col2': [4, 5, 6]}, ... columns=['category', 'col1', 'col2']) >>> def keep_category_a(df): ... return df[df['category'] == 'A'] >>> def add_one(df, column): ... return df.assign(col3=df[column] + 1) >>> def multiply(df, column1, column2): ... 
return df.assign(col4=df[column1] * df[column2]) instead of writing >>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3") category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 You can write >>> (df.pipe(keep_category_a) ... .pipe(add_one, column="col1") ... .pipe(multiply, column1="col2", column2="col3") ... ) category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``df``: >>> def multiply_2(column1, df, column2): ... return df.assign(col4=df[column1] * df[column2]) Then you can write >>> (df.pipe(keep_category_a) ... .pipe(add_one, column="col1") ... .pipe((multiply_2, 'df'), column1="col2", column2="col3") ... ) category col1 col2 col3 col4 0 A 1 4 2 8 1 A 2 5 3 15 """ if isinstance(func, tuple): func, target = func if target in kwargs: raise ValueError('%s is both the pipe target and a keyword ' 'argument' % target) kwargs[target] = self return func(*args, **kwargs) else: return func(self, *args, **kwargs) @property def shape(self): """ Return a tuple representing the dimensionality of the DataFrame. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self), len(self.columns) def merge(self, right: 'DataFrame', how: str = 'inner', on: str = None, left_index: bool = False, right_index: bool = False, suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame': """ Merge DataFrame objects with a database-style join. Parameters ---------- right: Object to merge with. how: Type of merge to be performed. {‘left’, ‘right’, ‘outer’, ‘inner’}, default ‘inner’ left: use only keys from left frame, similar to a SQL left outer join; preserve key order. right: use only keys from right frame, similar to a SQL right outer join; preserve key order. outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys. on: Column or index level names to join on. These must be found in both DataFrames. If on is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_index: Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index: Use the index from the right DataFrame as the join key. Same caveats as left_index. suffixes: Suffix to apply to overlapping column names in the left and right side, respectively. Returns ------- DataFrame A DataFrame of the two merged objects. Examples -------- >>> left_kdf = ks.DataFrame({'A': [1, 2]}) >>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2]) >>> left_kdf.merge(right_kdf, left_index=True, right_index=True) A B 0 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left') A B 0 1 None 1 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right') A B 0 2.0 x 1 NaN y >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer') A B 0 1.0 None 1 2.0 x 2 NaN y Notes ----- As described in #263, joining string columns currently returns None for missing values instead of NaN. 
""" if on is None and not left_index and not right_index: raise SparkPandasMergeError("At least 'on' or 'left_index' and 'right_index' have ", "to be set") if on is not None and (left_index or right_index): raise SparkPandasMergeError("Only 'on' or 'left_index' and 'right_index' can be set") if how == 'full': warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " + "instead to be compatible with the pandas merge API", UserWarning) if how == 'outer': # 'outer' in pandas equals 'full' in Spark how = 'full' if how not in ('inner', 'left', 'right', 'full'): raise ValueError("The 'how' parameter has to be amongst the following values: ", "['inner', 'left', 'right', 'outer']") if on is None: # FIXME Move index string to constant? on = '__index_level_0__' left_table = self._sdf.alias('left_table') right_table = right._sdf.alias('right_table') # Unpack suffixes tuple for convenience left_suffix = suffixes[0] right_suffix = suffixes[1] # Append suffixes to columns with the same name to avoid conflicts later duplicate_columns = list(self.columns & right.columns) if duplicate_columns: for duplicate_column_name in duplicate_columns: left_table = left_table.withColumnRenamed(duplicate_column_name, duplicate_column_name + left_suffix) right_table = right_table.withColumnRenamed(duplicate_column_name, duplicate_column_name + right_suffix) join_condition = (left_table[on] == right_table[on] if on not in duplicate_columns else left_table[on + left_suffix] == right_table[on + right_suffix]) joined_table = left_table.join(right_table, join_condition, how=how) if on in duplicate_columns: # Merge duplicate key columns joined_table = joined_table.withColumnRenamed(on + left_suffix, on) joined_table = joined_table.drop(on + right_suffix) # Remove auxiliary index # FIXME Move index string to constant? joined_table = joined_table.drop('__index_level_0__') kdf = DataFrame(joined_table) return kdf def _pd_getitem(self, key): from databricks.koalas.series import Series if key is None: raise KeyError("none key") if isinstance(key, str): try: return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_info) except AnalysisException: raise KeyError(key) if np.isscalar(key) or isinstance(key, (tuple, str)): raise NotImplementedError(key) elif isinstance(key, slice): return self.loc[key] if isinstance(key, (pd.Series, np.ndarray, pd.Index)): raise NotImplementedError(key) if isinstance(key, list): return self.loc[:, key] if isinstance(key, DataFrame): # TODO Should not implement alignment, too dangerous? return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_info) if isinstance(key, Series): # TODO Should not implement alignment, too dangerous? # It is assumed to be only a filter, otherwise .loc should be used. bcol = key._scol.cast("boolean") return DataFrame(self._sdf.filter(bcol), self._metadata.copy()) raise NotImplementedError(key) def __repr__(self): return repr(self.head(max_display_count).to_pandas()) def _repr_html_(self): return self.head(max_display_count).to_pandas()._repr_html_() def __getitem__(self, key): return self._pd_getitem(key) def __setitem__(self, key, value): from databricks.koalas.series import Series # For now, we don't support realignment against different dataframes. # This is too expensive in Spark. # Are we assigning against a column? 
if isinstance(value, Series): assert value._kdf is self, \ "Cannot combine column argument because it comes from a different dataframe" if isinstance(key, (tuple, list)): assert isinstance(value.schema, StructType) field_names = value.schema.fieldNames() kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)}) else: kdf = self.assign(**{key: value}) self._sdf = kdf._sdf self._metadata = kdf._metadata def __getattr__(self, key: str) -> Any: from databricks.koalas.series import Series if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"): raise AttributeError(key) if hasattr(_MissingPandasLikeDataFrame, key): property_or_func = getattr(_MissingPandasLikeDataFrame, key) if isinstance(property_or_func, property): return property_or_func.fget(self) # type: ignore else: return partial(property_or_func, self) return Series(self._sdf.__getattr__(key), anchor=self, index=self._metadata.index_info) def __iter__(self): return self.toPandas().__iter__() def __len__(self): return self._sdf.count() def __dir__(self): fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f] return super(DataFrame, self).__dir__() + fields @classmethod def _validate_axis(cls, axis=0): if axis not in (0, 1, 'index', 'columns', None): raise ValueError('No axis named {0}'.format(axis)) # convert to numeric axis return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis) def _reduce_spark_multi(sdf, aggs): """ Performs a reduction on a dataframe, the functions being known sql aggregate functions. """ assert isinstance(sdf, spark.DataFrame) sdf0 = sdf.agg(*aggs) l = sdf0.head(2) assert len(l) == 1, (sdf, l) row = l[0] l2 = list(row) assert len(l2) == len(aggs), (row, l2) return l2
1
9017
nit: `.` at the end of line?
databricks-koalas
py
@@ -2012,7 +2012,16 @@ public class MessageListFragment extends Fragment implements OnItemClickListener
 
     private void updateFooterView() {
         if (!mSearch.isManualSearch() && mCurrentFolder != null && mAccount != null) {
-            if (mCurrentFolder.loading) {
+            int msg=100;
+            try {
+                msg=mCurrentFolder.folder.getMessageCount();
+            } catch (MessagingException e) {
+                e.printStackTrace();
+            }
+            if (msg==0){
+                updateFooter(mContext.getString(R.string.no_email));
+            }
+            else if (mCurrentFolder.loading) {
                 updateFooter(mContext.getString(R.string.status_loading_more));
             } else if (!mCurrentFolder.moreMessages) {
                 updateFooter(null);
1
package com.fsck.k9.fragment; import java.lang.ref.WeakReference; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.Future; import android.annotation.SuppressLint; import android.app.Activity; import android.app.DialogFragment; import android.app.Fragment; import android.app.LoaderManager; import android.app.LoaderManager.LoaderCallbacks; import android.content.BroadcastReceiver; import android.content.Context; import android.content.CursorLoader; import android.content.Intent; import android.content.IntentFilter; import android.content.Loader; import android.database.Cursor; import android.graphics.Color; import android.graphics.Rect; import android.graphics.Typeface; import android.graphics.drawable.Drawable; import android.net.Uri; import android.os.Bundle; import android.os.Handler; import android.os.Parcelable; import android.support.v4.content.LocalBroadcastManager; import android.text.Spannable; import android.text.SpannableStringBuilder; import android.text.TextUtils; import android.text.format.DateUtils; import android.text.style.AbsoluteSizeSpan; import android.text.style.ForegroundColorSpan; import android.util.Log; import android.util.TypedValue; import android.view.ActionMode; import android.view.ContextMenu; import android.view.ContextMenu.ContextMenuInfo; import android.view.LayoutInflater; import android.view.Menu; import android.view.MenuInflater; import android.view.MenuItem; import android.view.MotionEvent; import android.view.View; import android.view.ViewGroup; import android.view.Window; import android.widget.AdapterView; import android.widget.AdapterView.AdapterContextMenuInfo; import android.widget.AdapterView.OnItemClickListener; import android.widget.CheckBox; import android.widget.CursorAdapter; import android.widget.ListView; import android.widget.QuickContactBadge; import android.widget.TextView; import android.widget.Toast; import com.fsck.k9.Account; import com.fsck.k9.Account.SortType; import com.fsck.k9.BuildConfig; import com.fsck.k9.FontSizes; import com.fsck.k9.K9; import com.fsck.k9.Preferences; import com.fsck.k9.R; import com.fsck.k9.activity.ActivityListener; import com.fsck.k9.activity.ChooseFolder; import com.fsck.k9.activity.FolderInfoHolder; import com.fsck.k9.activity.MessageReference; import com.fsck.k9.activity.misc.ContactPictureLoader; import com.fsck.k9.cache.EmailProviderCache; import com.fsck.k9.controller.MessagingController; import com.fsck.k9.fragment.ConfirmationDialogFragment.ConfirmationDialogFragmentListener; import com.fsck.k9.fragment.MessageListFragmentComparators.ArrivalComparator; import com.fsck.k9.fragment.MessageListFragmentComparators.AttachmentComparator; import com.fsck.k9.fragment.MessageListFragmentComparators.ComparatorChain; import com.fsck.k9.fragment.MessageListFragmentComparators.DateComparator; import com.fsck.k9.fragment.MessageListFragmentComparators.FlaggedComparator; import com.fsck.k9.fragment.MessageListFragmentComparators.ReverseComparator; import com.fsck.k9.fragment.MessageListFragmentComparators.ReverseIdComparator; import com.fsck.k9.fragment.MessageListFragmentComparators.SenderComparator; import com.fsck.k9.fragment.MessageListFragmentComparators.SubjectComparator; import com.fsck.k9.fragment.MessageListFragmentComparators.UnreadComparator; 
import com.fsck.k9.helper.ContactPicture; import com.fsck.k9.helper.MergeCursorWithUniqueId; import com.fsck.k9.helper.MessageHelper; import com.fsck.k9.helper.Utility; import com.fsck.k9.mail.Address; import com.fsck.k9.mail.Flag; import com.fsck.k9.mail.Folder; import com.fsck.k9.mail.Message; import com.fsck.k9.mail.MessagingException; import com.fsck.k9.mailstore.DatabasePreviewType; import com.fsck.k9.mailstore.LocalFolder; import com.fsck.k9.mailstore.LocalStore; import com.fsck.k9.preferences.StorageEditor; import com.fsck.k9.provider.EmailProvider; import com.fsck.k9.provider.EmailProvider.MessageColumns; import com.fsck.k9.provider.EmailProvider.SpecialColumns; import com.fsck.k9.provider.EmailProvider.ThreadColumns; import com.fsck.k9.search.ConditionsTreeNode; import com.fsck.k9.search.LocalSearch; import com.fsck.k9.search.SearchSpecification; import com.fsck.k9.search.SearchSpecification.SearchCondition; import com.fsck.k9.search.SearchSpecification.SearchField; import com.fsck.k9.search.SqlQueryBuilder; import com.handmark.pulltorefresh.library.ILoadingLayout; import com.handmark.pulltorefresh.library.PullToRefreshBase; import com.handmark.pulltorefresh.library.PullToRefreshListView; public class MessageListFragment extends Fragment implements OnItemClickListener, ConfirmationDialogFragmentListener, LoaderCallbacks<Cursor> { private static final String[] THREADED_PROJECTION = { MessageColumns.ID, MessageColumns.UID, MessageColumns.INTERNAL_DATE, MessageColumns.SUBJECT, MessageColumns.DATE, MessageColumns.SENDER_LIST, MessageColumns.TO_LIST, MessageColumns.CC_LIST, MessageColumns.READ, MessageColumns.FLAGGED, MessageColumns.ANSWERED, MessageColumns.FORWARDED, MessageColumns.ATTACHMENT_COUNT, MessageColumns.FOLDER_ID, MessageColumns.PREVIEW_TYPE, MessageColumns.PREVIEW, ThreadColumns.ROOT, SpecialColumns.ACCOUNT_UUID, SpecialColumns.FOLDER_NAME, SpecialColumns.THREAD_COUNT, }; private static final int ID_COLUMN = 0; private static final int UID_COLUMN = 1; static final int INTERNAL_DATE_COLUMN = 2; static final int SUBJECT_COLUMN = 3; static final int DATE_COLUMN = 4; private static final int SENDER_LIST_COLUMN = 5; private static final int TO_LIST_COLUMN = 6; private static final int CC_LIST_COLUMN = 7; static final int READ_COLUMN = 8; static final int FLAGGED_COLUMN = 9; private static final int ANSWERED_COLUMN = 10; private static final int FORWARDED_COLUMN = 11; static final int ATTACHMENT_COUNT_COLUMN = 12; private static final int FOLDER_ID_COLUMN = 13; private static final int PREVIEW_TYPE_COLUMN = 14; private static final int PREVIEW_COLUMN = 15; private static final int THREAD_ROOT_COLUMN = 16; private static final int ACCOUNT_UUID_COLUMN = 17; private static final int FOLDER_NAME_COLUMN = 18; private static final int THREAD_COUNT_COLUMN = 19; private static final String[] PROJECTION = Arrays.copyOf(THREADED_PROJECTION, THREAD_COUNT_COLUMN); public static MessageListFragment newInstance(LocalSearch search, boolean isThreadDisplay, boolean threadedList) { MessageListFragment fragment = new MessageListFragment(); Bundle args = new Bundle(); args.putParcelable(ARG_SEARCH, search); args.putBoolean(ARG_IS_THREAD_DISPLAY, isThreadDisplay); args.putBoolean(ARG_THREADED_LIST, threadedList); fragment.setArguments(args); return fragment; } private static final int ACTIVITY_CHOOSE_FOLDER_MOVE = 1; private static final int ACTIVITY_CHOOSE_FOLDER_COPY = 2; private static final String ARG_SEARCH = "searchObject"; private static final String ARG_THREADED_LIST = "threadedList"; 
private static final String ARG_IS_THREAD_DISPLAY = "isThreadedDisplay"; private static final String STATE_SELECTED_MESSAGES = "selectedMessages"; private static final String STATE_ACTIVE_MESSAGE = "activeMessage"; private static final String STATE_REMOTE_SEARCH_PERFORMED = "remoteSearchPerformed"; private static final String STATE_MESSAGE_LIST = "listState"; /** * Maps a {@link SortType} to a {@link Comparator} implementation. */ private static final Map<SortType, Comparator<Cursor>> SORT_COMPARATORS; static { // fill the mapping at class time loading final Map<SortType, Comparator<Cursor>> map = new EnumMap<>(SortType.class); map.put(SortType.SORT_ATTACHMENT, new AttachmentComparator()); map.put(SortType.SORT_DATE, new DateComparator()); map.put(SortType.SORT_ARRIVAL, new ArrivalComparator()); map.put(SortType.SORT_FLAGGED, new FlaggedComparator()); map.put(SortType.SORT_SUBJECT, new SubjectComparator()); map.put(SortType.SORT_SENDER, new SenderComparator()); map.put(SortType.SORT_UNREAD, new UnreadComparator()); // make it immutable to prevent accidental alteration (content is immutable already) SORT_COMPARATORS = Collections.unmodifiableMap(map); } private ListView mListView; private PullToRefreshListView mPullToRefreshView; private Parcelable mSavedListState; private int mPreviewLines = 0; private MessageListAdapter mAdapter; private View mFooterView; private FolderInfoHolder mCurrentFolder; private LayoutInflater mInflater; private MessagingController mController; private Account mAccount; private String[] mAccountUuids; private int mUnreadMessageCount = 0; private Cursor[] mCursors; private boolean[] mCursorValid; private int mUniqueIdColumn; /** * Stores the name of the folder that we want to open as soon as possible after load. */ private String mFolderName; private boolean mRemoteSearchPerformed = false; private Future<?> mRemoteSearchFuture = null; public List<Message> mExtraSearchResults; private String mTitle; private LocalSearch mSearch = null; private boolean mSingleAccountMode; private boolean mSingleFolderMode; private boolean mAllAccounts; private MessageListHandler mHandler = new MessageListHandler(this); private SortType mSortType = SortType.SORT_DATE; private boolean mSortAscending = true; private boolean mSortDateAscending = false; private boolean mSenderAboveSubject = false; private boolean mCheckboxes = true; private boolean mStars = true; private int mSelectedCount = 0; private Set<Long> mSelected = new HashSet<>(); private FontSizes mFontSizes = K9.getFontSizes(); private ActionMode mActionMode; private Boolean mHasConnectivity; /** * Relevant messages for the current context when we have to remember the chosen messages * between user interactions (e.g. selecting a folder for move operation). */ private List<MessageReference> mActiveMessages; /* package visibility for faster inner class access */ MessageHelper mMessageHelper; private ActionModeCallback mActionModeCallback = new ActionModeCallback(); private MessageListFragmentListener mFragmentListener; private boolean mThreadedList; private boolean mIsThreadDisplay; private Context mContext; private final ActivityListener mListener = new MessageListActivityListener(); private Preferences mPreferences; private boolean mLoaderJustInitialized; private MessageReference mActiveMessage; /** * {@code true} after {@link #onCreate(Bundle)} was executed. Used in {@link #updateTitle()} to * make sure we don't access member variables before initialization is complete. 
*/ private boolean mInitialized = false; private ContactPictureLoader mContactsPictureLoader; private LocalBroadcastManager mLocalBroadcastManager; private BroadcastReceiver mCacheBroadcastReceiver; private IntentFilter mCacheIntentFilter; /** * Stores the unique ID of the message the context menu was opened for. * * We have to save this because the message list might change between the time the menu was * opened and when the user clicks on a menu item. When this happens the 'adapter position' that * is accessible via the {@code ContextMenu} object might correspond to another list item and we * would end up using/modifying the wrong message. * * The value of this field is {@code 0} when no context menu is currently open. */ private long mContextMenuUniqueId = 0; /** * This class is used to run operations that modify UI elements in the UI thread. * * <p>We are using convenience methods that add a {@link android.os.Message} instance or a * {@link Runnable} to the message queue.</p> * * <p><strong>Note:</strong> If you add a method to this class make sure you don't accidentally * perform the operation in the calling thread.</p> */ static class MessageListHandler extends Handler { private static final int ACTION_FOLDER_LOADING = 1; private static final int ACTION_REFRESH_TITLE = 2; private static final int ACTION_PROGRESS = 3; private static final int ACTION_REMOTE_SEARCH_FINISHED = 4; private static final int ACTION_GO_BACK = 5; private static final int ACTION_RESTORE_LIST_POSITION = 6; private static final int ACTION_OPEN_MESSAGE = 7; private WeakReference<MessageListFragment> mFragment; public MessageListHandler(MessageListFragment fragment) { mFragment = new WeakReference<>(fragment); } public void folderLoading(String folder, boolean loading) { android.os.Message msg = android.os.Message.obtain(this, ACTION_FOLDER_LOADING, (loading) ? 1 : 0, 0, folder); sendMessage(msg); } public void refreshTitle() { android.os.Message msg = android.os.Message.obtain(this, ACTION_REFRESH_TITLE); sendMessage(msg); } public void progress(final boolean progress) { android.os.Message msg = android.os.Message.obtain(this, ACTION_PROGRESS, (progress) ? 1 : 0, 0); sendMessage(msg); } public void remoteSearchFinished() { android.os.Message msg = android.os.Message.obtain(this, ACTION_REMOTE_SEARCH_FINISHED); sendMessage(msg); } public void updateFooter(final String message) { post(new Runnable() { @Override public void run() { MessageListFragment fragment = mFragment.get(); if (fragment != null) { fragment.updateFooter(message); } } }); } public void goBack() { android.os.Message msg = android.os.Message.obtain(this, ACTION_GO_BACK); sendMessage(msg); } public void restoreListPosition() { MessageListFragment fragment = mFragment.get(); if (fragment != null) { android.os.Message msg = android.os.Message.obtain(this, ACTION_RESTORE_LIST_POSITION, fragment.mSavedListState); fragment.mSavedListState = null; sendMessage(msg); } } public void openMessage(MessageReference messageReference) { android.os.Message msg = android.os.Message.obtain(this, ACTION_OPEN_MESSAGE, messageReference); sendMessage(msg); } @Override public void handleMessage(android.os.Message msg) { MessageListFragment fragment = mFragment.get(); if (fragment == null) { return; } // The following messages don't need an attached activity. switch (msg.what) { case ACTION_REMOTE_SEARCH_FINISHED: { fragment.remoteSearchFinished(); return; } } // Discard messages if the fragment isn't attached to an activity anymore. 
Activity activity = fragment.getActivity(); if (activity == null) { return; } switch (msg.what) { case ACTION_FOLDER_LOADING: { String folder = (String) msg.obj; boolean loading = (msg.arg1 == 1); fragment.folderLoading(folder, loading); break; } case ACTION_REFRESH_TITLE: { fragment.updateTitle(); break; } case ACTION_PROGRESS: { boolean progress = (msg.arg1 == 1); fragment.progress(progress); break; } case ACTION_GO_BACK: { fragment.mFragmentListener.goBack(); break; } case ACTION_RESTORE_LIST_POSITION: { fragment.mListView.onRestoreInstanceState((Parcelable) msg.obj); break; } case ACTION_OPEN_MESSAGE: { MessageReference messageReference = (MessageReference) msg.obj; fragment.mFragmentListener.openMessage(messageReference); break; } } } } /** * @return The comparator to use to display messages in an ordered * fashion. Never {@code null}. */ protected Comparator<Cursor> getComparator() { final List<Comparator<Cursor>> chain = new ArrayList<>(3 /* we add 3 comparators at most */); // Add the specified comparator final Comparator<Cursor> comparator = SORT_COMPARATORS.get(mSortType); if (mSortAscending) { chain.add(comparator); } else { chain.add(new ReverseComparator<>(comparator)); } // Add the date comparator if not already specified if (mSortType != SortType.SORT_DATE && mSortType != SortType.SORT_ARRIVAL) { final Comparator<Cursor> dateComparator = SORT_COMPARATORS.get(SortType.SORT_DATE); if (mSortDateAscending) { chain.add(dateComparator); } else { chain.add(new ReverseComparator<>(dateComparator)); } } // Add the id comparator chain.add(new ReverseIdComparator()); // Build the comparator chain return new ComparatorChain<>(chain); } private void folderLoading(String folder, boolean loading) { if (mCurrentFolder != null && mCurrentFolder.name.equals(folder)) { mCurrentFolder.loading = loading; } updateMoreMessagesOfCurrentFolder(); updateFooterView(); } public void updateTitle() { if (!mInitialized) { return; } setWindowTitle(); if (!mSearch.isManualSearch()) { setWindowProgress(); } } private void setWindowProgress() { int level = Window.PROGRESS_END; if (mCurrentFolder != null && mCurrentFolder.loading && mListener.getFolderTotal() > 0) { int divisor = mListener.getFolderTotal(); if (divisor != 0) { level = (Window.PROGRESS_END / divisor) * (mListener.getFolderCompleted()) ; if (level > Window.PROGRESS_END) { level = Window.PROGRESS_END; } } } mFragmentListener.setMessageListProgress(level); } private void setWindowTitle() { // regular folder content display if (!isManualSearch() && mSingleFolderMode) { Activity activity = getActivity(); String displayName = FolderInfoHolder.getDisplayName(activity, mAccount, mFolderName); mFragmentListener.setMessageListTitle(displayName); String operation = mListener.getOperation(activity); if (operation.length() < 1) { mFragmentListener.setMessageListSubTitle(mAccount.getEmail()); } else { mFragmentListener.setMessageListSubTitle(operation); } } else { // query result display. This may be for a search folder as opposed to a user-initiated search. if (mTitle != null) { // This was a search folder; the search folder has overridden our title. mFragmentListener.setMessageListTitle(mTitle); } else { // This is a search result; set it to the default search result line. 
mFragmentListener.setMessageListTitle(getString(R.string.search_results)); } mFragmentListener.setMessageListSubTitle(null); } // set unread count if (mUnreadMessageCount <= 0) { mFragmentListener.setUnreadCount(0); } else { if (!mSingleFolderMode && mTitle == null) { // The unread message count is easily confused // with total number of messages in the search result, so let's hide it. mFragmentListener.setUnreadCount(0); } else { mFragmentListener.setUnreadCount(mUnreadMessageCount); } } } private void progress(final boolean progress) { mFragmentListener.enableActionBarProgress(progress); if (mPullToRefreshView != null && !progress) { mPullToRefreshView.onRefreshComplete(); } } @Override public void onItemClick(AdapterView<?> parent, View view, int position, long id) { if (view == mFooterView) { if (mCurrentFolder != null && !mSearch.isManualSearch() && mCurrentFolder.moreMessages) { mController.loadMoreMessages(mAccount, mFolderName, null); } else if (mCurrentFolder != null && isRemoteSearch() && mExtraSearchResults != null && mExtraSearchResults.size() > 0) { int numResults = mExtraSearchResults.size(); int limit = mAccount.getRemoteSearchNumResults(); List<Message> toProcess = mExtraSearchResults; if (limit > 0 && numResults > limit) { toProcess = toProcess.subList(0, limit); mExtraSearchResults = mExtraSearchResults.subList(limit, mExtraSearchResults.size()); } else { mExtraSearchResults = null; updateFooter(null); } mController.loadSearchResults(mAccount, mCurrentFolder.name, toProcess, mListener); } return; } Cursor cursor = (Cursor) parent.getItemAtPosition(position); if (cursor == null) { return; } if (mSelectedCount > 0) { toggleMessageSelect(position); } else { if (mThreadedList && cursor.getInt(THREAD_COUNT_COLUMN) > 1) { Account account = getAccountFromCursor(cursor); String folderName = cursor.getString(FOLDER_NAME_COLUMN); // If threading is enabled and this item represents a thread, display the thread contents. long rootId = cursor.getLong(THREAD_ROOT_COLUMN); mFragmentListener.showThread(account, folderName, rootId); } else { // This item represents a message; just display the message. 
openMessageAtPosition(listViewToAdapterPosition(position)); } } } @Override public void onAttach(Activity activity) { super.onAttach(activity); mContext = activity.getApplicationContext(); try { mFragmentListener = (MessageListFragmentListener) activity; } catch (ClassCastException e) { throw new ClassCastException(activity.getClass() + " must implement MessageListFragmentListener"); } } @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); Context appContext = getActivity().getApplicationContext(); mPreferences = Preferences.getPreferences(appContext); mController = MessagingController.getInstance(getActivity().getApplication()); mPreviewLines = K9.messageListPreviewLines(); mCheckboxes = K9.messageListCheckboxes(); mStars = K9.messageListStars(); if (K9.showContactPicture()) { mContactsPictureLoader = ContactPicture.getContactPictureLoader(getActivity()); } restoreInstanceState(savedInstanceState); decodeArguments(); createCacheBroadcastReceiver(appContext); mInitialized = true; } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { mInflater = inflater; View view = inflater.inflate(R.layout.message_list_fragment, container, false); initializePullToRefresh(inflater, view); initializeLayout(); mListView.setVerticalFadingEdgeEnabled(false); return view; } @Override public void onDestroyView() { mSavedListState = mListView.onSaveInstanceState(); super.onDestroyView(); } @Override public void onActivityCreated(Bundle savedInstanceState) { super.onActivityCreated(savedInstanceState); mMessageHelper = MessageHelper.getInstance(getActivity()); initializeMessageList(); // This needs to be done before initializing the cursor loader below initializeSortSettings(); mLoaderJustInitialized = true; LoaderManager loaderManager = getLoaderManager(); int len = mAccountUuids.length; mCursors = new Cursor[len]; mCursorValid = new boolean[len]; for (int i = 0; i < len; i++) { loaderManager.initLoader(i, null, this); mCursorValid[i] = false; } } @Override public void onSaveInstanceState(Bundle outState) { super.onSaveInstanceState(outState); saveSelectedMessages(outState); saveListState(outState); outState.putBoolean(STATE_REMOTE_SEARCH_PERFORMED, mRemoteSearchPerformed); outState.putParcelable(STATE_ACTIVE_MESSAGE, mActiveMessage); } /** * Restore the state of a previous {@link MessageListFragment} instance. * * @see #onSaveInstanceState(Bundle) */ private void restoreInstanceState(Bundle savedInstanceState) { if (savedInstanceState == null) { return; } restoreSelectedMessages(savedInstanceState); mRemoteSearchPerformed = savedInstanceState.getBoolean(STATE_REMOTE_SEARCH_PERFORMED); mSavedListState = savedInstanceState.getParcelable(STATE_MESSAGE_LIST); mActiveMessage = savedInstanceState.getParcelable(STATE_ACTIVE_MESSAGE); } /** * Write the unique IDs of selected messages to a {@link Bundle}. */ private void saveSelectedMessages(Bundle outState) { long[] selected = new long[mSelected.size()]; int i = 0; for (Long id : mSelected) { selected[i++] = id; } outState.putLongArray(STATE_SELECTED_MESSAGES, selected); } /** * Restore selected messages from a {@link Bundle}. 
*/ private void restoreSelectedMessages(Bundle savedInstanceState) { long[] selected = savedInstanceState.getLongArray(STATE_SELECTED_MESSAGES); for (long id : selected) { mSelected.add(Long.valueOf(id)); } } private void saveListState(Bundle outState) { if (mSavedListState != null) { // The previously saved state was never restored, so just use that. outState.putParcelable(STATE_MESSAGE_LIST, mSavedListState); } else if (mListView != null) { outState.putParcelable(STATE_MESSAGE_LIST, mListView.onSaveInstanceState()); } } private void initializeSortSettings() { if (mSingleAccountMode) { mSortType = mAccount.getSortType(); mSortAscending = mAccount.isSortAscending(mSortType); mSortDateAscending = mAccount.isSortAscending(SortType.SORT_DATE); } else { mSortType = K9.getSortType(); mSortAscending = K9.isSortAscending(mSortType); mSortDateAscending = K9.isSortAscending(SortType.SORT_DATE); } } private void decodeArguments() { Bundle args = getArguments(); mThreadedList = args.getBoolean(ARG_THREADED_LIST, false); mIsThreadDisplay = args.getBoolean(ARG_IS_THREAD_DISPLAY, false); mSearch = args.getParcelable(ARG_SEARCH); mTitle = mSearch.getName(); String[] accountUuids = mSearch.getAccountUuids(); mSingleAccountMode = false; if (accountUuids.length == 1 && !mSearch.searchAllAccounts()) { mSingleAccountMode = true; mAccount = mPreferences.getAccount(accountUuids[0]); } mSingleFolderMode = false; if (mSingleAccountMode && (mSearch.getFolderNames().size() == 1)) { mSingleFolderMode = true; mFolderName = mSearch.getFolderNames().get(0); mCurrentFolder = getFolderInfoHolder(mFolderName, mAccount); } mAllAccounts = false; if (mSingleAccountMode) { mAccountUuids = new String[] { mAccount.getUuid() }; } else { if (accountUuids.length == 1 && accountUuids[0].equals(SearchSpecification.ALL_ACCOUNTS)) { mAllAccounts = true; List<Account> accounts = mPreferences.getAccounts(); mAccountUuids = new String[accounts.size()]; for (int i = 0, len = accounts.size(); i < len; i++) { mAccountUuids[i] = accounts.get(i).getUuid(); } if (mAccountUuids.length == 1) { mSingleAccountMode = true; mAccount = accounts.get(0); } } else { mAccountUuids = accountUuids; } } } private void initializeMessageList() { mAdapter = new MessageListAdapter(); if (mFolderName != null) { mCurrentFolder = getFolderInfoHolder(mFolderName, mAccount); } if (mSingleFolderMode) { mListView.addFooterView(getFooterView(mListView)); updateFooterView(); } mListView.setAdapter(mAdapter); } private void createCacheBroadcastReceiver(Context appContext) { mLocalBroadcastManager = LocalBroadcastManager.getInstance(appContext); mCacheBroadcastReceiver = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { mAdapter.notifyDataSetChanged(); } }; mCacheIntentFilter = new IntentFilter(EmailProviderCache.ACTION_CACHE_UPDATED); } private FolderInfoHolder getFolderInfoHolder(String folderName, Account account) { try { LocalFolder localFolder = getFolder(folderName, account); return new FolderInfoHolder(mContext, localFolder, account); } catch (MessagingException e) { throw new RuntimeException(e); } } private LocalFolder getFolder(String folderName, Account account) throws MessagingException { LocalStore localStore = account.getLocalStore(); LocalFolder localFolder = localStore.getFolder(folderName); localFolder.open(Folder.OPEN_MODE_RO); return localFolder; } @Override public void onPause() { super.onPause(); mLocalBroadcastManager.unregisterReceiver(mCacheBroadcastReceiver); mListener.onPause(getActivity()); 
mController.removeListener(mListener); } /** * On resume we refresh messages for the folder that is currently open. * This guarantees that things like unread message count and read status * are updated. */ @Override public void onResume() { super.onResume(); Context appContext = getActivity().getApplicationContext(); mSenderAboveSubject = K9.messageListSenderAboveSubject(); if (!mLoaderJustInitialized) { restartLoader(); } else { mLoaderJustInitialized = false; } // Check if we have connectivity. Cache the value. if (mHasConnectivity == null) { mHasConnectivity = Utility.hasConnectivity(getActivity().getApplication()); } mLocalBroadcastManager.registerReceiver(mCacheBroadcastReceiver, mCacheIntentFilter); mListener.onResume(getActivity()); mController.addListener(mListener); //Cancel pending new mail notifications when we open an account List<Account> accountsWithNotification; Account account = mAccount; if (account != null) { accountsWithNotification = Collections.singletonList(account); } else { accountsWithNotification = mPreferences.getAccounts(); } for (Account accountWithNotification : accountsWithNotification) { mController.cancelNotificationsForAccount(accountWithNotification); } if (mAccount != null && mFolderName != null && !mSearch.isManualSearch()) { mController.getFolderUnreadMessageCount(mAccount, mFolderName, mListener); } updateTitle(); } private void restartLoader() { if (mCursorValid == null) { return; } // Refresh the message list LoaderManager loaderManager = getLoaderManager(); for (int i = 0; i < mAccountUuids.length; i++) { loaderManager.restartLoader(i, null, this); mCursorValid[i] = false; } } private void initializePullToRefresh(LayoutInflater inflater, View layout) { mPullToRefreshView = (PullToRefreshListView) layout.findViewById(R.id.message_list); @SuppressLint("InflateParams") View loadingView = inflater.inflate(R.layout.message_list_loading, null); mPullToRefreshView.setEmptyView(loadingView); if (isRemoteSearchAllowed()) { // "Pull to search server" mPullToRefreshView.setOnRefreshListener( new PullToRefreshBase.OnRefreshListener<ListView>() { @Override public void onRefresh(PullToRefreshBase<ListView> refreshView) { mPullToRefreshView.onRefreshComplete(); onRemoteSearchRequested(); } }); ILoadingLayout proxy = mPullToRefreshView.getLoadingLayoutProxy(); proxy.setPullLabel(getString( R.string.pull_to_refresh_remote_search_from_local_search_pull)); proxy.setReleaseLabel(getString( R.string.pull_to_refresh_remote_search_from_local_search_release)); } else if (isCheckMailSupported()) { // "Pull to refresh" mPullToRefreshView.setOnRefreshListener( new PullToRefreshBase.OnRefreshListener<ListView>() { @Override public void onRefresh(PullToRefreshBase<ListView> refreshView) { checkMail(); } }); } // Disable pull-to-refresh until the message list has been loaded setPullToRefreshEnabled(false); } /** * Enable or disable pull-to-refresh. * * @param enable * {@code true} to enable. {@code false} to disable. */ private void setPullToRefreshEnabled(boolean enable) { mPullToRefreshView.setMode((enable) ? 
PullToRefreshBase.Mode.PULL_FROM_START : PullToRefreshBase.Mode.DISABLED); } private void initializeLayout() { mListView = mPullToRefreshView.getRefreshableView(); mListView.setScrollBarStyle(View.SCROLLBARS_INSIDE_OVERLAY); mListView.setLongClickable(true); mListView.setFastScrollEnabled(true); mListView.setScrollingCacheEnabled(false); mListView.setOnItemClickListener(this); registerForContextMenu(mListView); } public void onCompose() { if (!mSingleAccountMode) { /* * If we have a query string, we don't have an account to let * compose start the default action. */ mFragmentListener.onCompose(null); } else { mFragmentListener.onCompose(mAccount); } } public void onReply(MessageReference messageReference) { mFragmentListener.onReply(messageReference); } public void onReplyAll(MessageReference messageReference) { mFragmentListener.onReplyAll(messageReference); } public void onForward(MessageReference messageReference) { mFragmentListener.onForward(messageReference); } public void onResendMessage(MessageReference messageReference) { mFragmentListener.onResendMessage(messageReference); } public void changeSort(SortType sortType) { Boolean sortAscending = (mSortType == sortType) ? !mSortAscending : null; changeSort(sortType, sortAscending); } /** * User has requested a remote search. Setup the bundle and start the intent. */ public void onRemoteSearchRequested() { String searchAccount; String searchFolder; searchAccount = mAccount.getUuid(); searchFolder = mCurrentFolder.name; String queryString = mSearch.getRemoteSearchArguments(); mRemoteSearchPerformed = true; mRemoteSearchFuture = mController.searchRemoteMessages(searchAccount, searchFolder, queryString, null, null, mListener); setPullToRefreshEnabled(false); mFragmentListener.remoteSearchStarted(); } /** * Change the sort type and sort order used for the message list. * * @param sortType * Specifies which field to use for sorting the message list. * @param sortAscending * Specifies the sort order. If this argument is {@code null} the default search order * for the sort type is used. 
*/ // FIXME: Don't save the changes in the UI thread private void changeSort(SortType sortType, Boolean sortAscending) { mSortType = sortType; Account account = mAccount; if (account != null) { account.setSortType(mSortType); if (sortAscending == null) { mSortAscending = account.isSortAscending(mSortType); } else { mSortAscending = sortAscending; } account.setSortAscending(mSortType, mSortAscending); mSortDateAscending = account.isSortAscending(SortType.SORT_DATE); account.save(mPreferences); } else { K9.setSortType(mSortType); if (sortAscending == null) { mSortAscending = K9.isSortAscending(mSortType); } else { mSortAscending = sortAscending; } K9.setSortAscending(mSortType, mSortAscending); mSortDateAscending = K9.isSortAscending(SortType.SORT_DATE); StorageEditor editor = mPreferences.getStorage().edit(); K9.save(editor); editor.commit(); } reSort(); } private void reSort() { int toastString = mSortType.getToast(mSortAscending); Toast toast = Toast.makeText(getActivity(), toastString, Toast.LENGTH_SHORT); toast.show(); LoaderManager loaderManager = getLoaderManager(); for (int i = 0, len = mAccountUuids.length; i < len; i++) { loaderManager.restartLoader(i, null, this); } } public void onCycleSort() { SortType[] sorts = SortType.values(); int curIndex = 0; for (int i = 0; i < sorts.length; i++) { if (sorts[i] == mSortType) { curIndex = i; break; } } curIndex++; if (curIndex == sorts.length) { curIndex = 0; } changeSort(sorts[curIndex]); } private void onDelete(MessageReference message) { onDelete(Collections.singletonList(message)); } private void onDelete(List<MessageReference> messages) { if (K9.confirmDelete()) { // remember the message selection for #onCreateDialog(int) mActiveMessages = messages; showDialog(R.id.dialog_confirm_delete); } else { onDeleteConfirmed(messages); } } private void onDeleteConfirmed(List<MessageReference> messages) { if (mThreadedList) { mController.deleteThreads(messages); } else { mController.deleteMessages(messages, null); } } @Override public void onActivityResult(int requestCode, int resultCode, Intent data) { if (resultCode != Activity.RESULT_OK) { return; } switch (requestCode) { case ACTIVITY_CHOOSE_FOLDER_MOVE: case ACTIVITY_CHOOSE_FOLDER_COPY: { if (data == null) { return; } final String destFolderName = data.getStringExtra(ChooseFolder.EXTRA_NEW_FOLDER); final List<MessageReference> messages = mActiveMessages; if (destFolderName != null) { mActiveMessages = null; // don't need it any more if (messages.size() > 0) { try { MessageReference firstMsg = messages.get(0); Account account = mPreferences.getAccount(firstMsg.getAccountUuid()); LocalFolder firstMsgFolder = getFolder(firstMsg.getFolderName(), account); firstMsgFolder.setLastSelectedFolderName(destFolderName); } catch (MessagingException e) { Log.e(K9.LOG_TAG, "Error getting folder for setLastSelectedFolderName()", e); } } switch (requestCode) { case ACTIVITY_CHOOSE_FOLDER_MOVE: move(messages, destFolderName); break; case ACTIVITY_CHOOSE_FOLDER_COPY: copy(messages, destFolderName); break; } } break; } } } public void onExpunge() { if (mCurrentFolder != null) { onExpunge(mAccount, mCurrentFolder.name); } } private void onExpunge(final Account account, String folderName) { mController.expunge(account, folderName); } private void showDialog(int dialogId) { DialogFragment fragment; switch (dialogId) { case R.id.dialog_confirm_spam: { String title = getString(R.string.dialog_confirm_spam_title); int selectionSize = mActiveMessages.size(); String message = getResources().getQuantityString( 
R.plurals.dialog_confirm_spam_message, selectionSize, Integer.valueOf(selectionSize)); String confirmText = getString(R.string.dialog_confirm_spam_confirm_button); String cancelText = getString(R.string.dialog_confirm_spam_cancel_button); fragment = ConfirmationDialogFragment.newInstance(dialogId, title, message, confirmText, cancelText); break; } case R.id.dialog_confirm_delete: { String title = getString(R.string.dialog_confirm_delete_title); int selectionSize = mActiveMessages.size(); String message = getResources().getQuantityString( R.plurals.dialog_confirm_delete_messages, selectionSize, Integer.valueOf(selectionSize)); String confirmText = getString(R.string.dialog_confirm_delete_confirm_button); String cancelText = getString(R.string.dialog_confirm_delete_cancel_button); fragment = ConfirmationDialogFragment.newInstance(dialogId, title, message, confirmText, cancelText); break; } case R.id.dialog_confirm_mark_all_as_read: { String title = getString(R.string.dialog_confirm_mark_all_as_read_title); String message = getString(R.string.dialog_confirm_mark_all_as_read_message); String confirmText = getString(R.string.dialog_confirm_mark_all_as_read_confirm_button); String cancelText = getString(R.string.dialog_confirm_mark_all_as_read_cancel_button); fragment = ConfirmationDialogFragment.newInstance(dialogId, title, message, confirmText, cancelText); break; } default: { throw new RuntimeException("Called showDialog(int) with unknown dialog id."); } } fragment.setTargetFragment(this, dialogId); fragment.show(getFragmentManager(), getDialogTag(dialogId)); } private String getDialogTag(int dialogId) { return "dialog-" + dialogId; } @Override public boolean onOptionsItemSelected(MenuItem item) { int itemId = item.getItemId(); switch (itemId) { case R.id.set_sort_date: { changeSort(SortType.SORT_DATE); return true; } case R.id.set_sort_arrival: { changeSort(SortType.SORT_ARRIVAL); return true; } case R.id.set_sort_subject: { changeSort(SortType.SORT_SUBJECT); return true; } case R.id.set_sort_sender: { changeSort(SortType.SORT_SENDER); return true; } case R.id.set_sort_flag: { changeSort(SortType.SORT_FLAGGED); return true; } case R.id.set_sort_unread: { changeSort(SortType.SORT_UNREAD); return true; } case R.id.set_sort_attach: { changeSort(SortType.SORT_ATTACHMENT); return true; } case R.id.select_all: { selectAll(); return true; } } if (!mSingleAccountMode) { // None of the options after this point are "safe" for search results //TODO: This is not true for "unread" and "starred" searches in regular folders return false; } switch (itemId) { case R.id.send_messages: { onSendPendingMessages(); return true; } case R.id.expunge: { if (mCurrentFolder != null) { onExpunge(mAccount, mCurrentFolder.name); } return true; } default: { return super.onOptionsItemSelected(item); } } } public void onSendPendingMessages() { mController.sendPendingMessages(mAccount, null); } @Override public boolean onContextItemSelected(android.view.MenuItem item) { if (mContextMenuUniqueId == 0) { return false; } int adapterPosition = getPositionForUniqueId(mContextMenuUniqueId); if (adapterPosition == AdapterView.INVALID_POSITION) { return false; } switch (item.getItemId()) { case R.id.deselect: case R.id.select: { toggleMessageSelectWithAdapterPosition(adapterPosition); break; } case R.id.reply: { onReply(getMessageAtPosition(adapterPosition)); break; } case R.id.reply_all: { onReplyAll(getMessageAtPosition(adapterPosition)); break; } case R.id.forward: { onForward(getMessageAtPosition(adapterPosition)); break; } case 
R.id.send_again: { onResendMessage(getMessageAtPosition(adapterPosition)); mSelectedCount = 0; break; } case R.id.same_sender: { Cursor cursor = (Cursor) mAdapter.getItem(adapterPosition); String senderAddress = getSenderAddressFromCursor(cursor); if (senderAddress != null) { mFragmentListener.showMoreFromSameSender(senderAddress); } break; } case R.id.delete: { MessageReference message = getMessageAtPosition(adapterPosition); onDelete(message); break; } case R.id.mark_as_read: { setFlag(adapterPosition, Flag.SEEN, true); break; } case R.id.mark_as_unread: { setFlag(adapterPosition, Flag.SEEN, false); break; } case R.id.flag: { setFlag(adapterPosition, Flag.FLAGGED, true); break; } case R.id.unflag: { setFlag(adapterPosition, Flag.FLAGGED, false); break; } // only if the account supports this case R.id.archive: { onArchive(getMessageAtPosition(adapterPosition)); break; } case R.id.spam: { onSpam(getMessageAtPosition(adapterPosition)); break; } case R.id.move: { onMove(getMessageAtPosition(adapterPosition)); break; } case R.id.copy: { onCopy(getMessageAtPosition(adapterPosition)); break; } // debug options case R.id.debug_delete_locally: { onDebugClearLocally(getMessageAtPosition(adapterPosition)); break; } } mContextMenuUniqueId = 0; return true; } static String getSenderAddressFromCursor(Cursor cursor) { String fromList = cursor.getString(SENDER_LIST_COLUMN); Address[] fromAddrs = Address.unpack(fromList); return (fromAddrs.length > 0) ? fromAddrs[0].getAddress() : null; } @Override public void onCreateContextMenu(ContextMenu menu, View v, ContextMenuInfo menuInfo) { super.onCreateContextMenu(menu, v, menuInfo); AdapterContextMenuInfo info = (AdapterContextMenuInfo) menuInfo; Cursor cursor = (Cursor) mListView.getItemAtPosition(info.position); if (cursor == null) { return; } getActivity().getMenuInflater().inflate(R.menu.message_list_item_context, menu); menu.findItem(R.id.debug_delete_locally).setVisible(BuildConfig.DEBUG); mContextMenuUniqueId = cursor.getLong(mUniqueIdColumn); Account account = getAccountFromCursor(cursor); String subject = cursor.getString(SUBJECT_COLUMN); boolean read = (cursor.getInt(READ_COLUMN) == 1); boolean flagged = (cursor.getInt(FLAGGED_COLUMN) == 1); menu.setHeaderTitle(subject); if( mSelected.contains(mContextMenuUniqueId)) { menu.findItem(R.id.select).setVisible(false); } else { menu.findItem(R.id.deselect).setVisible(false); } if (read) { menu.findItem(R.id.mark_as_read).setVisible(false); } else { menu.findItem(R.id.mark_as_unread).setVisible(false); } if (flagged) { menu.findItem(R.id.flag).setVisible(false); } else { menu.findItem(R.id.unflag).setVisible(false); } if (!mController.isCopyCapable(account)) { menu.findItem(R.id.copy).setVisible(false); } if (!mController.isMoveCapable(account)) { menu.findItem(R.id.move).setVisible(false); menu.findItem(R.id.archive).setVisible(false); menu.findItem(R.id.spam).setVisible(false); } if (!account.hasArchiveFolder()) { menu.findItem(R.id.archive).setVisible(false); } if (!account.hasSpamFolder()) { menu.findItem(R.id.spam).setVisible(false); } } public void onSwipeRightToLeft(final MotionEvent e1, final MotionEvent e2) { // Handle right-to-left as an un-select handleSwipe(e1, false); } public void onSwipeLeftToRight(final MotionEvent e1, final MotionEvent e2) { // Handle left-to-right as a select. handleSwipe(e1, true); } /** * Handle a select or unselect swipe event. * * @param downMotion * Event that started the swipe * @param selected * {@code true} if this was an attempt to select (i.e. left to right). 
*/ private void handleSwipe(final MotionEvent downMotion, final boolean selected) { int x = (int) downMotion.getRawX(); int y = (int) downMotion.getRawY(); Rect headerRect = new Rect(); mListView.getGlobalVisibleRect(headerRect); // Only handle swipes in the visible area of the message list if (headerRect.contains(x, y)) { int[] listPosition = new int[2]; mListView.getLocationOnScreen(listPosition); int listX = x - listPosition[0]; int listY = y - listPosition[1]; int listViewPosition = mListView.pointToPosition(listX, listY); toggleMessageSelect(listViewPosition); } } private int listViewToAdapterPosition(int position) { if (position > 0 && position <= mAdapter.getCount()) { return position - 1; } return AdapterView.INVALID_POSITION; } private int adapterToListViewPosition(int position) { if (position >= 0 && position < mAdapter.getCount()) { return position + 1; } return AdapterView.INVALID_POSITION; } class MessageListActivityListener extends ActivityListener { @Override public void remoteSearchFailed(String folder, final String err) { mHandler.post(new Runnable() { @Override public void run() { Activity activity = getActivity(); if (activity != null) { Toast.makeText(activity, R.string.remote_search_error, Toast.LENGTH_LONG).show(); } } }); } @Override public void remoteSearchStarted(String folder) { mHandler.progress(true); mHandler.updateFooter(mContext.getString(R.string.remote_search_sending_query)); } @Override public void enableProgressIndicator(boolean enable) { mHandler.progress(enable); } @Override public void remoteSearchFinished(String folder, int numResults, int maxResults, List<Message> extraResults) { mHandler.progress(false); mHandler.remoteSearchFinished(); mExtraSearchResults = extraResults; if (extraResults != null && extraResults.size() > 0) { mHandler.updateFooter(String.format(mContext.getString(R.string.load_more_messages_fmt), maxResults)); } else { mHandler.updateFooter(null); } mFragmentListener.setMessageListProgress(Window.PROGRESS_END); } @Override public void remoteSearchServerQueryComplete(String folderName, int numResults, int maxResults) { mHandler.progress(true); if (maxResults != 0 && numResults > maxResults) { mHandler.updateFooter(mContext.getString(R.string.remote_search_downloading_limited, maxResults, numResults)); } else { mHandler.updateFooter(mContext.getString(R.string.remote_search_downloading, numResults)); } mFragmentListener.setMessageListProgress(Window.PROGRESS_START); } @Override public void informUserOfStatus() { mHandler.refreshTitle(); } @Override public void synchronizeMailboxStarted(Account account, String folder) { if (updateForMe(account, folder)) { mHandler.progress(true); mHandler.folderLoading(folder, true); } super.synchronizeMailboxStarted(account, folder); } @Override public void synchronizeMailboxFinished(Account account, String folder, int totalMessagesInMailbox, int numNewMessages) { if (updateForMe(account, folder)) { mHandler.progress(false); mHandler.folderLoading(folder, false); } super.synchronizeMailboxFinished(account, folder, totalMessagesInMailbox, numNewMessages); } @Override public void synchronizeMailboxFailed(Account account, String folder, String message) { if (updateForMe(account, folder)) { mHandler.progress(false); mHandler.folderLoading(folder, false); } super.synchronizeMailboxFailed(account, folder, message); } @Override public void folderStatusChanged(Account account, String folder, int unreadMessageCount) { if (isSingleAccountMode() && isSingleFolderMode() && mAccount.equals(account) && 
mFolderName.equals(folder)) { mUnreadMessageCount = unreadMessageCount; } super.folderStatusChanged(account, folder, unreadMessageCount); } private boolean updateForMe(Account account, String folder) { if (account == null || folder == null) { return false; } if (!Utility.arrayContains(mAccountUuids, account.getUuid())) { return false; } List<String> folderNames = mSearch.getFolderNames(); return (folderNames.isEmpty() || folderNames.contains(folder)); } } class MessageListAdapter extends CursorAdapter { private Drawable mAttachmentIcon; private Drawable mForwardedIcon; private Drawable mAnsweredIcon; private Drawable mForwardedAnsweredIcon; MessageListAdapter() { super(getActivity(), null, 0); mAttachmentIcon = getResources().getDrawable(R.drawable.ic_email_attachment_small); mAnsweredIcon = getResources().getDrawable(R.drawable.ic_email_answered_small); mForwardedIcon = getResources().getDrawable(R.drawable.ic_email_forwarded_small); mForwardedAnsweredIcon = getResources().getDrawable(R.drawable.ic_email_forwarded_answered_small); } private String recipientSigil(boolean toMe, boolean ccMe) { if (toMe) { return getString(R.string.messagelist_sent_to_me_sigil); } else if (ccMe) { return getString(R.string.messagelist_sent_cc_me_sigil); } else { return ""; } } @Override public View newView(Context context, Cursor cursor, ViewGroup parent) { View view = mInflater.inflate(R.layout.message_list_item, parent, false); MessageViewHolder holder = new MessageViewHolder(); holder.date = (TextView) view.findViewById(R.id.date); holder.chip = view.findViewById(R.id.chip); if (mPreviewLines == 0 && mContactsPictureLoader == null) { view.findViewById(R.id.preview).setVisibility(View.GONE); holder.preview = (TextView) view.findViewById(R.id.sender_compact); holder.flagged = (CheckBox) view.findViewById(R.id.flagged_center_right); view.findViewById(R.id.flagged_bottom_right).setVisibility(View.GONE); } else { view.findViewById(R.id.sender_compact).setVisibility(View.GONE); holder.preview = (TextView) view.findViewById(R.id.preview); holder.flagged = (CheckBox) view.findViewById(R.id.flagged_bottom_right); view.findViewById(R.id.flagged_center_right).setVisibility(View.GONE); } QuickContactBadge contactBadge = (QuickContactBadge) view.findViewById(R.id.contact_badge); if (mContactsPictureLoader != null) { holder.contactBadge = contactBadge; } else { contactBadge.setVisibility(View.GONE); } if (mSenderAboveSubject) { holder.from = (TextView) view.findViewById(R.id.subject); mFontSizes.setViewTextSize(holder.from, mFontSizes.getMessageListSender()); } else { holder.subject = (TextView) view.findViewById(R.id.subject); mFontSizes.setViewTextSize(holder.subject, mFontSizes.getMessageListSubject()); } mFontSizes.setViewTextSize(holder.date, mFontSizes.getMessageListDate()); // 1 preview line is needed even if it is set to 0, because subject is part of the same text view holder.preview.setLines(Math.max(mPreviewLines,1)); mFontSizes.setViewTextSize(holder.preview, mFontSizes.getMessageListPreview()); holder.threadCount = (TextView) view.findViewById(R.id.thread_count); mFontSizes.setViewTextSize(holder.threadCount, mFontSizes.getMessageListSubject()); // thread count is next to subject view.findViewById(R.id.selected_checkbox_wrapper).setVisibility((mCheckboxes) ? View.VISIBLE : View.GONE); holder.flagged.setVisibility(mStars ? 
View.VISIBLE : View.GONE); holder.flagged.setOnClickListener(holder); holder.selected = (CheckBox) view.findViewById(R.id.selected_checkbox); holder.selected.setOnClickListener(holder); view.setTag(holder); return view; } @Override public void bindView(View view, Context context, Cursor cursor) { Account account = getAccountFromCursor(cursor); String fromList = cursor.getString(SENDER_LIST_COLUMN); String toList = cursor.getString(TO_LIST_COLUMN); String ccList = cursor.getString(CC_LIST_COLUMN); Address[] fromAddrs = Address.unpack(fromList); Address[] toAddrs = Address.unpack(toList); Address[] ccAddrs = Address.unpack(ccList); boolean fromMe = mMessageHelper.toMe(account, fromAddrs); boolean toMe = mMessageHelper.toMe(account, toAddrs); boolean ccMe = mMessageHelper.toMe(account, ccAddrs); CharSequence displayName = mMessageHelper.getDisplayName(account, fromAddrs, toAddrs); CharSequence displayDate = DateUtils.getRelativeTimeSpanString(context, cursor.getLong(DATE_COLUMN)); Address counterpartyAddress = null; if (fromMe) { if (toAddrs.length > 0) { counterpartyAddress = toAddrs[0]; } else if (ccAddrs.length > 0) { counterpartyAddress = ccAddrs[0]; } } else if (fromAddrs.length > 0) { counterpartyAddress = fromAddrs[0]; } int threadCount = (mThreadedList) ? cursor.getInt(THREAD_COUNT_COLUMN) : 0; String subject = cursor.getString(SUBJECT_COLUMN); if (TextUtils.isEmpty(subject)) { subject = getString(R.string.general_no_subject); } else if (threadCount > 1) { // If this is a thread, strip the RE/FW from the subject. "Be like Outlook." subject = Utility.stripSubject(subject); } boolean read = (cursor.getInt(READ_COLUMN) == 1); boolean flagged = (cursor.getInt(FLAGGED_COLUMN) == 1); boolean answered = (cursor.getInt(ANSWERED_COLUMN) == 1); boolean forwarded = (cursor.getInt(FORWARDED_COLUMN) == 1); boolean hasAttachments = (cursor.getInt(ATTACHMENT_COUNT_COLUMN) > 0); MessageViewHolder holder = (MessageViewHolder) view.getTag(); int maybeBoldTypeface = (read) ? Typeface.NORMAL : Typeface.BOLD; long uniqueId = cursor.getLong(mUniqueIdColumn); boolean selected = mSelected.contains(uniqueId); holder.chip.setBackgroundColor(account.getChipColor()); if (mCheckboxes) { holder.selected.setChecked(selected); } if (mStars) { holder.flagged.setChecked(flagged); } holder.position = cursor.getPosition(); if (holder.contactBadge != null) { if (counterpartyAddress != null) { Utility.setContactForBadge(holder.contactBadge, counterpartyAddress); /* * At least in Android 2.2 a different background + padding is used when no * email address is available. ListView reuses the views but QuickContactBadge * doesn't reset the padding, so we do it ourselves. 
*/ holder.contactBadge.setPadding(0, 0, 0, 0); mContactsPictureLoader.loadContactPicture(counterpartyAddress, holder.contactBadge); } else { holder.contactBadge.assignContactUri(null); holder.contactBadge.setImageResource(R.drawable.ic_contact_picture); } } // Background color if (selected || K9.useBackgroundAsUnreadIndicator()) { int res; if (selected) { res = R.attr.messageListSelectedBackgroundColor; } else if (read) { res = R.attr.messageListReadItemBackgroundColor; } else { res = R.attr.messageListUnreadItemBackgroundColor; } TypedValue outValue = new TypedValue(); getActivity().getTheme().resolveAttribute(res, outValue, true); view.setBackgroundColor(outValue.data); } else { view.setBackgroundColor(Color.TRANSPARENT); } if (mActiveMessage != null) { String uid = cursor.getString(UID_COLUMN); String folderName = cursor.getString(FOLDER_NAME_COLUMN); if (account.getUuid().equals(mActiveMessage.getAccountUuid()) && folderName.equals(mActiveMessage.getFolderName()) && uid.equals(mActiveMessage.getUid())) { int res = R.attr.messageListActiveItemBackgroundColor; TypedValue outValue = new TypedValue(); getActivity().getTheme().resolveAttribute(res, outValue, true); view.setBackgroundColor(outValue.data); } } // Thread count if (threadCount > 1) { holder.threadCount.setText(String.format("%d", threadCount)); holder.threadCount.setVisibility(View.VISIBLE); } else { holder.threadCount.setVisibility(View.GONE); } CharSequence beforePreviewText = (mSenderAboveSubject) ? subject : displayName; String sigil = recipientSigil(toMe, ccMe); SpannableStringBuilder messageStringBuilder = new SpannableStringBuilder(sigil) .append(beforePreviewText); if (mPreviewLines > 0) { String preview = getPreview(cursor); messageStringBuilder.append(" ").append(preview); } holder.preview.setText(messageStringBuilder, TextView.BufferType.SPANNABLE); Spannable str = (Spannable)holder.preview.getText(); // Create a span section for the sender, and assign the correct font size and weight int fontSize = (mSenderAboveSubject) ? mFontSizes.getMessageListSubject(): mFontSizes.getMessageListSender(); AbsoluteSizeSpan span = new AbsoluteSizeSpan(fontSize, true); str.setSpan(span, 0, beforePreviewText.length() + sigil.length(), Spannable.SPAN_EXCLUSIVE_EXCLUSIVE); //TODO: make this part of the theme int color = (K9.getK9Theme() == K9.Theme.LIGHT) ? Color.rgb(105, 105, 105) : Color.rgb(160, 160, 160); // Set span (color) for preview message str.setSpan(new ForegroundColorSpan(color), beforePreviewText.length() + sigil.length(), str.length(), Spannable.SPAN_EXCLUSIVE_EXCLUSIVE); Drawable statusHolder = null; if (forwarded && answered) { statusHolder = mForwardedAnsweredIcon; } else if (answered) { statusHolder = mAnsweredIcon; } else if (forwarded) { statusHolder = mForwardedIcon; } if (holder.from != null ) { holder.from.setTypeface(Typeface.create(holder.from.getTypeface(), maybeBoldTypeface)); if (mSenderAboveSubject) { holder.from.setCompoundDrawablesWithIntrinsicBounds( statusHolder, // left null, // top hasAttachments ? mAttachmentIcon : null, // right null); // bottom holder.from.setText(displayName); } else { holder.from.setText(new SpannableStringBuilder(sigil).append(displayName)); } } if (holder.subject != null ) { if (!mSenderAboveSubject) { holder.subject.setCompoundDrawablesWithIntrinsicBounds( statusHolder, // left null, // top hasAttachments ? 
mAttachmentIcon : null, // right null); // bottom } holder.subject.setTypeface(Typeface.create(holder.subject.getTypeface(), maybeBoldTypeface)); holder.subject.setText(subject); } holder.date.setText(displayDate); } private String getPreview(Cursor cursor) { String previewTypeString = cursor.getString(PREVIEW_TYPE_COLUMN); DatabasePreviewType previewType = DatabasePreviewType.fromDatabaseValue(previewTypeString); switch (previewType) { case NONE: { return ""; } case ENCRYPTED: { return getString(R.string.preview_encrypted); } case TEXT: { return cursor.getString(PREVIEW_COLUMN); } } throw new AssertionError("Unknown preview type: " + previewType); } } class MessageViewHolder implements View.OnClickListener { public TextView subject; public TextView preview; public TextView from; public TextView time; public TextView date; public View chip; public TextView threadCount; public CheckBox flagged; public CheckBox selected; public int position = -1; public QuickContactBadge contactBadge; @Override public void onClick(View view) { if (position != -1) { switch (view.getId()) { case R.id.selected_checkbox: toggleMessageSelectWithAdapterPosition(position); break; case R.id.flagged_bottom_right: case R.id.flagged_center_right: toggleMessageFlagWithAdapterPosition(position); break; } } } } private View getFooterView(ViewGroup parent) { if (mFooterView == null) { mFooterView = mInflater.inflate(R.layout.message_list_item_footer, parent, false); FooterViewHolder holder = new FooterViewHolder(); holder.main = (TextView) mFooterView.findViewById(R.id.main_text); mFooterView.setTag(holder); } return mFooterView; } private void updateFooterView() { if (!mSearch.isManualSearch() && mCurrentFolder != null && mAccount != null) { if (mCurrentFolder.loading) { updateFooter(mContext.getString(R.string.status_loading_more)); } else if (!mCurrentFolder.moreMessages) { updateFooter(null); } else { String message; if (!mCurrentFolder.lastCheckFailed) { if (mAccount.getDisplayCount() == 0) { message = mContext.getString(R.string.message_list_load_more_messages_action); } else { message = String.format(mContext.getString(R.string.load_more_messages_fmt), mAccount.getDisplayCount()); } } else { message = mContext.getString(R.string.status_loading_more_failed); } updateFooter(message); } } else { updateFooter(null); } } public void updateFooter(final String text) { if (mFooterView == null) { return; } FooterViewHolder holder = (FooterViewHolder) mFooterView.getTag(); if (text != null) { holder.main.setText(text); holder.main.setVisibility(View.VISIBLE); } else { holder.main.setVisibility(View.GONE); } } static class FooterViewHolder { public TextView main; } /** * Set selection state for all messages. * * @param selected * If {@code true} all messages get selected. Otherwise, all messages get deselected and * action mode is finished. */ private void setSelectionState(boolean selected) { if (selected) { if (mAdapter.getCount() == 0) { // Nothing to do if there are no messages return; } mSelectedCount = 0; for (int i = 0, end = mAdapter.getCount(); i < end; i++) { Cursor cursor = (Cursor) mAdapter.getItem(i); long uniqueId = cursor.getLong(mUniqueIdColumn); mSelected.add(uniqueId); if (mThreadedList) { int threadCount = cursor.getInt(THREAD_COUNT_COLUMN); mSelectedCount += (threadCount > 1) ? 
threadCount : 1; } else { mSelectedCount++; } } if (mActionMode == null) { startAndPrepareActionMode(); } computeBatchDirection(); updateActionModeTitle(); computeSelectAllVisibility(); } else { mSelected.clear(); mSelectedCount = 0; if (mActionMode != null) { mActionMode.finish(); mActionMode = null; } } mAdapter.notifyDataSetChanged(); } private void toggleMessageSelect(int listViewPosition) { int adapterPosition = listViewToAdapterPosition(listViewPosition); if (adapterPosition == AdapterView.INVALID_POSITION) { return; } toggleMessageSelectWithAdapterPosition(adapterPosition); } private void toggleMessageFlagWithAdapterPosition(int adapterPosition) { Cursor cursor = (Cursor) mAdapter.getItem(adapterPosition); boolean flagged = (cursor.getInt(FLAGGED_COLUMN) == 1); setFlag(adapterPosition,Flag.FLAGGED, !flagged); } private void toggleMessageSelectWithAdapterPosition(int adapterPosition) { Cursor cursor = (Cursor) mAdapter.getItem(adapterPosition); long uniqueId = cursor.getLong(mUniqueIdColumn); boolean selected = mSelected.contains(uniqueId); if (!selected) { mSelected.add(uniqueId); } else { mSelected.remove(uniqueId); } int selectedCountDelta = 1; if (mThreadedList) { int threadCount = cursor.getInt(THREAD_COUNT_COLUMN); if (threadCount > 1) { selectedCountDelta = threadCount; } } if (mActionMode != null) { if (mSelectedCount == selectedCountDelta && selected) { mActionMode.finish(); mActionMode = null; return; } } else { startAndPrepareActionMode(); } if (selected) { mSelectedCount -= selectedCountDelta; } else { mSelectedCount += selectedCountDelta; } computeBatchDirection(); updateActionModeTitle(); computeSelectAllVisibility(); mAdapter.notifyDataSetChanged(); } private void updateActionModeTitle() { mActionMode.setTitle(String.format(getString(R.string.actionbar_selected), mSelectedCount)); } private void computeSelectAllVisibility() { mActionModeCallback.showSelectAll(mSelected.size() != mAdapter.getCount()); } private void computeBatchDirection() { boolean isBatchFlag = false; boolean isBatchRead = false; for (int i = 0, end = mAdapter.getCount(); i < end; i++) { Cursor cursor = (Cursor) mAdapter.getItem(i); long uniqueId = cursor.getLong(mUniqueIdColumn); if (mSelected.contains(uniqueId)) { boolean read = (cursor.getInt(READ_COLUMN) == 1); boolean flagged = (cursor.getInt(FLAGGED_COLUMN) == 1); if (!flagged) { isBatchFlag = true; } if (!read) { isBatchRead = true; } if (isBatchFlag && isBatchRead) { break; } } } mActionModeCallback.showMarkAsRead(isBatchRead); mActionModeCallback.showFlag(isBatchFlag); } private void setFlag(int adapterPosition, final Flag flag, final boolean newState) { if (adapterPosition == AdapterView.INVALID_POSITION) { return; } Cursor cursor = (Cursor) mAdapter.getItem(adapterPosition); Account account = mPreferences.getAccount(cursor.getString(ACCOUNT_UUID_COLUMN)); if (mThreadedList && cursor.getInt(THREAD_COUNT_COLUMN) > 1) { long threadRootId = cursor.getLong(THREAD_ROOT_COLUMN); mController.setFlagForThreads(account, Collections.singletonList(Long.valueOf(threadRootId)), flag, newState); } else { long id = cursor.getLong(ID_COLUMN); mController.setFlag(account, Collections.singletonList(Long.valueOf(id)), flag, newState); } computeBatchDirection(); } private void setFlagForSelected(final Flag flag, final boolean newState) { if (mSelected.isEmpty()) { return; } Map<Account, List<Long>> messageMap = new HashMap<>(); Map<Account, List<Long>> threadMap = new HashMap<>(); Set<Account> accounts = new HashSet<>(); for (int position = 0, end = 
mAdapter.getCount(); position < end; position++) { Cursor cursor = (Cursor) mAdapter.getItem(position); long uniqueId = cursor.getLong(mUniqueIdColumn); if (mSelected.contains(uniqueId)) { String uuid = cursor.getString(ACCOUNT_UUID_COLUMN); Account account = mPreferences.getAccount(uuid); accounts.add(account); if (mThreadedList && cursor.getInt(THREAD_COUNT_COLUMN) > 1) { List<Long> threadRootIdList = threadMap.get(account); if (threadRootIdList == null) { threadRootIdList = new ArrayList<>(); threadMap.put(account, threadRootIdList); } threadRootIdList.add(cursor.getLong(THREAD_ROOT_COLUMN)); } else { List<Long> messageIdList = messageMap.get(account); if (messageIdList == null) { messageIdList = new ArrayList<>(); messageMap.put(account, messageIdList); } messageIdList.add(cursor.getLong(ID_COLUMN)); } } } for (Account account : accounts) { List<Long> messageIds = messageMap.get(account); List<Long> threadRootIds = threadMap.get(account); if (messageIds != null) { mController.setFlag(account, messageIds, flag, newState); } if (threadRootIds != null) { mController.setFlagForThreads(account, threadRootIds, flag, newState); } } computeBatchDirection(); } private void onMove(MessageReference message) { onMove(Collections.singletonList(message)); } /** * Display the message move activity. * * @param messages * Never {@code null}. */ private void onMove(List<MessageReference> messages) { if (!checkCopyOrMovePossible(messages, FolderOperation.MOVE)) { return; } String folderName; if (mIsThreadDisplay) { folderName = messages.get(0).getFolderName(); } else if (mSingleFolderMode) { folderName = mCurrentFolder.folder.getName(); } else { folderName = null; } displayFolderChoice(ACTIVITY_CHOOSE_FOLDER_MOVE, folderName, messages.get(0).getAccountUuid(), null, messages); } private void onCopy(MessageReference message) { onCopy(Collections.singletonList(message)); } /** * Display the message copy activity. * * @param messages * Never {@code null}. */ private void onCopy(List<MessageReference> messages) { if (!checkCopyOrMovePossible(messages, FolderOperation.COPY)) { return; } String folderName; if (mIsThreadDisplay) { folderName = messages.get(0).getFolderName(); } else if (mSingleFolderMode) { folderName = mCurrentFolder.folder.getName(); } else { folderName = null; } displayFolderChoice(ACTIVITY_CHOOSE_FOLDER_COPY, folderName, messages.get(0).getAccountUuid(), null, messages); } private void onDebugClearLocally(MessageReference message) { mController.debugClearMessagesLocally(Collections.singletonList(message)); } /** * Helper method to manage the invocation of {@link #startActivityForResult(Intent, int)} for a * folder operation ({@link ChooseFolder} activity), while saving a list of associated messages. * * @param requestCode * If {@code >= 0}, this code will be returned in {@code onActivityResult()} when the * activity exits. 
* * @see #startActivityForResult(Intent, int) */ private void displayFolderChoice(int requestCode, String sourceFolderName, String accountUuid, String lastSelectedFolderName, List<MessageReference> messages) { Intent intent = new Intent(getActivity(), ChooseFolder.class); intent.putExtra(ChooseFolder.EXTRA_ACCOUNT, accountUuid); intent.putExtra(ChooseFolder.EXTRA_SEL_FOLDER, lastSelectedFolderName); if (sourceFolderName == null) { intent.putExtra(ChooseFolder.EXTRA_SHOW_CURRENT, "yes"); } else { intent.putExtra(ChooseFolder.EXTRA_CUR_FOLDER, sourceFolderName); } // remember the selected messages for #onActivityResult mActiveMessages = messages; startActivityForResult(intent, requestCode); } private void onArchive(MessageReference message) { onArchive(Collections.singletonList(message)); } private void onArchive(final List<MessageReference> messages) { Map<Account, List<MessageReference>> messagesByAccount = groupMessagesByAccount(messages); for (Entry<Account, List<MessageReference>> entry : messagesByAccount.entrySet()) { Account account = entry.getKey(); String archiveFolder = account.getArchiveFolderName(); if (!K9.FOLDER_NONE.equals(archiveFolder)) { move(entry.getValue(), archiveFolder); } } } private Map<Account, List<MessageReference>> groupMessagesByAccount(final List<MessageReference> messages) { Map<Account, List<MessageReference>> messagesByAccount = new HashMap<>(); for (MessageReference message : messages) { Account account = mPreferences.getAccount(message.getAccountUuid()); List<MessageReference> msgList = messagesByAccount.get(account); if (msgList == null) { msgList = new ArrayList<>(); messagesByAccount.put(account, msgList); } msgList.add(message); } return messagesByAccount; } private void onSpam(MessageReference message) { onSpam(Collections.singletonList(message)); } /** * Move messages to the spam folder. * * @param messages * The messages to move to the spam folder. Never {@code null}. */ private void onSpam(List<MessageReference> messages) { if (K9.confirmSpam()) { // remember the message selection for #onCreateDialog(int) mActiveMessages = messages; showDialog(R.id.dialog_confirm_spam); } else { onSpamConfirmed(messages); } } private void onSpamConfirmed(List<MessageReference> messages) { Map<Account, List<MessageReference>> messagesByAccount = groupMessagesByAccount(messages); for (Entry<Account, List<MessageReference>> entry : messagesByAccount.entrySet()) { Account account = entry.getKey(); String spamFolder = account.getSpamFolderName(); if (!K9.FOLDER_NONE.equals(spamFolder)) { move(entry.getValue(), spamFolder); } } } private static enum FolderOperation { COPY, MOVE } /** * Display a Toast message if any message isn't synchronized * * @param messages * The messages to copy or move. Never {@code null}. * @param operation * The type of operation to perform. Never {@code null}. * * @return {@code true}, if operation is possible. 
*/ private boolean checkCopyOrMovePossible(final List<MessageReference> messages, final FolderOperation operation) { if (messages.isEmpty()) { return false; } boolean first = true; for (MessageReference message : messages) { if (first) { first = false; Account account = mPreferences.getAccount(message.getAccountUuid()); if ((operation == FolderOperation.MOVE && !mController.isMoveCapable(account)) || (operation == FolderOperation.COPY && !mController.isCopyCapable(account))) { return false; } } // message check if ((operation == FolderOperation.MOVE && !mController.isMoveCapable(message)) || (operation == FolderOperation.COPY && !mController.isCopyCapable(message))) { final Toast toast = Toast.makeText(getActivity(), R.string.move_copy_cannot_copy_unsynced_message, Toast.LENGTH_LONG); toast.show(); return false; } } return true; } /** * Copy the specified messages to the specified folder. * * @param messages * List of messages to copy. Never {@code null}. * @param destination * The name of the destination folder. Never {@code null}. */ private void copy(List<MessageReference> messages, final String destination) { copyOrMove(messages, destination, FolderOperation.COPY); } /** * Move the specified messages to the specified folder. * * @param messages * The list of messages to move. Never {@code null}. * @param destination * The name of the destination folder. Never {@code null}. */ private void move(List<MessageReference> messages, final String destination) { copyOrMove(messages, destination, FolderOperation.MOVE); } /** * The underlying implementation for {@link #copy(List, String)} and * {@link #move(List, String)}. This method was added mainly because those 2 * methods share common behavior. * * @param messages * The list of messages to copy or move. Never {@code null}. * @param destination * The name of the destination folder. Never {@code null} or {@link K9#FOLDER_NONE}. * @param operation * Specifies what operation to perform. Never {@code null}. */ private void copyOrMove(List<MessageReference> messages, final String destination, final FolderOperation operation) { Map<String, List<MessageReference>> folderMap = new HashMap<>(); for (MessageReference message : messages) { if ((operation == FolderOperation.MOVE && !mController.isMoveCapable(message)) || (operation == FolderOperation.COPY && !mController.isCopyCapable(message))) { Toast.makeText(getActivity(), R.string.move_copy_cannot_copy_unsynced_message, Toast.LENGTH_LONG).show(); // XXX return meaningful error value? 
// message isn't synchronized return; } String folderName = message.getFolderName(); if (folderName.equals(destination)) { // Skip messages already in the destination folder continue; } List<MessageReference> outMessages = folderMap.get(folderName); if (outMessages == null) { outMessages = new ArrayList<>(); folderMap.put(folderName, outMessages); } outMessages.add(message); } for (Map.Entry<String, List<MessageReference>> entry : folderMap.entrySet()) { String folderName = entry.getKey(); List<MessageReference> outMessages = entry.getValue(); Account account = mPreferences.getAccount(outMessages.get(0).getAccountUuid()); if (operation == FolderOperation.MOVE) { if (mThreadedList) { mController.moveMessagesInThread(account, folderName, outMessages, destination); } else { mController.moveMessages(account, folderName, outMessages, destination); } } else { if (mThreadedList) { mController.copyMessagesInThread(account, folderName, outMessages, destination); } else { mController.copyMessages(account, folderName, outMessages, destination); } } } } class ActionModeCallback implements ActionMode.Callback { private MenuItem mSelectAll; private MenuItem mMarkAsRead; private MenuItem mMarkAsUnread; private MenuItem mFlag; private MenuItem mUnflag; @Override public boolean onPrepareActionMode(ActionMode mode, Menu menu) { mSelectAll = menu.findItem(R.id.select_all); mMarkAsRead = menu.findItem(R.id.mark_as_read); mMarkAsUnread = menu.findItem(R.id.mark_as_unread); mFlag = menu.findItem(R.id.flag); mUnflag = menu.findItem(R.id.unflag); // we don't support cross account actions atm if (!mSingleAccountMode) { // show all menu.findItem(R.id.move).setVisible(true); menu.findItem(R.id.archive).setVisible(true); menu.findItem(R.id.spam).setVisible(true); menu.findItem(R.id.copy).setVisible(true); Set<String> accountUuids = getAccountUuidsForSelected(); for (String accountUuid : accountUuids) { Account account = mPreferences.getAccount(accountUuid); if (account != null) { setContextCapabilities(account, menu); } } } return true; } /** * Get the set of account UUIDs for the selected messages. */ private Set<String> getAccountUuidsForSelected() { int maxAccounts = mAccountUuids.length; Set<String> accountUuids = new HashSet<>(maxAccounts); for (int position = 0, end = mAdapter.getCount(); position < end; position++) { Cursor cursor = (Cursor) mAdapter.getItem(position); long uniqueId = cursor.getLong(mUniqueIdColumn); if (mSelected.contains(uniqueId)) { String accountUuid = cursor.getString(ACCOUNT_UUID_COLUMN); accountUuids.add(accountUuid); if (accountUuids.size() == mAccountUuids.length) { break; } } } return accountUuids; } @Override public void onDestroyActionMode(ActionMode mode) { mActionMode = null; mSelectAll = null; mMarkAsRead = null; mMarkAsUnread = null; mFlag = null; mUnflag = null; setSelectionState(false); } @Override public boolean onCreateActionMode(ActionMode mode, Menu menu) { MenuInflater inflater = mode.getMenuInflater(); inflater.inflate(R.menu.message_list_context, menu); // check capabilities setContextCapabilities(mAccount, menu); return true; } /** * Disables menu options not supported by the account type or current "search view". * * @param account * The account to query for its capabilities. * @param menu * The menu to adapt. 
*/ private void setContextCapabilities(Account account, Menu menu) { if (!mSingleAccountMode) { // We don't support cross-account copy/move operations right now menu.findItem(R.id.move).setVisible(false); menu.findItem(R.id.copy).setVisible(false); //TODO: we could support the archive and spam operations if all selected messages // belong to non-POP3 accounts menu.findItem(R.id.archive).setVisible(false); menu.findItem(R.id.spam).setVisible(false); } else { // hide unsupported if (!mController.isCopyCapable(account)) { menu.findItem(R.id.copy).setVisible(false); } if (!mController.isMoveCapable(account)) { menu.findItem(R.id.move).setVisible(false); menu.findItem(R.id.archive).setVisible(false); menu.findItem(R.id.spam).setVisible(false); } if (!account.hasArchiveFolder()) { menu.findItem(R.id.archive).setVisible(false); } if (!account.hasSpamFolder()) { menu.findItem(R.id.spam).setVisible(false); } } } public void showSelectAll(boolean show) { if (mActionMode != null) { mSelectAll.setVisible(show); } } public void showMarkAsRead(boolean show) { if (mActionMode != null) { mMarkAsRead.setVisible(show); mMarkAsUnread.setVisible(!show); } } public void showFlag(boolean show) { if (mActionMode != null) { mFlag.setVisible(show); mUnflag.setVisible(!show); } } @Override public boolean onActionItemClicked(ActionMode mode, MenuItem item) { /* * In the following we assume that we can't move or copy * mails to the same folder. Also that spam isn't available if we are * in the spam folder,same for archive. * * This is the case currently so safe assumption. */ switch (item.getItemId()) { case R.id.delete: { List<MessageReference> messages = getCheckedMessages(); onDelete(messages); mSelectedCount = 0; break; } case R.id.mark_as_read: { setFlagForSelected(Flag.SEEN, true); break; } case R.id.mark_as_unread: { setFlagForSelected(Flag.SEEN, false); break; } case R.id.flag: { setFlagForSelected(Flag.FLAGGED, true); break; } case R.id.unflag: { setFlagForSelected(Flag.FLAGGED, false); break; } case R.id.select_all: { selectAll(); break; } // only if the account supports this case R.id.archive: { onArchive(getCheckedMessages()); mSelectedCount = 0; break; } case R.id.spam: { onSpam(getCheckedMessages()); mSelectedCount = 0; break; } case R.id.move: { onMove(getCheckedMessages()); mSelectedCount = 0; break; } case R.id.copy: { onCopy(getCheckedMessages()); mSelectedCount = 0; break; } } if (mSelectedCount == 0) { mActionMode.finish(); } return true; } } @Override public void doPositiveClick(int dialogId) { switch (dialogId) { case R.id.dialog_confirm_spam: { onSpamConfirmed(mActiveMessages); // No further need for this reference mActiveMessages = null; break; } case R.id.dialog_confirm_delete: { onDeleteConfirmed(mActiveMessages); mActiveMessage = null; break; } case R.id.dialog_confirm_mark_all_as_read: { markAllAsRead(); break; } } } @Override public void doNegativeClick(int dialogId) { switch (dialogId) { case R.id.dialog_confirm_spam: case R.id.dialog_confirm_delete: { // No further need for this reference mActiveMessages = null; break; } } } @Override public void dialogCancelled(int dialogId) { doNegativeClick(dialogId); } public void checkMail() { if (isSingleAccountMode() && isSingleFolderMode()) { mController.synchronizeMailbox(mAccount, mFolderName, mListener, null); mController.sendPendingMessages(mAccount, mListener); } else if (mAllAccounts) { mController.checkMail(mContext, null, true, true, mListener); } else { for (String accountUuid : mAccountUuids) { Account account = 
mPreferences.getAccount(accountUuid); mController.checkMail(mContext, account, true, true, mListener); } } } /** * We need to do some special clean up when leaving a remote search result screen. If no * remote search is in progress, this method does nothing special. */ @Override public void onStop() { // If we represent a remote search, then kill that before going back. if (isRemoteSearch() && mRemoteSearchFuture != null) { try { Log.i(K9.LOG_TAG, "Remote search in progress, attempting to abort..."); // Canceling the future stops any message fetches in progress. final boolean cancelSuccess = mRemoteSearchFuture.cancel(true); // mayInterruptIfRunning = true if (!cancelSuccess) { Log.e(K9.LOG_TAG, "Could not cancel remote search future."); } // Closing the folder will kill off the connection if we're mid-search. final Account searchAccount = mAccount; final Folder remoteFolder = mCurrentFolder.folder; remoteFolder.close(); // Send a remoteSearchFinished() message for good measure. mListener.remoteSearchFinished(mCurrentFolder.name, 0, searchAccount.getRemoteSearchNumResults(), null); } catch (Exception e) { // Since the user is going back, log and squash any exceptions. Log.e(K9.LOG_TAG, "Could not abort remote search before going back", e); } } super.onStop(); } public void selectAll() { setSelectionState(true); } public void onMoveUp() { int currentPosition = mListView.getSelectedItemPosition(); if (currentPosition == AdapterView.INVALID_POSITION || mListView.isInTouchMode()) { currentPosition = mListView.getFirstVisiblePosition(); } if (currentPosition > 0) { mListView.setSelection(currentPosition - 1); } } public void onMoveDown() { int currentPosition = mListView.getSelectedItemPosition(); if (currentPosition == AdapterView.INVALID_POSITION || mListView.isInTouchMode()) { currentPosition = mListView.getFirstVisiblePosition(); } if (currentPosition < mListView.getCount()) { mListView.setSelection(currentPosition + 1); } } public boolean openPrevious(MessageReference messageReference) { int position = getPosition(messageReference); if (position <= 0) { return false; } openMessageAtPosition(position - 1); return true; } public boolean openNext(MessageReference messageReference) { int position = getPosition(messageReference); if (position < 0 || position == mAdapter.getCount() - 1) { return false; } openMessageAtPosition(position + 1); return true; } public boolean isFirst(MessageReference messageReference) { return mAdapter.isEmpty() || messageReference.equals(getReferenceForPosition(0)); } public boolean isLast(MessageReference messageReference) { return mAdapter.isEmpty() || messageReference.equals(getReferenceForPosition(mAdapter.getCount() - 1)); } private MessageReference getReferenceForPosition(int position) { Cursor cursor = (Cursor) mAdapter.getItem(position); String accountUuid = cursor.getString(ACCOUNT_UUID_COLUMN); String folderName = cursor.getString(FOLDER_NAME_COLUMN); String messageUid = cursor.getString(UID_COLUMN); return new MessageReference(accountUuid, folderName, messageUid, null); } private void openMessageAtPosition(int position) { // Scroll message into view if necessary int listViewPosition = adapterToListViewPosition(position); if (listViewPosition != AdapterView.INVALID_POSITION && (listViewPosition < mListView.getFirstVisiblePosition() || listViewPosition > mListView.getLastVisiblePosition())) { mListView.setSelection(listViewPosition); } MessageReference ref = getReferenceForPosition(position); // For some reason the mListView.setSelection() above won't do 
anything when we call // onOpenMessage() (and consequently mAdapter.notifyDataSetChanged()) right away. So we // defer the call using MessageListHandler. mHandler.openMessage(ref); } private int getPosition(MessageReference messageReference) { for (int i = 0, len = mAdapter.getCount(); i < len; i++) { Cursor cursor = (Cursor) mAdapter.getItem(i); String accountUuid = cursor.getString(ACCOUNT_UUID_COLUMN); String folderName = cursor.getString(FOLDER_NAME_COLUMN); String uid = cursor.getString(UID_COLUMN); if (accountUuid.equals(messageReference.getAccountUuid()) && folderName.equals(messageReference.getFolderName()) && uid.equals(messageReference.getUid())) { return i; } } return -1; } public interface MessageListFragmentListener { void enableActionBarProgress(boolean enable); void setMessageListProgress(int level); void showThread(Account account, String folderName, long rootId); void showMoreFromSameSender(String senderAddress); void onResendMessage(MessageReference message); void onForward(MessageReference message); void onReply(MessageReference message); void onReplyAll(MessageReference message); void openMessage(MessageReference messageReference); void setMessageListTitle(String title); void setMessageListSubTitle(String subTitle); void setUnreadCount(int unread); void onCompose(Account account); boolean startSearch(Account account, String folderName); void remoteSearchStarted(); void goBack(); void updateMenu(); } public void onReverseSort() { changeSort(mSortType); } private MessageReference getSelectedMessage() { int listViewPosition = mListView.getSelectedItemPosition(); int adapterPosition = listViewToAdapterPosition(listViewPosition); return getMessageAtPosition(adapterPosition); } private int getAdapterPositionForSelectedMessage() { int listViewPosition = mListView.getSelectedItemPosition(); return listViewToAdapterPosition(listViewPosition); } private int getPositionForUniqueId(long uniqueId) { for (int position = 0, end = mAdapter.getCount(); position < end; position++) { Cursor cursor = (Cursor) mAdapter.getItem(position); if (cursor.getLong(mUniqueIdColumn) == uniqueId) { return position; } } return AdapterView.INVALID_POSITION; } private MessageReference getMessageAtPosition(int adapterPosition) { if (adapterPosition == AdapterView.INVALID_POSITION) { return null; } Cursor cursor = (Cursor) mAdapter.getItem(adapterPosition); String accountUuid = cursor.getString(ACCOUNT_UUID_COLUMN); String folderName = cursor.getString(FOLDER_NAME_COLUMN); String messageUid = cursor.getString(UID_COLUMN); return new MessageReference(accountUuid, folderName, messageUid, null); } private List<MessageReference> getCheckedMessages() { List<MessageReference> messages = new ArrayList<>(mSelected.size()); for (int position = 0, end = mAdapter.getCount(); position < end; position++) { Cursor cursor = (Cursor) mAdapter.getItem(position); long uniqueId = cursor.getLong(mUniqueIdColumn); if (mSelected.contains(uniqueId)) { MessageReference message = getMessageAtPosition(position); if (message != null) { messages.add(message); } } } return messages; } public void onDelete() { MessageReference message = getSelectedMessage(); if (message != null) { onDelete(Collections.singletonList(message)); } } public void toggleMessageSelect() { toggleMessageSelect(mListView.getSelectedItemPosition()); } public void onToggleFlagged() { onToggleFlag(Flag.FLAGGED, FLAGGED_COLUMN); } public void onToggleRead() { onToggleFlag(Flag.SEEN, READ_COLUMN); } private void onToggleFlag(Flag flag, int flagColumn) { int 
adapterPosition = getAdapterPositionForSelectedMessage(); if (adapterPosition == ListView.INVALID_POSITION) { return; } Cursor cursor = (Cursor) mAdapter.getItem(adapterPosition); boolean flagState = (cursor.getInt(flagColumn) == 1); setFlag(adapterPosition, flag, !flagState); } public void onMove() { MessageReference message = getSelectedMessage(); if (message != null) { onMove(message); } } public void onArchive() { MessageReference message = getSelectedMessage(); if (message != null) { onArchive(message); } } public void onCopy() { MessageReference message = getSelectedMessage(); if (message != null) { onCopy(message); } } public boolean isOutbox() { return (mFolderName != null && mFolderName.equals(mAccount.getOutboxFolderName())); } public boolean isErrorFolder() { return K9.ERROR_FOLDER_NAME.equals(mFolderName); } public boolean isRemoteFolder() { if (mSearch.isManualSearch() || isOutbox() || isErrorFolder()) { return false; } if (!mController.isMoveCapable(mAccount)) { // For POP3 accounts only the Inbox is a remote folder. return (mFolderName != null && mFolderName.equals(mAccount.getInboxFolderName())); } return true; } public boolean isManualSearch() { return mSearch.isManualSearch(); } public boolean isAccountExpungeCapable() { try { return (mAccount != null && mAccount.getRemoteStore().isExpungeCapable()); } catch (Exception e) { return false; } } public void onRemoteSearch() { // Remote search is useless without the network. if (mHasConnectivity) { onRemoteSearchRequested(); } else { Toast.makeText(getActivity(), getText(R.string.remote_search_unavailable_no_network), Toast.LENGTH_SHORT).show(); } } public boolean isRemoteSearch() { return mRemoteSearchPerformed; } public boolean isRemoteSearchAllowed() { if (!mSearch.isManualSearch() || mRemoteSearchPerformed || !mSingleFolderMode) { return false; } boolean allowRemoteSearch = false; final Account searchAccount = mAccount; if (searchAccount != null) { allowRemoteSearch = searchAccount.allowRemoteSearch(); } return allowRemoteSearch; } public boolean onSearchRequested() { String folderName = (mCurrentFolder != null) ? mCurrentFolder.name : null; return mFragmentListener.startSearch(mAccount, folderName); } @Override public Loader<Cursor> onCreateLoader(int id, Bundle args) { String accountUuid = mAccountUuids[id]; Account account = mPreferences.getAccount(accountUuid); String threadId = getThreadId(mSearch); Uri uri; String[] projection; boolean needConditions; if (threadId != null) { uri = Uri.withAppendedPath(EmailProvider.CONTENT_URI, "account/" + accountUuid + "/thread/" + threadId); projection = PROJECTION; needConditions = false; } else if (mThreadedList) { uri = Uri.withAppendedPath(EmailProvider.CONTENT_URI, "account/" + accountUuid + "/messages/threaded"); projection = THREADED_PROJECTION; needConditions = true; } else { uri = Uri.withAppendedPath(EmailProvider.CONTENT_URI, "account/" + accountUuid + "/messages"); projection = PROJECTION; needConditions = true; } StringBuilder query = new StringBuilder(); List<String> queryArgs = new ArrayList<>(); if (needConditions) { boolean selectActive = mActiveMessage != null && mActiveMessage.getAccountUuid().equals(accountUuid); if (selectActive) { query.append("(" + MessageColumns.UID + " = ? AND " + SpecialColumns.FOLDER_NAME + " = ?) 
OR ("); queryArgs.add(mActiveMessage.getUid()); queryArgs.add(mActiveMessage.getFolderName()); } SqlQueryBuilder.buildWhereClause(account, mSearch.getConditions(), query, queryArgs); if (selectActive) { query.append(')'); } } String selection = query.toString(); String[] selectionArgs = queryArgs.toArray(new String[0]); String sortOrder = buildSortOrder(); return new CursorLoader(getActivity(), uri, projection, selection, selectionArgs, sortOrder); } private String getThreadId(LocalSearch search) { for (ConditionsTreeNode node : search.getLeafSet()) { SearchCondition condition = node.mCondition; if (condition.field == SearchField.THREAD_ID) { return condition.value; } } return null; } private String buildSortOrder() { String sortColumn; switch (mSortType) { case SORT_ARRIVAL: { sortColumn = MessageColumns.INTERNAL_DATE; break; } case SORT_ATTACHMENT: { sortColumn = "(" + MessageColumns.ATTACHMENT_COUNT + " < 1)"; break; } case SORT_FLAGGED: { sortColumn = "(" + MessageColumns.FLAGGED + " != 1)"; break; } case SORT_SENDER: { //FIXME sortColumn = MessageColumns.SENDER_LIST; break; } case SORT_SUBJECT: { sortColumn = MessageColumns.SUBJECT + " COLLATE NOCASE"; break; } case SORT_UNREAD: { sortColumn = MessageColumns.READ; break; } case SORT_DATE: default: { sortColumn = MessageColumns.DATE; } } String sortDirection = (mSortAscending) ? " ASC" : " DESC"; String secondarySort; if (mSortType == SortType.SORT_DATE || mSortType == SortType.SORT_ARRIVAL) { secondarySort = ""; } else { secondarySort = MessageColumns.DATE + ((mSortDateAscending) ? " ASC, " : " DESC, "); } return sortColumn + sortDirection + ", " + secondarySort + MessageColumns.ID + " DESC"; } @Override public void onLoadFinished(Loader<Cursor> loader, Cursor data) { if (mIsThreadDisplay && data.getCount() == 0) { mHandler.goBack(); return; } // Remove the "Loading..." view mPullToRefreshView.setEmptyView(null); setPullToRefreshEnabled(isPullToRefreshAllowed()); final int loaderId = loader.getId(); mCursors[loaderId] = data; mCursorValid[loaderId] = true; Cursor cursor; if (mCursors.length > 1) { cursor = new MergeCursorWithUniqueId(mCursors, getComparator()); mUniqueIdColumn = cursor.getColumnIndex("_id"); } else { cursor = data; mUniqueIdColumn = ID_COLUMN; } if (mIsThreadDisplay) { if (cursor.moveToFirst()) { mTitle = cursor.getString(SUBJECT_COLUMN); if (!TextUtils.isEmpty(mTitle)) { mTitle = Utility.stripSubject(mTitle); } if (TextUtils.isEmpty(mTitle)) { mTitle = getString(R.string.general_no_subject); } updateTitle(); } else { //TODO: empty thread view -> return to full message list } } cleanupSelected(cursor); updateContextMenu(cursor); mAdapter.swapCursor(cursor); resetActionMode(); computeBatchDirection(); if (isLoadFinished()) { if (mSavedListState != null) { mHandler.restoreListPosition(); } mFragmentListener.updateMenu(); } } private void updateMoreMessagesOfCurrentFolder() { if (mFolderName != null) { try { LocalFolder folder = getFolder(mFolderName, mAccount); mCurrentFolder.setMoreMessagesFromFolder(folder); } catch (MessagingException e) { throw new RuntimeException(e); } } } public boolean isLoadFinished() { if (mCursorValid == null) { return false; } for (boolean cursorValid : mCursorValid) { if (!cursorValid) { return false; } } return true; } /** * Close the context menu when the message it was opened for is no longer in the message list. 
*/ private void updateContextMenu(Cursor cursor) { if (mContextMenuUniqueId == 0) { return; } for (cursor.moveToFirst(); !cursor.isAfterLast(); cursor.moveToNext()) { long uniqueId = cursor.getLong(mUniqueIdColumn); if (uniqueId == mContextMenuUniqueId) { return; } } mContextMenuUniqueId = 0; Activity activity = getActivity(); if (activity != null) { activity.closeContextMenu(); } } private void cleanupSelected(Cursor cursor) { if (mSelected.isEmpty()) { return; } Set<Long> selected = new HashSet<>(); for (cursor.moveToFirst(); !cursor.isAfterLast(); cursor.moveToNext()) { long uniqueId = cursor.getLong(mUniqueIdColumn); if (mSelected.contains(uniqueId)) { selected.add(uniqueId); } } mSelected = selected; } /** * Starts or finishes the action mode when necessary. */ private void resetActionMode() { if (mSelected.isEmpty()) { if (mActionMode != null) { mActionMode.finish(); } return; } if (mActionMode == null) { startAndPrepareActionMode(); } recalculateSelectionCount(); updateActionModeTitle(); } private void startAndPrepareActionMode() { mActionMode = getActivity().startActionMode(mActionModeCallback); mActionMode.invalidate(); } /** * Recalculates the selection count. * * <p> * For non-threaded lists this is simply the number of visibly selected messages. If threaded * view is enabled this method counts the number of messages in the selected threads. * </p> */ private void recalculateSelectionCount() { if (!mThreadedList) { mSelectedCount = mSelected.size(); return; } mSelectedCount = 0; for (int i = 0, end = mAdapter.getCount(); i < end; i++) { Cursor cursor = (Cursor) mAdapter.getItem(i); long uniqueId = cursor.getLong(mUniqueIdColumn); if (mSelected.contains(uniqueId)) { int threadCount = cursor.getInt(THREAD_COUNT_COLUMN); mSelectedCount += (threadCount > 1) ? threadCount : 1; } } } @Override public void onLoaderReset(Loader<Cursor> loader) { mSelected.clear(); mAdapter.swapCursor(null); } private Account getAccountFromCursor(Cursor cursor) { String accountUuid = cursor.getString(ACCOUNT_UUID_COLUMN); return mPreferences.getAccount(accountUuid); } private void remoteSearchFinished() { mRemoteSearchFuture = null; } /** * Mark a message as 'active'. * * <p> * The active message is the one currently displayed in the message view portion of the split * view. * </p> * * @param messageReference * {@code null} to not mark any message as being 'active'. */ public void setActiveMessage(MessageReference messageReference) { mActiveMessage = messageReference; // Reload message list with modified query that always includes the active message if (isAdded()) { restartLoader(); } // Redraw list immediately if (mAdapter != null) { mAdapter.notifyDataSetChanged(); } } public boolean isSingleAccountMode() { return mSingleAccountMode; } public boolean isSingleFolderMode() { return mSingleFolderMode; } public boolean isInitialized() { return mInitialized; } public boolean isMarkAllAsReadSupported() { return (isSingleAccountMode() && isSingleFolderMode()); } public void confirmMarkAllAsRead() { showDialog(R.id.dialog_confirm_mark_all_as_read); } public void markAllAsRead() { if (isMarkAllAsReadSupported()) { mController.markAllMessagesRead(mAccount, mFolderName); } } public boolean isCheckMailSupported() { return (mAllAccounts || !isSingleAccountMode() || !isSingleFolderMode() || isRemoteFolder()); } private boolean isCheckMailAllowed() { return (!isManualSearch() && isCheckMailSupported()); } private boolean isPullToRefreshAllowed() { return (isRemoteSearchAllowed() || isCheckMailAllowed()); } }
1
14,045
I'm not sure I agree that this should take priority over "Loading".
k9mail-k-9
java
@@ -237,8 +237,6 @@ class PySparkTask(SparkSubmitTask):
     # Path to the pyspark program passed to spark-submit
     app = os.path.join(os.path.dirname(__file__), 'pyspark_runner.py')
 
-    # Python only supports the client deploy mode, force it
-    deploy_mode = "client"
 
     @property
     def name(self):
1
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging import os import sys import tempfile import shutil import importlib import tarfile import inspect try: import cPickle as pickle except ImportError: import pickle from luigi import six from luigi.contrib.external_program import ExternalProgramTask from luigi import configuration logger = logging.getLogger('luigi-interface') class SparkSubmitTask(ExternalProgramTask): """ Template task for running a Spark job Supports running jobs on Spark local, standalone, Mesos or Yarn See http://spark.apache.org/docs/latest/submitting-applications.html for more information """ # Application (.jar or .py file) name = None entry_class = None app = None # Only log stderr if spark fails (since stderr is normally quite verbose) always_log_stderr = False def app_options(self): """ Subclass this method to map your task parameters to the app's arguments """ return [] @property def spark_submit(self): return configuration.get_config().get('spark', 'spark-submit', 'spark-submit') @property def master(self): return configuration.get_config().get("spark", "master", None) @property def deploy_mode(self): return configuration.get_config().get("spark", "deploy-mode", None) @property def jars(self): return self._list_config(configuration.get_config().get("spark", "jars", None)) @property def packages(self): return self._list_config(configuration.get_config().get("spark", "packages", None)) @property def py_files(self): return self._list_config(configuration.get_config().get("spark", "py-files", None)) @property def files(self): return self._list_config(configuration.get_config().get("spark", "files", None)) @property def conf(self): return self._dict_config(configuration.get_config().get("spark", "conf", None)) @property def properties_file(self): return configuration.get_config().get("spark", "properties-file", None) @property def driver_memory(self): return configuration.get_config().get("spark", "driver-memory", None) @property def driver_java_options(self): return configuration.get_config().get("spark", "driver-java-options", None) @property def driver_library_path(self): return configuration.get_config().get("spark", "driver-library-path", None) @property def driver_class_path(self): return configuration.get_config().get("spark", "driver-class-path", None) @property def executor_memory(self): return configuration.get_config().get("spark", "executor-memory", None) @property def driver_cores(self): return configuration.get_config().get("spark", "driver-cores", None) @property def supervise(self): return bool(configuration.get_config().get("spark", "supervise", False)) @property def total_executor_cores(self): return configuration.get_config().get("spark", "total-executor-cores", None) @property def executor_cores(self): return configuration.get_config().get("spark", "executor-cores", None) @property def queue(self): return configuration.get_config().get("spark", "queue", None) @property def num_executors(self): 
return configuration.get_config().get("spark", "num-executors", None) @property def archives(self): return self._list_config(configuration.get_config().get("spark", "archives", None)) @property def hadoop_conf_dir(self): return configuration.get_config().get("spark", "hadoop-conf-dir", None) def get_environment(self): env = os.environ.copy() hadoop_conf_dir = self.hadoop_conf_dir if hadoop_conf_dir: env['HADOOP_CONF_DIR'] = hadoop_conf_dir return env def program_environment(self): return self.get_environment() def program_args(self): return self.spark_command() + self.app_command() def spark_command(self): command = [self.spark_submit] command += self._text_arg('--master', self.master) command += self._text_arg('--deploy-mode', self.deploy_mode) command += self._text_arg('--name', self.name) command += self._text_arg('--class', self.entry_class) command += self._list_arg('--jars', self.jars) command += self._list_arg('--packages', self.packages) command += self._list_arg('--py-files', self.py_files) command += self._list_arg('--files', self.files) command += self._list_arg('--archives', self.archives) command += self._dict_arg('--conf', self.conf) command += self._text_arg('--properties-file', self.properties_file) command += self._text_arg('--driver-memory', self.driver_memory) command += self._text_arg('--driver-java-options', self.driver_java_options) command += self._text_arg('--driver-library-path', self.driver_library_path) command += self._text_arg('--driver-class-path', self.driver_class_path) command += self._text_arg('--executor-memory', self.executor_memory) command += self._text_arg('--driver-cores', self.driver_cores) command += self._flag_arg('--supervise', self.supervise) command += self._text_arg('--total-executor-cores', self.total_executor_cores) command += self._text_arg('--executor-cores', self.executor_cores) command += self._text_arg('--queue', self.queue) command += self._text_arg('--num-executors', self.num_executors) return command def app_command(self): if not self.app: raise NotImplementedError("subclass should define an app (.jar or .py file)") return [self.app] + self.app_options() def _list_config(self, config): if config and isinstance(config, six.string_types): return list(map(lambda x: x.strip(), config.split(','))) def _dict_config(self, config): if config and isinstance(config, six.string_types): return dict(map(lambda i: i.split('=', 1), config.split('|'))) def _text_arg(self, name, value): if value: return [name, value] return [] def _list_arg(self, name, value): if value and isinstance(value, (list, tuple)): return [name, ','.join(value)] return [] def _dict_arg(self, name, value): command = [] if value and isinstance(value, dict): for prop, value in value.items(): command += [name, '{0}={1}'.format(prop, value)] return command def _flag_arg(self, name, value): if value: return [name] return [] class PySparkTask(SparkSubmitTask): """ Template task for running an inline PySpark job Simply implement the ``main`` method in your subclass You can optionally define package names to be distributed to the cluster with ``py_packages`` (uses luigi's global py-packages configuration by default) """ # Path to the pyspark program passed to spark-submit app = os.path.join(os.path.dirname(__file__), 'pyspark_runner.py') # Python only supports the client deploy mode, force it deploy_mode = "client" @property def name(self): return self.__class__.__name__ @property def py_packages(self): packages = configuration.get_config().get('spark', 'py-packages', None) if 
packages: return map(lambda s: s.strip(), packages.split(',')) def setup(self, conf): """ Called by the pyspark_runner with a SparkConf instance that will be used to instantiate the SparkContext :param conf: SparkConf """ def setup_remote(self, sc): self._setup_packages(sc) def main(self, sc, *args): """ Called by the pyspark_runner with a SparkContext and any arguments returned by ``app_options()`` :param sc: SparkContext :param args: arguments list """ raise NotImplementedError("subclass should define a main method") def program_args(self): return self.spark_command() + self.app_command() def app_command(self): return [self.app, self.run_pickle] + self.app_options() def run(self): self.run_path = tempfile.mkdtemp(prefix=self.name) self.run_pickle = os.path.join(self.run_path, '.'.join([self.name.replace(' ', '_'), 'pickle'])) with open(self.run_pickle, 'wb') as fd: # Copy module file to run path. module_path = os.path.abspath(inspect.getfile(self.__class__)) shutil.copy(module_path, os.path.join(self.run_path, '.')) self._dump(fd) try: super(PySparkTask, self).run() finally: shutil.rmtree(self.run_path) def _dump(self, fd): with self.no_unpicklable_properties(): if self.__module__ == '__main__': d = pickle.dumps(self) module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0] d = d.replace(b'c__main__', b'c' + module_name.encode('ascii')) fd.write(d) else: pickle.dump(self, fd) def _setup_packages(self, sc): """ This method compresses and uploads packages to the cluster """ packages = self.py_packages if not packages: return for package in packages: mod = importlib.import_module(package) try: mod_path = mod.__path__[0] except AttributeError: mod_path = mod.__file__ tar_path = os.path.join(self.run_path, package + '.tar.gz') tar = tarfile.open(tar_path, "w:gz") tar.add(mod_path, os.path.basename(mod_path)) tar.close() sc.addPyFile(tar_path)
1
17,307
Was this an intentional deletion? Why not just allow overwrite of `deploy_mode`?
spotify-luigi
py
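The review comment above asks why `deploy_mode` was deleted outright rather than made overridable. A minimal sketch of that alternative, reusing the `[spark]` configuration lookup that SparkSubmitTask already performs; the class name is hypothetical and this illustrates the reviewer's suggestion, not the change that was actually merged:

from luigi import configuration
from luigi.contrib.spark import SparkSubmitTask


class ClientDefaultPySparkTask(SparkSubmitTask):
    """Sketch: keep "client" as the default deploy mode, but allow overrides."""

    @property
    def deploy_mode(self):
        # Default to client mode instead of forcing it with a class attribute,
        # so a [spark] deploy-mode setting (or a subclass) can still change it.
        return configuration.get_config().get("spark", "deploy-mode", "client")

If the original assumption that PySpark jobs only work in client mode still holds, this keeps the safe default; if it no longer holds, users can switch modes through configuration without patching the class.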
@@ -168,7 +168,16 @@ func (c *CStorVolumeReplicaController) syncHandler(
 	// Synchronize cstor volume total allocated and
 	// used capacity fields on CVR object.
 	// Any kind of sync activity should be done from here.
-	c.syncCvr(cvrGot)
+	err = c.syncCvr(cvrGot)
+	if err != nil {
+		c.recorder.Event(
+			cvrGot,
+			corev1.EventTypeWarning,
+			"SyncFailed",
+			fmt.Sprintf("failed to sync CVR error: %s", err.Error()),
+		)
+		return nil
+	}
 
 	_, err = c.clientset.
 		OpenebsV1alpha1().
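This hunk only changes the call site inside syncHandler; it assumes syncCvr is reworked to return an error instead of swallowing failures itself. A sketch of what that companion change could look like, built from the GetVolumeName and Capacity calls already visible in the old file below; it deliberately omits the snapshot-info sync and is an illustration, not the actual openebs patch:

// Sketch only: return the first failure so the patched caller above can emit
// a single SyncFailed event, rather than syncCvr recording events itself.
func (c *CStorVolumeReplicaController) syncCvr(cvr *apis.CStorVolumeReplica) error {
	volumeName, err := volumereplica.GetVolumeName(cvr)
	if err != nil {
		return errors.Wrap(err, "failed to get volume name")
	}

	capacity, err := volumereplica.Capacity(volumeName)
	if err != nil {
		return errors.Wrap(err, "failed to get volume capacity")
	}
	cvr.Status.Capacity = *capacity

	return nil
}

Returning the error keeps event emission in one place (the caller) and matches how the rest of syncHandler already reports reconcile failures.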
1
/* Copyright 2018 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package replicacontroller import ( "encoding/json" "fmt" "os" "reflect" "strings" "github.com/openebs/maya/cmd/cstor-pool-mgmt/controller/common" "github.com/openebs/maya/cmd/cstor-pool-mgmt/volumereplica" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" clientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned" "github.com/openebs/maya/pkg/debug" errors "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" "k8s.io/klog" ) const ( v130 = "1.3.0" ) type upgradeParams struct { cvr *apis.CStorVolumeReplica client clientset.Interface } type upgradeFunc func(u *upgradeParams) (*apis.CStorVolumeReplica, error) var ( upgradeMap = map[string]upgradeFunc{ "1.0.0": setReplicaID, "1.1.0": setReplicaID, "1.2.0": setReplicaID, } ) // CVRPatch struct represent the struct used to patch // the cvr object type CVRPatch struct { // Op defines the operation Op string `json:"op"` // Path defines the key path // eg. for // { // "Name": "openebs" // Category: { // "Inclusive": "v1", // "Rank": "A" // } // } // The path of 'Inclusive' would be // "/Name/Category/Inclusive" Path string `json:"path"` Value string `json:"value"` } // syncHandler handles CVR changes based on the provided // operation. It reconciles desired state of CVR with the // actual state. // // Finally, it updates CVR Status func (c *CStorVolumeReplicaController) syncHandler( key string, operation common.QueueOperation, ) error { cvrGot, err := c.getVolumeReplicaResource(key) if err != nil { return err } if cvrGot == nil { return errors.Errorf( "failed to reconcile cvr {%s}: object not found", key, ) } cvrGot, err = c.populateVersion(cvrGot) if err != nil { klog.Errorf("failed to add versionDetails to cvr %s:%s", cvrGot.Name, err.Error()) c.recorder.Event( cvrGot, corev1.EventTypeWarning, "FailedPopulate", fmt.Sprintf("Failed to add current version: %s", err.Error()), ) return nil } cvrGot, err = c.reconcileVersion(cvrGot) if err != nil { klog.Errorf("failed to upgrade cvr %s:%s", cvrGot.Name, err.Error()) c.recorder.Event( cvrGot, corev1.EventTypeWarning, "FailedUpgrade", fmt.Sprintf("Failed to upgrade cvr to %s version: %s", cvrGot.VersionDetails.Desired, err.Error(), ), ) cvrGot.VersionDetails.Status.SetErrorStatus( "Failed to reconcile cvr version", err, ) _, err = c.clientset.OpenebsV1alpha1(). CStorVolumeReplicas(cvrGot.Namespace).Update(cvrGot) if err != nil { klog.Errorf("failed to update versionDetails status for cvr %s:%s", cvrGot.Name, err.Error()) } return nil } status, err := c.cVREventHandler(operation, cvrGot) if status == "" { // TODO // need to rethink on this logic !! 
// status holds more importance than error return nil } cvrGot.Status.LastUpdateTime = metav1.Now() if cvrGot.Status.Phase != apis.CStorVolumeReplicaPhase(status) { cvrGot.Status.LastTransitionTime = cvrGot.Status.LastUpdateTime // set phase based on received status cvrGot.Status.Phase = apis.CStorVolumeReplicaPhase(status) } // need to update cvr before returning this error if err != nil { if debug.EI.IsCVRUpdateErrorInjected() { return errors.Errorf("CVR update error via injection") } _, err1 := c.clientset. OpenebsV1alpha1(). CStorVolumeReplicas(cvrGot.Namespace). Update(cvrGot) if err1 != nil { return errors.Wrapf( err, "failed to reconcile cvr {%s}: failed to update cvr with phase {%s}: {%s}", key, cvrGot.Status.Phase, err1.Error(), ) } return errors.Wrapf(err, "failed to reconcile cvr {%s}", key) } // Synchronize cstor volume total allocated and // used capacity fields on CVR object. // Any kind of sync activity should be done from here. c.syncCvr(cvrGot) _, err = c.clientset. OpenebsV1alpha1(). CStorVolumeReplicas(cvrGot.Namespace). Update(cvrGot) if err != nil { return errors.Wrapf( err, "failed to reconcile cvr {%s}: failed to update cvr with phase {%s}", key, cvrGot.Status.Phase, ) } klog.V(4).Infof( "cvr {%s} reconciled successfully with current phase being {%s}", key, cvrGot.Status.Phase, ) return nil } func (c *CStorVolumeReplicaController) cVREventHandler( operation common.QueueOperation, cvrObj *apis.CStorVolumeReplica, ) (string, error) { err := volumereplica.CheckValidVolumeReplica(cvrObj) if err != nil { c.recorder.Event( cvrObj, corev1.EventTypeWarning, string(common.FailureValidate), string(common.MessageResourceFailValidate), ) return string(apis.CVRStatusOffline), err } // PoolNameHandler tries to get pool name and blocks for // particular number of attempts. 
var noOfAttempts = 2 if !common.PoolNameHandler(cvrObj, noOfAttempts) { return string(cvrObj.Status.Phase), errors.New("pool not found") } // cvr is created at zfs in the form poolname/volname fullVolName := volumereplica.PoolNameFromCVR(cvrObj) + "/" + cvrObj.Labels["cstorvolume.openebs.io/name"] switch operation { case common.QOpAdd: klog.Infof( "will process add event for cvr {%s} as volume {%s}", cvrObj.Name, fullVolName, ) status, err := c.cVRAddEventHandler(cvrObj, fullVolName) return status, err case common.QOpDestroy: klog.Infof( "will process delete event for cvr {%s} as volume {%s}", cvrObj.Name, fullVolName, ) err := volumereplica.DeleteVolume(fullVolName) if err != nil { c.recorder.Event( cvrObj, corev1.EventTypeWarning, string(common.FailureDestroy), string(common.MessageResourceFailDestroy), ) return string(apis.CVRStatusDeletionFailed), err } err = c.removeFinalizer(cvrObj) if err != nil { c.recorder.Event( cvrObj, corev1.EventTypeWarning, string(common.FailureRemoveFinalizer), string(common.MessageResourceFailDestroy), ) return string(apis.CVRStatusDeletionFailed), err } return "", nil case common.QOpModify: fallthrough case common.QOpSync: klog.V(4).Infof( "will process sync event for cvr {%s} as volume {%s}", cvrObj.Name, operation, ) if isCVRCreateStatus(cvrObj) { return c.cVRAddEventHandler(cvrObj, fullVolName) } return c.getCVRStatus(cvrObj) } klog.Errorf( "failed to handle event for cvr {%s}: operation {%s} not supported", cvrObj.Name, string(operation), ) return string(apis.CVRStatusInvalid), nil } // removeFinalizer removes finalizers present in // CVR resource func (c *CStorVolumeReplicaController) removeFinalizer( cvrObj *apis.CStorVolumeReplica, ) error { cvrPatch := []CVRPatch{ CVRPatch{ Op: "remove", Path: "/metadata/finalizers", }, } cvrPatchBytes, err := json.Marshal(cvrPatch) if err != nil { return errors.Wrapf( err, "failed to remove finalizers from cvr {%s}", cvrObj.Name, ) } _, err = c.clientset. OpenebsV1alpha1(). CStorVolumeReplicas(cvrObj.Namespace). Patch(cvrObj.Name, types.JSONPatchType, cvrPatchBytes) if err != nil { return errors.Wrapf( err, "failed to remove finalizers from cvr {%s}", cvrObj.Name, ) } klog.Infof("finalizers removed successfully from cvr {%s}", cvrObj.Name) return nil } func (c *CStorVolumeReplicaController) cVRAddEventHandler( cVR *apis.CStorVolumeReplica, fullVolName string, ) (string, error) { var err error // lock is to synchronize pool and volumereplica. Until certain pool related // operations are over, the volumereplica threads will be held. common.SyncResources.Mux.Lock() if common.SyncResources.IsImported { common.SyncResources.Mux.Unlock() // To check if volume is already imported with pool. importedFlag := common.CheckForInitialImportedPoolVol( common.InitialImportedPoolVol, fullVolName, ) if importedFlag && !IsEmptyStatus(cVR) { klog.Infof( "CStorVolumeReplica %v is already imported", string(cVR.ObjectMeta.UID), ) c.recorder.Event( cVR, corev1.EventTypeNormal, string(common.SuccessImported), string(common.MessageResourceImported), ) // If the volume already present then get the status of replica from ZFS // and update it with corresponding status phase. If status gives error // then return old phase. return getVolumeReplicaStatus(cVR, fullVolName) } } else { common.SyncResources.Mux.Unlock() } // Below block will be useful when the only cstor-pool-mgmt gets restarted // then it is required to cross-check whether the volume exists or not. 
existingvol, _ := volumereplica.GetVolumes() if common.CheckIfPresent(existingvol, fullVolName) { klog.Warningf( "CStorVolumeReplica %v is already present", string(cVR.GetUID()), ) c.recorder.Event( cVR, corev1.EventTypeWarning, string(common.AlreadyPresent), string(common.MessageResourceAlreadyPresent), ) // After creating zfs datasets in zpool but update to etcd might be // failed if isEmptyReplicaID(cVR) { cVR.Spec.ReplicaID, err = volumereplica.GetReplicaIDFromZFS(fullVolName) if err != nil { // If error happened then update with same as with existing CVR // phase. So, in next reconciliation it will try to update with // proper changes return string(cVR.Status.Phase), errors.Wrapf(err, "volume replica %s exists", cVR.Name) } } // If the volume already present then get the status of replica from ZFS // and update it with corresponding status return getVolumeReplicaStatus(cVR, fullVolName) } //TODO: Follow best practice while refactor reconciliation logic if isCVRCreateStatus(cVR) { return c.createVolumeReplica(cVR, fullVolName) } return string(apis.CVRStatusOffline), fmt.Errorf( "VolumeReplica offline: %v, %v", cVR.Name, cVR.Labels["cstorvolume.openebs.io/name"], ) } // createVolumeReplica will do following things // 1. If replicaID is empty and if it is new volume generate replicaID. // 2. Trigger ZFS volume dataset create command on success get the status from // ZFS and update it. If `ZFS command` fails then return with same status phase // which is currently holding by CVR. func (c *CStorVolumeReplicaController) createVolumeReplica( cVR *apis.CStorVolumeReplica, fullVolName string) (string, error) { // Setting quorum to true for newly creating Volumes. var quorum = true if IsRecreateStatus(cVR) { klog.Infof( "Pool is recreated hence creating the volumes by setting off the quorum property", ) quorum = false } // We should generate replicaID for new volume replicas only if it doesn't has // replica ID. 
if isEmptyReplicaID(cVR) && (IsEmptyStatus(cVR) || IsInitStatus(cVR)) { if err := volumereplica.GenerateReplicaID(cVR); err != nil { klog.Errorf("cVR ReplicaID creation failure: %v", err.Error()) return string(cVR.Status.Phase), err } } if len(cVR.Spec.ReplicaID) == 0 { return string(cVR.Status.Phase), errors.New("ReplicaID is not set") } err := volumereplica.CreateVolumeReplica(cVR, fullVolName, quorum) if err != nil { klog.Errorf("cVR creation failure: %v", err.Error()) c.recorder.Event( cVR, corev1.EventTypeWarning, string(common.FailureCreate), fmt.Sprintf("failed to create volume replica error: %v", err.Error()), ) return string(cVR.Status.Phase), err } c.recorder.Event( cVR, corev1.EventTypeNormal, string(common.SuccessCreated), string(common.MessageResourceCreated), ) klog.Infof( "cVR creation successful: %v, %v", cVR.ObjectMeta.Name, string(cVR.GetUID()), ) return getVolumeReplicaStatus(cVR, fullVolName) } // getVolumeReplicaStatus return the status of replica after executing ZFS // stats command and return previous state and error if any error occured while // getting the status from ZFS func getVolumeReplicaStatus( cVR *apis.CStorVolumeReplica, fullVolName string) (string, error) { status, err := volumereplica.Status(fullVolName) if err != nil { return string(cVR.Status.Phase), err } return status, nil } // getVolumeReplicaResource returns object corresponding to the resource key func (c *CStorVolumeReplicaController) getVolumeReplicaResource( key string, ) (*apis.CStorVolumeReplica, error) { // Convert the key(namespace/name) string into a distinct name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) return nil, nil } cStorVolumeReplicaUpdated, err := c.clientset.OpenebsV1alpha1(). CStorVolumeReplicas(namespace). Get(name, metav1.GetOptions{}) if err != nil { // The cStorPool resource may no longer exist, in which case we stop // processing. if k8serrors.IsNotFound(err) { runtime.HandleError( fmt.Errorf( "cStorVolumeReplicaUpdated '%s' in work queue no longer exists", key, ), ) return nil, nil } return nil, err } return cStorVolumeReplicaUpdated, nil } // IsRightCStorVolumeReplica is to check if the cvr // request is for particular pod/application. func IsRightCStorVolumeReplica(cVR *apis.CStorVolumeReplica) bool { if strings.TrimSpace(string(cVR.ObjectMeta.Labels["cstorpool.openebs.io/uid"])) != "" { return os.Getenv(string(common.OpenEBSIOCStorID)) == string(cVR.ObjectMeta.Labels["cstorpool.openebs.io/uid"]) } if strings.TrimSpace(string(cVR.ObjectMeta.Labels["cstorpoolinstance.openebs.io/uid"])) != "" { return os.Getenv(string(common.OpenEBSIOCSPIID)) == string(cVR.ObjectMeta.Labels["cstorpoolinstance.openebs.io/uid"]) } return false } // IsDestroyEvent is to check if the call is for CStorVolumeReplica destroy. func IsDestroyEvent(cVR *apis.CStorVolumeReplica) bool { if cVR.ObjectMeta.DeletionTimestamp != nil { return true } return false } // IsOnlyStatusChange is to check only status change of cStorVolumeReplica object. 
func IsOnlyStatusChange(oldCVR, newCVR *apis.CStorVolumeReplica) bool { if reflect.DeepEqual(oldCVR.Spec, newCVR.Spec) && !reflect.DeepEqual(oldCVR.Status, newCVR.Status) { return true } return false } // IsDeletionFailedBefore flags if status of // cvr is CVRStatusDeletionFailed func IsDeletionFailedBefore(cvrObj *apis.CStorVolumeReplica) bool { return cvrObj.Status.Phase == apis.CVRStatusDeletionFailed } // IsOnlineStatus is to check if the status of cStorVolumeReplica object is // Healthy. func IsOnlineStatus(cVR *apis.CStorVolumeReplica) bool { if string(cVR.Status.Phase) == string(apis.CVRStatusOnline) { klog.Infof("cVR Healthy status: %v", string(cVR.ObjectMeta.UID)) return true } klog.Infof( "cVR '%s': uid '%s': phase '%s': is_healthy_status: false", string(cVR.ObjectMeta.Name), string(cVR.ObjectMeta.UID), cVR.Status.Phase, ) return false } // IsEmptyStatus is to check if the status of cStorVolumeReplica object is empty. func IsEmptyStatus(cVR *apis.CStorVolumeReplica) bool { if string(cVR.Status.Phase) == string(apis.CVRStatusEmpty) { klog.Infof("cVR empty status: %v", string(cVR.ObjectMeta.UID)) return true } klog.Infof( "cVR '%s': uid '%s': phase '%s': is_empty_status: false", string(cVR.ObjectMeta.Name), string(cVR.ObjectMeta.UID), cVR.Status.Phase, ) return false } // IsInitStatus is to check if the status of cStorVolumeReplica object is pending. func IsInitStatus(cVR *apis.CStorVolumeReplica) bool { if string(cVR.Status.Phase) == string(apis.CVRStatusInit) { klog.Infof("cVR pending: %v", string(cVR.ObjectMeta.UID)) return true } klog.V(4).Infof("Not pending status: %v", string(cVR.ObjectMeta.UID)) return false } // IsRecreateStatus is to check if the status of cStorVolumeReplica object is // in recreated state. func IsRecreateStatus(cVR *apis.CStorVolumeReplica) bool { if string(cVR.Status.Phase) == string(apis.CVRStatusRecreate) { klog.Infof("cVR Recreate: %v", string(cVR.ObjectMeta.UID)) return true } klog.V(4).Infof("Not Recreate status: %v", string(cVR.ObjectMeta.UID)) return false } // isCVRCreateStatus returns true if volume replica needs to be created else // return false func isCVRCreateStatus(cVR *apis.CStorVolumeReplica) bool { cVRStatus := string(cVR.Status.Phase) if strings.EqualFold(cVRStatus, string(apis.CVRStatusEmpty)) || strings.EqualFold(cVRStatus, string(apis.CVRStatusRecreate)) || strings.EqualFold(cVRStatus, string(apis.CVRStatusInit)) { return true } return false } func isEmptyReplicaID(cVR *apis.CStorVolumeReplica) bool { return cVR.Spec.ReplicaID == "" } // getCVRStatus is a wrapper that fetches the status of cstor volume. func (c *CStorVolumeReplicaController) getCVRStatus( cVR *apis.CStorVolumeReplica, ) (string, error) { volumeName, err := volumereplica.GetVolumeName(cVR) if err != nil { return "", fmt.Errorf("unable to get volume name:%s", err.Error()) } replicaStatus, err := volumereplica.Status(volumeName) if err != nil { // ToDO : Put error in event recorder c.recorder.Event( cVR, corev1.EventTypeWarning, string(common.FailureStatusSync), string(common.MessageResourceFailStatusSync), ) return "", err } return replicaStatus, nil } // syncCvr updates field on CVR object after fetching the values from zfs utility. func (c *CStorVolumeReplicaController) syncCvr(cvr *apis.CStorVolumeReplica) { // Get the zfs volume name corresponding to this cvr. 
volumeName, err := volumereplica.GetVolumeName(cvr) if err != nil { klog.Errorf("Unable to sync CVR capacity: %v", err) c.recorder.Event( cvr, corev1.EventTypeWarning, string(common.FailureCapacitySync), string(common.MessageResourceFailCapacitySync), ) } // Get capacity of the volume. capacity, err := volumereplica.Capacity(volumeName) if err != nil { klog.Errorf("Unable to sync CVR capacity: %v", err) c.recorder.Event( cvr, corev1.EventTypeWarning, string(common.FailureCapacitySync), string(common.MessageResourceFailCapacitySync), ) } else { cvr.Status.Capacity = *capacity } if os.Getenv(string(common.RebuildEstimates)) == "true" { err = volumereplica.GetAndUpdateSnapshotInfo(c.clientset, cvr) if err != nil { c.recorder.Event( cvr, corev1.EventTypeWarning, "SnapshotList", fmt.Sprintf("Unable to update snapshot list ddetails in cvr status err: %v", err), ) } } } func (c *CStorVolumeReplicaController) reconcileVersion(cvr *apis.CStorVolumeReplica) ( *apis.CStorVolumeReplica, error, ) { var err error // the below code uses deep copy to have the state of object just before // any update call is done so that on failure the last state object can be returned if cvr.VersionDetails.Status.Current != cvr.VersionDetails.Desired { if !apis.IsCurrentVersionValid(cvr.VersionDetails.Status.Current) { return cvr, errors.Errorf("invalid current version %s", cvr.VersionDetails.Status.Current) } if !apis.IsDesiredVersionValid(cvr.VersionDetails.Desired) { return cvr, errors.Errorf("invalid desired version %s", cvr.VersionDetails.Desired) } cvrObj := cvr.DeepCopy() if cvrObj.VersionDetails.Status.State != apis.ReconcileInProgress { cvrObj.VersionDetails.Status.SetInProgressStatus() cvrObj, err = c.clientset.OpenebsV1alpha1(). CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj) if err != nil { return cvr, err } } path := strings.Split(cvrObj.VersionDetails.Status.Current, "-")[0] u := &upgradeParams{ cvr: cvrObj, client: c.clientset, } // Get upgrade function for corresponding path, if path does not // exits then no upgrade is required and funcValue will be nil. funcValue := upgradeMap[path] if funcValue != nil { cvrObj, err = funcValue(u) if err != nil { return cvrObj, err } } cvr = cvrObj.DeepCopy() cvrObj.VersionDetails.SetSuccessStatus() cvrObj, err = c.clientset.OpenebsV1alpha1(). CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj) if err != nil { return cvr, err } return cvrObj, nil } return cvr, nil } // populateVersion assigns VersionDetails for old cvr object func (c *CStorVolumeReplicaController) populateVersion(cvr *apis.CStorVolumeReplica) ( *apis.CStorVolumeReplica, error, ) { v := cvr.Labels[string(apis.OpenEBSVersionKey)] // 1.3.0 onwards new CVR will have the field populated during creation if v < v130 && cvr.VersionDetails.Status.Current == "" { cvrObj := cvr.DeepCopy() cvrObj.VersionDetails.Status.Current = v cvrObj.VersionDetails.Desired = v cvrObj, err := c.clientset.OpenebsV1alpha1().CStorVolumeReplicas(cvrObj.Namespace). Update(cvrObj) if err != nil { return cvr, err } klog.Infof("Version %s added on cvr %s", v, cvrObj.Name) return cvrObj, nil } return cvr, nil } // setReplicaID sets the replica_id if not present for old cvrs when // they are upgraded to version 1.3.0 or above. func setReplicaID(u *upgradeParams) (*apis.CStorVolumeReplica, error) { cvr := u.cvr cvrObj := cvr.DeepCopy() err := volumereplica.GetAndUpdateReplicaID(cvrObj) if err != nil { return cvr, err } cvrObj, err = u.client.OpenebsV1alpha1(). 
CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj) if err != nil { return cvr, err } return cvrObj, nil }
1
18,279
Better to call it syncCVRStatus; just syncCVR is confusing.
openebs-maya
go
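The openebs-maya record above walks through the CVR reconcile path: Empty, Init, and Recreate phases funnel into createVolumeReplica, and a Recreate phase additionally turns quorum off because the pool was rebuilt. Below is a minimal, self-contained Go sketch of just that decision, not the project's actual code; the cvr struct, the function names, and the assumption that the Empty phase is the empty string are all illustrative.

package main

import (
	"fmt"
	"strings"
)

// cvr is a stand-in for apis.CStorVolumeReplica, reduced to the one field the
// decision needs.
type cvr struct {
	Phase string
}

// isCreatePhase mirrors isCVRCreateStatus in the record: only Empty, Init and
// Recreate phases lead to creating the ZFS dataset.
func isCreatePhase(c cvr) bool {
	for _, p := range []string{"", "Init", "Recreate"} {
		if strings.EqualFold(c.Phase, p) {
			return true
		}
	}
	return false
}

// quorumFor mirrors the quorum choice in createVolumeReplica: fresh volumes
// start with quorum on, recreated ones start with quorum off.
func quorumFor(c cvr) bool {
	return !strings.EqualFold(c.Phase, "Recreate")
}

func main() {
	for _, c := range []cvr{{Phase: ""}, {Phase: "Init"}, {Phase: "Recreate"}, {Phase: "Healthy"}} {
		fmt.Printf("phase=%q create=%v quorum=%v\n", c.Phase, isCreatePhase(c), quorumFor(c))
	}
}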
@@ -29,11 +29,14 @@ var ( errAppDeleteCancelled = errors.New("app delete cancelled - no changes made") ) -type deleteAppOpts struct { - // Flags or arguments that are user inputs. +type deleteAppVars struct { *GlobalOpts SkipConfirmation bool AppName string +} + +type deleteAppOpts struct { + deleteAppVars // Interfaces to dependencies. projectService projectService
1
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "errors" "fmt" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/ecr" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/session" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy/cloudformation" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/store" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/color" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/log" termprogress "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/progress" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/workspace" "github.com/spf13/cobra" ) const ( appDeleteNamePrompt = "Which application would you like to delete?" appDeleteConfirmPrompt = "Are you sure you want to delete %s from project %s?" appDeleteConfirmHelp = "This will undeploy the app from all environments, delete the local workspace file, and remove ECR repositories." ) var ( errAppDeleteCancelled = errors.New("app delete cancelled - no changes made") ) type deleteAppOpts struct { // Flags or arguments that are user inputs. *GlobalOpts SkipConfirmation bool AppName string // Interfaces to dependencies. projectService projectService workspaceService archer.Workspace sessProvider sessionProvider spinner progress // Internal state. projectEnvironments []*archer.Environment } func newDeleteAppOpts() (*deleteAppOpts, error) { workspaceService, err := workspace.New() if err != nil { return nil, fmt.Errorf("intialize workspace service: %w", err) } projectService, err := store.New() if err != nil { return nil, fmt.Errorf("create project service: %w", err) } return &deleteAppOpts{ GlobalOpts: NewGlobalOpts(), spinner: termprogress.NewSpinner(), sessProvider: session.NewProvider(), workspaceService: workspaceService, projectService: projectService, }, nil } // Validate returns an error if the user inputs are invalid. func (o *deleteAppOpts) Validate() error { if o.ProjectName() == "" { return errNoProjectInWorkspace } if o.AppName != "" { if _, err := o.projectService.GetApplication(o.ProjectName(), o.AppName); err != nil { return err } } return nil } // Ask prompts the user for any required flags. func (o *deleteAppOpts) Ask() error { if err := o.askAppName(); err != nil { return err } if o.SkipConfirmation { return nil } deleteConfirmed, err := o.prompt.Confirm( fmt.Sprintf(appDeleteConfirmPrompt, o.AppName, o.projectName), appDeleteConfirmHelp) if err != nil { return fmt.Errorf("app delete confirmation prompt: %w", err) } if !deleteConfirmed { return errAppDeleteCancelled } return nil } // Execute deletes the application's CloudFormation stack, ECR repository, SSM parameter, and local file. 
func (o *deleteAppOpts) Execute() error { if err := o.sourceProjectEnvironments(); err != nil { return err } if err := o.deleteStacks(); err != nil { return err } if err := o.emptyECRRepos(); err != nil { return err } if err := o.removeAppProjectResources(); err != nil { return err } if err := o.deleteSSMParam(); err != nil { return err } if err := o.deleteWorkspaceFile(); err != nil { return err } log.Successf("Deleted app %s from project %s.\n", o.AppName, o.projectName) return nil } func (o *deleteAppOpts) askAppName() error { if o.AppName != "" { return nil } names, err := o.retrieveAppNames() if err != nil { return err } if len(names) == 0 { return fmt.Errorf("couldn't find any application in the project %s", o.ProjectName()) } name, err := o.prompt.SelectOne(appDeleteNamePrompt, "", names) if err != nil { return fmt.Errorf("select application to delete: %w", err) } o.AppName = name return nil } func (o *deleteAppOpts) retrieveAppNames() ([]string, error) { apps, err := o.projectService.ListApplications(o.ProjectName()) if err != nil { return nil, fmt.Errorf("get app names: %w", err) } var names []string for _, app := range apps { names = append(names, app.Name) } return names, nil } func (o *deleteAppOpts) sourceProjectEnvironments() error { envs, err := o.projectService.ListEnvironments(o.ProjectName()) if err != nil { return fmt.Errorf("get environments: %w", err) } o.projectEnvironments = envs return nil } func (o *deleteAppOpts) deleteStacks() error { for _, env := range o.projectEnvironments { sess, err := o.sessProvider.FromRole(env.ManagerRoleARN, env.Region) if err != nil { return err } cfClient := cloudformation.New(sess) stackName := fmt.Sprintf("%s-%s-%s", o.projectName, env.Name, o.AppName) o.spinner.Start(fmt.Sprintf("Deleting app %s from env %s.", o.AppName, env.Name)) if err := cfClient.DeleteStackAndWait(stackName); err != nil { o.spinner.Stop(log.Serrorf("Deleting app %s from env %s.", o.AppName, env.Name)) return err } o.spinner.Stop(log.Ssuccessf("Deleted app %s from env %s.", o.AppName, env.Name)) } return nil } func (o *deleteAppOpts) emptyECRRepos() error { var uniqueRegions []string for _, env := range o.projectEnvironments { if !contains(env.Region, uniqueRegions) { uniqueRegions = append(uniqueRegions, env.Region) } } // TODO: centralized ECR repo name repoName := fmt.Sprintf("%s/%s", o.projectName, o.AppName) for _, region := range uniqueRegions { sess, err := o.sessProvider.DefaultWithRegion(region) if err != nil { return err } ecrService := ecr.New(sess) if err := ecrService.ClearRepository(repoName); err != nil { return err } } return nil } func (o *deleteAppOpts) removeAppProjectResources() error { proj, err := o.projectService.GetProject(o.projectName) if err != nil { return err } sess, err := o.sessProvider.Default() if err != nil { return err } // TODO: make this opts.toolsAccountCfClient... 
cfClient := cloudformation.New(sess) o.spinner.Start(fmt.Sprintf("Deleting app %s resources from project %s.", o.AppName, o.projectName)) if err := cfClient.RemoveAppFromProject(proj, o.AppName); err != nil { if !isStackSetNotExistsErr(err) { o.spinner.Stop(log.Serrorf("Deleting app %s resources from project %s.", o.AppName, o.projectName)) return err } } o.spinner.Stop(log.Ssuccessf("Deleted app %s resources from project %s.", o.AppName, o.projectName)) return nil } func (o *deleteAppOpts) deleteSSMParam() error { if err := o.projectService.DeleteApplication(o.projectName, o.AppName); err != nil { return fmt.Errorf("delete app %s from project %s: %w", o.AppName, o.projectName, err) } return nil } func (o *deleteAppOpts) deleteWorkspaceFile() error { // Return if manifest does not exist. if err := o.workspaceService.DeleteFile(o.AppName); err != nil { return fmt.Errorf("delete app file %s: %w", o.AppName, err) } return nil } // RecommendedActions returns follow-up actions the user can take after successfully executing the command. func (o *deleteAppOpts) RecommendedActions() []string { // TODO: Add recommendation to do `pipeline delete` when it is available return []string{ fmt.Sprintf("Run %s to update the corresponding pipeline if it exists.", color.HighlightCode(fmt.Sprintf("ecs-preview pipeline update"))), } } // BuildAppDeleteCmd builds the command to delete application(s). func BuildAppDeleteCmd() *cobra.Command { opts := &deleteAppOpts{ GlobalOpts: NewGlobalOpts(), spinner: termprogress.NewSpinner(), sessProvider: session.NewProvider(), } cmd := &cobra.Command{ Use: "delete", Short: "Deletes an application from your project.", Example: ` Delete the "test" application. /code $ ecs-preview app delete --name test Delete the "test" application without prompting. /code $ ecs-preview app delete --name test --yes`, PreRunE: runCmdE(func(cmd *cobra.Command, args []string) error { workspaceService, err := workspace.New() if err != nil { return fmt.Errorf("intialize workspace service: %w", err) } opts.workspaceService = workspaceService projectService, err := store.New() if err != nil { return fmt.Errorf("create project service: %w", err) } opts.projectService = projectService return nil }), RunE: runCmdE(func(cmd *cobra.Command, args []string) error { if err := opts.Validate(); err != nil { return err } if err := opts.Ask(); err != nil { return err } if err := opts.Execute(); err != nil { return err } log.Infoln("Recommended follow-up actions:") for _, followup := range opts.RecommendedActions() { log.Infof("- %s\n", followup) } return nil }), } cmd.Flags().StringVarP(&opts.AppName, nameFlag, nameFlagShort, "", appFlagDescription) cmd.Flags().BoolVar(&opts.SkipConfirmation, yesFlag, false, yesFlagDescription) return cmd }
1
11,910
Should we make this not a pointer? That way, when we embed the struct, a copy is made instead of modifying the original object.
aws-copilot-cli
go
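The aws-copilot-cli review above asks whether GlobalOpts should be embedded by value rather than by pointer so that the embedding struct works on its own copy. The following is a minimal Go sketch of that difference using hypothetical stand-in types (globalOpts, withPointer, withValue), not the CLI's real ones: mutating through a pointer embed changes the shared original, while a value embed only changes the copy.

package main

import "fmt"

type globalOpts struct{ projectName string }

type withPointer struct{ *globalOpts } // shares the original object
type withValue struct{ globalOpts }    // owns an independent copy

func main() {
	g := globalOpts{projectName: "demo"}

	p := withPointer{&g}
	p.projectName = "changed-via-pointer" // also mutates g

	v := withValue{g}
	v.projectName = "changed-via-value" // mutates only the embedded copy

	fmt.Println(g.projectName) // "changed-via-pointer": shared through the pointer embed
	fmt.Println(v.projectName) // "changed-via-value": the copy diverged from g
}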
@@ -99,9 +99,9 @@ class BaseWebTest(object): def get_app_settings(self, additional_settings=None): settings = DEFAULT_SETTINGS.copy() - settings['storage_backend'] = 'kinto.core.storage.redis' - settings['cache_backend'] = 'kinto.core.cache.redis' - settings['permission_backend'] = 'kinto.core.permission.redis' + settings['storage_backend'] = 'kinto_redis.storage' + settings['cache_backend'] = 'kinto_redis.cache' + settings['permission_backend'] = 'kinto_redis.permission' settings['project_name'] = 'myapp' settings['project_version'] = '0.0.1'
1
from collections import defaultdict import mock import os import threading import functools try: import unittest2 as unittest except ImportError: import unittest # NOQA import webtest from cornice import errors as cornice_errors from pyramid.url import parse_url_overrides from pyramid.security import IAuthorizationPolicy, Authenticated, Everyone from zope.interface import implementer from enum import Enum from kinto.core import DEFAULT_SETTINGS from kinto.core.authorization import PRIVATE from kinto.core.storage import generators from kinto.tests.core.testapp import main as testapp from kinto.core.utils import sqlalchemy, follow_subrequest # This is the principal a connected user should have (in the tests). USER_PRINCIPAL = ('basicauth:9f2d363f98418b13253d6d7193fc88690302' 'ab0ae21295521f6029dffe9dc3b0') class DummyRequest(mock.MagicMock): def __init__(self, *args, **kwargs): super(DummyRequest, self).__init__(*args, **kwargs) self.upath_info = '/v0/' self.registry = mock.MagicMock(settings=DEFAULT_SETTINGS.copy()) self.registry.id_generators = defaultdict(generators.UUID4) self.GET = {} self.headers = {} self.errors = cornice_errors.Errors(request=self) self.authenticated_userid = 'bob' self.authn_type = 'basicauth' self.prefixed_userid = 'basicauth:bob' self.json = {} self.validated = {} self.matchdict = {} self.response = mock.MagicMock(headers={}) def route_url(*a, **kw): # XXX: refactor DummyRequest to take advantage of `pyramid.testing` parts = parse_url_overrides(kw) return ''.join([p for p in parts if p]) self.route_url = route_url follow_subrequest = follow_subrequest def get_request_class(prefix): class PrefixedRequestClass(webtest.app.TestRequest): @classmethod def blank(cls, path, *args, **kwargs): if prefix: path = '/%s%s' % (prefix, path) return webtest.app.TestRequest.blank(path, *args, **kwargs) return PrefixedRequestClass class BaseWebTest(object): """Base Web Test to test your cornice service. It setups the database before each test and delete it after. 
""" api_prefix = "v0" authorization_policy = 'kinto.tests.core.support.AllowAuthorizationPolicy' collection_url = '/mushrooms' principal = USER_PRINCIPAL def __init__(self, *args, **kwargs): super(BaseWebTest, self).__init__(*args, **kwargs) self.app = self.make_app() self.storage = self.app.app.registry.storage self.cache = self.app.app.registry.cache self.permission = self.app.app.registry.permission self.headers = { 'Content-Type': 'application/json', 'Authorization': 'Basic bWF0OjE=' } def make_app(self, settings=None, config=None): wsgi_app = testapp(self.get_app_settings(settings), config=config) app = webtest.TestApp(wsgi_app) app.RequestClass = get_request_class(self.api_prefix) return app def get_app_settings(self, additional_settings=None): settings = DEFAULT_SETTINGS.copy() settings['storage_backend'] = 'kinto.core.storage.redis' settings['cache_backend'] = 'kinto.core.cache.redis' settings['permission_backend'] = 'kinto.core.permission.redis' settings['project_name'] = 'myapp' settings['project_version'] = '0.0.1' settings['project_docs'] = 'https://kinto.readthedocs.io/' settings['multiauth.authorization_policy'] = self.authorization_policy if additional_settings is not None: settings.update(additional_settings) return settings def get_item_url(self, id=None): """Return the URL of the item using self.item_url.""" if id is None: id = self.record['id'] return self.collection_url + '/' + str(id) def tearDown(self): super(BaseWebTest, self).tearDown() self.storage.flush() self.cache.flush() self.permission.flush() class ThreadMixin(object): def setUp(self): super(ThreadMixin, self).setUp() self._threads = [] def tearDown(self): super(ThreadMixin, self).tearDown() for thread in self._threads: thread.join() def _create_thread(self, *args, **kwargs): thread = threading.Thread(*args, **kwargs) self._threads.append(thread) return thread class FormattedErrorMixin(object): def assertFormattedError(self, response, code, errno, error, message=None, info=None): # make sure we translate Enum instances to their values if isinstance(error, Enum): error = error.value if isinstance(errno, Enum): errno = errno.value self.assertEqual(response.headers['Content-Type'], 'application/json; charset=UTF-8') self.assertEqual(response.json['code'], code) self.assertEqual(response.json['errno'], errno) self.assertEqual(response.json['error'], error) if message is not None: self.assertIn(message, response.json['message']) else: self.assertNotIn('message', response.json) if info is not None: self.assertIn(info, response.json['info']) else: self.assertNotIn('info', response.json) @implementer(IAuthorizationPolicy) class AllowAuthorizationPolicy(object): def permits(self, context, principals, permission): if permission == PRIVATE: return Authenticated in principals if Everyone in principals: return True # Kinto-Core default authz policy uses prefixed_userid. prefixed = [context.prefixed_userid] return USER_PRINCIPAL in (principals + prefixed) def principals_allowed_by_permission(self, context, permission): raise NotImplementedError() # PRAGMA NOCOVER def authorize(permits=True, authz_class=None): """Patch the default authorization policy to return what is specified in :param:permits. 
""" if authz_class is None: authz_class = 'kinto.tests.core.support.AllowAuthorizationPolicy' def wrapper(f): @functools.wraps(f) def wrapped(*args, **kwargs): with mock.patch( '%s.permits' % authz_class, return_value=permits): return f(*args, **kwargs) return wrapped return wrapper skip_if_travis = unittest.skipIf('TRAVIS' in os.environ, "travis") skip_if_no_postgresql = unittest.skipIf(sqlalchemy is None, "postgresql is not installed.")
1
9,655
Does this mean that `kinto_redis` is required to run tests?
Kinto-kinto
py
@@ -475,7 +475,7 @@ function mergeBatchResults(batch, bulkResult, err, result) { if (Array.isArray(result.writeErrors)) { for (let i = 0; i < result.writeErrors.length; i++) { const writeError = { - index: batch.originalZeroIndex + result.writeErrors[i].index, + index: batch.originalIndexes[i], code: result.writeErrors[i].code, errmsg: result.writeErrors[i].errmsg, op: batch.operations[result.writeErrors[i].index]
1
'use strict'; const Long = require('../core').BSON.Long; const MongoError = require('../core').MongoError; const ObjectID = require('../core').BSON.ObjectID; const BSON = require('../core').BSON; const MongoWriteConcernError = require('../core').MongoWriteConcernError; const toError = require('../utils').toError; const handleCallback = require('../utils').handleCallback; const applyRetryableWrites = require('../utils').applyRetryableWrites; const applyWriteConcern = require('../utils').applyWriteConcern; const executeLegacyOperation = require('../utils').executeLegacyOperation; const isPromiseLike = require('../utils').isPromiseLike; // Error codes const WRITE_CONCERN_ERROR = 64; // Insert types const INSERT = 1; const UPDATE = 2; const REMOVE = 3; const bson = new BSON([ BSON.Binary, BSON.Code, BSON.DBRef, BSON.Decimal128, BSON.Double, BSON.Int32, BSON.Long, BSON.Map, BSON.MaxKey, BSON.MinKey, BSON.ObjectId, BSON.BSONRegExp, BSON.Symbol, BSON.Timestamp ]); /** * Keeps the state of a unordered batch so we can rewrite the results * correctly after command execution * @ignore */ class Batch { constructor(batchType, originalZeroIndex) { this.originalZeroIndex = originalZeroIndex; this.currentIndex = 0; this.originalIndexes = []; this.batchType = batchType; this.operations = []; this.size = 0; this.sizeBytes = 0; } } /** * @classdesc * The result of a bulk write. */ class BulkWriteResult { /** * Create a new BulkWriteResult instance * * **NOTE:** Internal Type, do not instantiate directly */ constructor(bulkResult) { this.result = bulkResult; } /** * Evaluates to true if the bulk operation correctly executes * @type {boolean} */ get ok() { return this.result.ok; } /** * The number of inserted documents * @type {number} */ get nInserted() { return this.result.nInserted; } /** * Number of upserted documents * @type {number} */ get nUpserted() { return this.result.nUpserted; } /** * Number of matched documents * @type {number} */ get nMatched() { return this.result.nMatched; } /** * Number of documents updated physically on disk * @type {number} */ get nModified() { return this.result.nModified; } /** * Number of removed documents * @type {number} */ get nRemoved() { return this.result.nRemoved; } /** * Returns an array of all inserted ids * * @return {object[]} */ getInsertedIds() { return this.result.insertedIds; } /** * Returns an array of all upserted ids * * @return {object[]} */ getUpsertedIds() { return this.result.upserted; } /** * Returns the upserted id at the given index * * @param {number} index the number of the upserted id to return, returns undefined if no result for passed in index * @return {object} */ getUpsertedIdAt(index) { return this.result.upserted[index]; } /** * Returns raw internal result * * @return {object} */ getRawResponse() { return this.result; } /** * Returns true if the bulk operation contains a write error * * @return {boolean} */ hasWriteErrors() { return this.result.writeErrors.length > 0; } /** * Returns the number of write errors off the bulk operation * * @return {number} */ getWriteErrorCount() { return this.result.writeErrors.length; } /** * Returns a specific write error object * * @param {number} index of the write error to return, returns null if there is no result for passed in index * @return {WriteError} */ getWriteErrorAt(index) { if (index < this.result.writeErrors.length) { return this.result.writeErrors[index]; } return null; } /** * Retrieve all write errors * * @return {WriteError[]} */ getWriteErrors() { return this.result.writeErrors; } /** 
* Retrieve lastOp if available * * @return {object} */ getLastOp() { return this.result.lastOp; } /** * Retrieve the write concern error if any * * @return {WriteConcernError} */ getWriteConcernError() { if (this.result.writeConcernErrors.length === 0) { return null; } else if (this.result.writeConcernErrors.length === 1) { // Return the error return this.result.writeConcernErrors[0]; } else { // Combine the errors let errmsg = ''; for (let i = 0; i < this.result.writeConcernErrors.length; i++) { const err = this.result.writeConcernErrors[i]; errmsg = errmsg + err.errmsg; // TODO: Something better if (i === 0) errmsg = errmsg + ' and '; } return new WriteConcernError({ errmsg: errmsg, code: WRITE_CONCERN_ERROR }); } } /** * @return {object} */ toJSON() { return this.result; } /** * @return {string} */ toString() { return `BulkWriteResult(${this.toJSON(this.result)})`; } /** * @return {boolean} */ isOk() { return this.result.ok === 1; } } /** * @classdesc An error representing a failure by the server to apply the requested write concern to the bulk operation. */ class WriteConcernError { /** * Create a new WriteConcernError instance * * **NOTE:** Internal Type, do not instantiate directly */ constructor(err) { this.err = err; } /** * Write concern error code. * @type {number} */ get code() { return this.err.code; } /** * Write concern error message. * @type {string} */ get errmsg() { return this.err.errmsg; } /** * @return {object} */ toJSON() { return { code: this.err.code, errmsg: this.err.errmsg }; } /** * @return {string} */ toString() { return `WriteConcernError(${this.err.errmsg})`; } } /** * @classdesc An error that occurred during a BulkWrite on the server. */ class WriteError { /** * Create a new WriteError instance * * **NOTE:** Internal Type, do not instantiate directly */ constructor(err) { this.err = err; } /** * WriteError code. * @type {number} */ get code() { return this.err.code; } /** * WriteError original bulk operation index. * @type {number} */ get index() { return this.err.index; } /** * WriteError message. 
* @type {string} */ get errmsg() { return this.err.errmsg; } /** * Returns the underlying operation that caused the error * @return {object} */ getOperation() { return this.err.op; } /** * @return {object} */ toJSON() { return { code: this.err.code, index: this.err.index, errmsg: this.err.errmsg, op: this.err.op }; } /** * @return {string} */ toString() { return `WriteError(${JSON.stringify(this.toJSON())})`; } } /** * Merges results into shared data structure * @ignore */ function mergeBatchResults(batch, bulkResult, err, result) { // If we have an error set the result to be the err object if (err) { result = err; } else if (result && result.result) { result = result.result; } else if (result == null) { return; } // Do we have a top level error stop processing and return if (result.ok === 0 && bulkResult.ok === 1) { bulkResult.ok = 0; const writeError = { index: 0, code: result.code || 0, errmsg: result.message, op: batch.operations[0] }; bulkResult.writeErrors.push(new WriteError(writeError)); return; } else if (result.ok === 0 && bulkResult.ok === 0) { return; } // Deal with opTime if available if (result.opTime || result.lastOp) { const opTime = result.lastOp || result.opTime; let lastOpTS = null; let lastOpT = null; // We have a time stamp if (opTime && opTime._bsontype === 'Timestamp') { if (bulkResult.lastOp == null) { bulkResult.lastOp = opTime; } else if (opTime.greaterThan(bulkResult.lastOp)) { bulkResult.lastOp = opTime; } } else { // Existing TS if (bulkResult.lastOp) { lastOpTS = typeof bulkResult.lastOp.ts === 'number' ? Long.fromNumber(bulkResult.lastOp.ts) : bulkResult.lastOp.ts; lastOpT = typeof bulkResult.lastOp.t === 'number' ? Long.fromNumber(bulkResult.lastOp.t) : bulkResult.lastOp.t; } // Current OpTime TS const opTimeTS = typeof opTime.ts === 'number' ? Long.fromNumber(opTime.ts) : opTime.ts; const opTimeT = typeof opTime.t === 'number' ? 
Long.fromNumber(opTime.t) : opTime.t; // Compare the opTime's if (bulkResult.lastOp == null) { bulkResult.lastOp = opTime; } else if (opTimeTS.greaterThan(lastOpTS)) { bulkResult.lastOp = opTime; } else if (opTimeTS.equals(lastOpTS)) { if (opTimeT.greaterThan(lastOpT)) { bulkResult.lastOp = opTime; } } } } // If we have an insert Batch type if (batch.batchType === INSERT && result.n) { bulkResult.nInserted = bulkResult.nInserted + result.n; } // If we have an insert Batch type if (batch.batchType === REMOVE && result.n) { bulkResult.nRemoved = bulkResult.nRemoved + result.n; } let nUpserted = 0; // We have an array of upserted values, we need to rewrite the indexes if (Array.isArray(result.upserted)) { nUpserted = result.upserted.length; for (let i = 0; i < result.upserted.length; i++) { bulkResult.upserted.push({ index: result.upserted[i].index + batch.originalZeroIndex, _id: result.upserted[i]._id }); } } else if (result.upserted) { nUpserted = 1; bulkResult.upserted.push({ index: batch.originalZeroIndex, _id: result.upserted }); } // If we have an update Batch type if (batch.batchType === UPDATE && result.n) { const nModified = result.nModified; bulkResult.nUpserted = bulkResult.nUpserted + nUpserted; bulkResult.nMatched = bulkResult.nMatched + (result.n - nUpserted); if (typeof nModified === 'number') { bulkResult.nModified = bulkResult.nModified + nModified; } else { bulkResult.nModified = null; } } if (Array.isArray(result.writeErrors)) { for (let i = 0; i < result.writeErrors.length; i++) { const writeError = { index: batch.originalZeroIndex + result.writeErrors[i].index, code: result.writeErrors[i].code, errmsg: result.writeErrors[i].errmsg, op: batch.operations[result.writeErrors[i].index] }; bulkResult.writeErrors.push(new WriteError(writeError)); } } if (result.writeConcernError) { bulkResult.writeConcernErrors.push(new WriteConcernError(result.writeConcernError)); } } function executeCommands(bulkOperation, options, callback) { if (bulkOperation.s.batches.length === 0) { return handleCallback(callback, null, new BulkWriteResult(bulkOperation.s.bulkResult)); } const batch = bulkOperation.s.batches.shift(); function resultHandler(err, result) { // Error is a driver related error not a bulk op error, terminate if (((err && err.driver) || (err && err.message)) && !(err instanceof MongoWriteConcernError)) { return handleCallback(callback, err); } // If we have and error if (err) err.ok = 0; if (err instanceof MongoWriteConcernError) { return handleMongoWriteConcernError(batch, bulkOperation.s.bulkResult, err, callback); } // Merge the results together const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult); const mergeResult = mergeBatchResults(batch, bulkOperation.s.bulkResult, err, result); if (mergeResult != null) { return handleCallback(callback, null, writeResult); } if (bulkOperation.handleWriteError(callback, writeResult)) return; // Execute the next command in line executeCommands(bulkOperation, options, callback); } bulkOperation.finalOptionsHandler({ options, batch, resultHandler }, callback); } /** * handles write concern error * * @ignore * @param {object} batch * @param {object} bulkResult * @param {boolean} ordered * @param {WriteConcernError} err * @param {function} callback */ function handleMongoWriteConcernError(batch, bulkResult, err, callback) { mergeBatchResults(batch, bulkResult, null, err.result); const wrappedWriteConcernError = new WriteConcernError({ errmsg: err.result.writeConcernError.errmsg, code: err.result.writeConcernError.result }); 
return handleCallback( callback, new BulkWriteError(toError(wrappedWriteConcernError), new BulkWriteResult(bulkResult)), null ); } /** * @classdesc An error indicating an unsuccessful Bulk Write */ class BulkWriteError extends MongoError { /** * Creates a new BulkWriteError * * @param {Error|string|object} message The error message * @param {BulkWriteResult} result The result of the bulk write operation * @extends {MongoError} */ constructor(error, result) { const message = error.err || error.errmsg || error.errMessage || error; super(message); Object.assign(this, error); this.name = 'BulkWriteError'; this.result = result; } } /** * @classdesc A builder object that is returned from {@link BulkOperationBase#find}. * Is used to build a write operation that involves a query filter. */ class FindOperators { /** * Creates a new FindOperators object. * * **NOTE:** Internal Type, do not instantiate directly * @param {OrderedBulkOperation|UnorderedBulkOperation} bulkOperation */ constructor(bulkOperation) { this.s = bulkOperation.s; } /** * Add a multiple update operation to the bulk operation * * @method * @param {object} updateDocument An update field for an update operation. See {@link https://docs.mongodb.com/manual/reference/command/update/#update-command-u u documentation} * @param {object} [options.hint] An optional hint for query optimization. See the {@link https://docs.mongodb.com/manual/reference/command/update/#update-command-hint|update command} reference for more information. * @throws {MongoError} If operation cannot be added to bulk write * @return {OrderedBulkOperation|UnorderedBulkOperation} A reference to the parent BulkOperation */ update(updateDocument) { // Perform upsert const upsert = typeof this.s.currentOp.upsert === 'boolean' ? this.s.currentOp.upsert : false; // Establish the update command const document = { q: this.s.currentOp.selector, u: updateDocument, multi: true, upsert: upsert }; if (updateDocument.hint) { document.hint = updateDocument.hint; } // Clear out current Op this.s.currentOp = null; return this.s.options.addToOperationsList(this, UPDATE, document); } /** * Add a single update operation to the bulk operation * * @method * @param {object} updateDocument An update field for an update operation. See {@link https://docs.mongodb.com/manual/reference/command/update/#update-command-u u documentation} * @param {object} [options.hint] An optional hint for query optimization. See the {@link https://docs.mongodb.com/manual/reference/command/update/#update-command-hint|update command} reference for more information. * @throws {MongoError} If operation cannot be added to bulk write * @return {OrderedBulkOperation|UnorderedBulkOperation} A reference to the parent BulkOperation */ updateOne(updateDocument) { // Perform upsert const upsert = typeof this.s.currentOp.upsert === 'boolean' ? 
this.s.currentOp.upsert : false; // Establish the update command const document = { q: this.s.currentOp.selector, u: updateDocument, multi: false, upsert: upsert }; if (updateDocument.hint) { document.hint = updateDocument.hint; } // Clear out current Op this.s.currentOp = null; return this.s.options.addToOperationsList(this, UPDATE, document); } /** * Add a replace one operation to the bulk operation * * @method * @param {object} updateDocument the new document to replace the existing one with * @throws {MongoError} If operation cannot be added to bulk write * @return {OrderedBulkOperation|UnorderedBulkOperation} A reference to the parent BulkOperation */ replaceOne(updateDocument) { this.updateOne(updateDocument); } /** * Upsert modifier for update bulk operation, noting that this operation is an upsert. * * @method * @throws {MongoError} If operation cannot be added to bulk write * @return {FindOperators} reference to self */ upsert() { this.s.currentOp.upsert = true; return this; } /** * Add a delete one operation to the bulk operation * * @method * @throws {MongoError} If operation cannot be added to bulk write * @return {OrderedBulkOperation|UnorderedBulkOperation} A reference to the parent BulkOperation */ deleteOne() { // Establish the update command const document = { q: this.s.currentOp.selector, limit: 1 }; // Clear out current Op this.s.currentOp = null; return this.s.options.addToOperationsList(this, REMOVE, document); } /** * Add a delete many operation to the bulk operation * * @method * @throws {MongoError} If operation cannot be added to bulk write * @return {OrderedBulkOperation|UnorderedBulkOperation} A reference to the parent BulkOperation */ delete() { // Establish the update command const document = { q: this.s.currentOp.selector, limit: 0 }; // Clear out current Op this.s.currentOp = null; return this.s.options.addToOperationsList(this, REMOVE, document); } /** * backwards compatability for deleteOne */ removeOne() { return this.deleteOne(); } /** * backwards compatability for delete */ remove() { return this.delete(); } } /** * @classdesc Parent class to OrderedBulkOperation and UnorderedBulkOperation * * **NOTE:** Internal Type, do not instantiate directly */ class BulkOperationBase { /** * Create a new OrderedBulkOperation or UnorderedBulkOperation instance * @property {number} length Get the number of operations in the bulk. */ constructor(topology, collection, options, isOrdered) { // determine whether bulkOperation is ordered or unordered this.isOrdered = isOrdered; options = options == null ? {} : options; // TODO Bring from driver information in isMaster // Get the namespace for the write operations const namespace = collection.s.namespace; // Used to mark operation as executed const executed = false; // Current item const currentOp = null; // Handle to the bson serializer, used to calculate running sizes const bson = topology.bson; // Set max byte size const isMaster = topology.lastIsMaster(); const maxBatchSizeBytes = isMaster && isMaster.maxBsonObjectSize ? isMaster.maxBsonObjectSize : 1024 * 1024 * 16; const maxWriteBatchSize = isMaster && isMaster.maxWriteBatchSize ? isMaster.maxWriteBatchSize : 1000; // Calculates the largest possible size of an Array key, represented as a BSON string // element. 
This calculation: // 1 byte for BSON type // # of bytes = length of (string representation of (maxWriteBatchSize - 1)) // + 1 bytes for null terminator const maxKeySize = (maxWriteBatchSize - 1).toString(10).length + 2; // Final options for retryable writes and write concern let finalOptions = Object.assign({}, options); finalOptions = applyRetryableWrites(finalOptions, collection.s.db); finalOptions = applyWriteConcern(finalOptions, { collection: collection }, options); const writeConcern = finalOptions.writeConcern; // Get the promiseLibrary const promiseLibrary = options.promiseLibrary || Promise; // Final results const bulkResult = { ok: 1, writeErrors: [], writeConcernErrors: [], insertedIds: [], nInserted: 0, nUpserted: 0, nMatched: 0, nModified: 0, nRemoved: 0, upserted: [] }; // Internal state this.s = { // Final result bulkResult: bulkResult, // Current batch state currentBatch: null, currentIndex: 0, // ordered specific currentBatchSize: 0, currentBatchSizeBytes: 0, // unordered specific currentInsertBatch: null, currentUpdateBatch: null, currentRemoveBatch: null, batches: [], // Write concern writeConcern: writeConcern, // Max batch size options maxBatchSizeBytes: maxBatchSizeBytes, maxWriteBatchSize: maxWriteBatchSize, maxKeySize, // Namespace namespace: namespace, // BSON bson: bson, // Topology topology: topology, // Options options: finalOptions, // Current operation currentOp: currentOp, // Executed executed: executed, // Collection collection: collection, // Promise Library promiseLibrary: promiseLibrary, // Fundamental error err: null, // check keys checkKeys: typeof options.checkKeys === 'boolean' ? options.checkKeys : true }; // bypass Validation if (options.bypassDocumentValidation === true) { this.s.bypassDocumentValidation = true; } } /** * Add a single insert document to the bulk operation * * @param {object} document the document to insert * @throws {MongoError} * @return {BulkOperationBase} A reference to self * * @example * const bulkOp = collection.initializeOrderedBulkOp(); * // Adds three inserts to the bulkOp. * bulkOp * .insert({ a: 1 }) * .insert({ b: 2 }) * .insert({ c: 3 }); * await bulkOp.execute(); */ insert(document) { if (this.s.collection.s.db.options.forceServerObjectId !== true && document._id == null) document._id = new ObjectID(); return this.s.options.addToOperationsList(this, INSERT, document); } /** * Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne. * Returns a builder object used to complete the definition of the operation. * * @method * @param {object} selector The selector for the bulk operation. See {@link https://docs.mongodb.com/manual/reference/command/update/#update-command-q q documentation} * @throws {MongoError} if a selector is not specified * @return {FindOperators} A helper object with which the write operation can be defined. 
* * @example * const bulkOp = collection.initializeOrderedBulkOp(); * * // Add an updateOne to the bulkOp * bulkOp.find({ a: 1 }).updateOne({ $set: { b: 2 } }); * * // Add an updateMany to the bulkOp * bulkOp.find({ c: 3 }).update({ $set: { d: 4 } }); * * // Add an upsert * bulkOp.find({ e: 5 }).upsert().updateOne({ $set: { f: 6 } }); * * // Add a deletion * bulkOp.find({ g: 7 }).deleteOne(); * * // Add a multi deletion * bulkOp.find({ h: 8 }).delete(); * * // Add a replaceOne * bulkOp.find({ i: 9 }).replaceOne({ j: 10 }); * * // Update using a pipeline (requires Mongodb 4.2 or higher) * bulk.find({ k: 11, y: { $exists: true }, z: { $exists: true } }).updateOne([ * { $set: { total: { $sum: [ '$y', '$z' ] } } } * ]); * * // All of the ops will now be executed * await bulkOp.execute(); */ find(selector) { if (!selector) { throw toError('Bulk find operation must specify a selector'); } // Save a current selector this.s.currentOp = { selector: selector }; return new FindOperators(this); } /** * Specifies a raw operation to perform in the bulk write. * * @method * @param {object} op The raw operation to perform. * @param {object} [options.hint] An optional hint for query optimization. See the {@link https://docs.mongodb.com/manual/reference/command/update/#update-command-hint|update command} reference for more information. * @return {BulkOperationBase} A reference to self */ raw(op) { const key = Object.keys(op)[0]; // Set up the force server object id const forceServerObjectId = typeof this.s.options.forceServerObjectId === 'boolean' ? this.s.options.forceServerObjectId : this.s.collection.s.db.options.forceServerObjectId; // Update operations if ( (op.updateOne && op.updateOne.q) || (op.updateMany && op.updateMany.q) || (op.replaceOne && op.replaceOne.q) ) { op[key].multi = op.updateOne || op.replaceOne ? false : true; return this.s.options.addToOperationsList(this, UPDATE, op[key]); } // Crud spec update format if (op.updateOne || op.updateMany || op.replaceOne) { const multi = op.updateOne || op.replaceOne ? false : true; const operation = { q: op[key].filter, u: op[key].update || op[key].replacement, multi: multi }; if (op[key].hint) { operation.hint = op[key].hint; } if (this.isOrdered) { operation.upsert = op[key].upsert ? true : false; if (op.collation) operation.collation = op.collation; } else { if (op[key].upsert) operation.upsert = true; } if (op[key].arrayFilters) operation.arrayFilters = op[key].arrayFilters; return this.s.options.addToOperationsList(this, UPDATE, operation); } // Remove operations if ( op.removeOne || op.removeMany || (op.deleteOne && op.deleteOne.q) || (op.deleteMany && op.deleteMany.q) ) { op[key].limit = op.removeOne ? 1 : 0; return this.s.options.addToOperationsList(this, REMOVE, op[key]); } // Crud spec delete operations, less efficient if (op.deleteOne || op.deleteMany) { const limit = op.deleteOne ? 
1 : 0; const operation = { q: op[key].filter, limit: limit }; if (this.isOrdered) { if (op.collation) operation.collation = op.collation; } return this.s.options.addToOperationsList(this, REMOVE, operation); } // Insert operations if (op.insertOne && op.insertOne.document == null) { if (forceServerObjectId !== true && op.insertOne._id == null) op.insertOne._id = new ObjectID(); return this.s.options.addToOperationsList(this, INSERT, op.insertOne); } else if (op.insertOne && op.insertOne.document) { if (forceServerObjectId !== true && op.insertOne.document._id == null) op.insertOne.document._id = new ObjectID(); return this.s.options.addToOperationsList(this, INSERT, op.insertOne.document); } if (op.insertMany) { for (let i = 0; i < op.insertMany.length; i++) { if (forceServerObjectId !== true && op.insertMany[i]._id == null) op.insertMany[i]._id = new ObjectID(); this.s.options.addToOperationsList(this, INSERT, op.insertMany[i]); } return; } // No valid type of operation throw toError( 'bulkWrite only supports insertOne, insertMany, updateOne, updateMany, removeOne, removeMany, deleteOne, deleteMany' ); } /** * helper function to assist with promiseOrCallback behavior * @ignore * @param {*} err * @param {*} callback */ _handleEarlyError(err, callback) { if (typeof callback === 'function') { callback(err, null); return; } return this.s.promiseLibrary.reject(err); } /** * An internal helper method. Do not invoke directly. Will be going away in the future * * @ignore * @method * @param {class} bulk either OrderedBulkOperation or UnorderdBulkOperation * @param {object} writeConcern * @param {object} options * @param {function} callback */ bulkExecute(_writeConcern, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = options || {}; if (typeof _writeConcern === 'function') { callback = _writeConcern; } else if (_writeConcern && typeof _writeConcern === 'object') { this.s.writeConcern = _writeConcern; } if (this.s.executed) { const executedError = toError('batch cannot be re-executed'); return this._handleEarlyError(executedError, callback); } // If we have current batch if (this.isOrdered) { if (this.s.currentBatch) this.s.batches.push(this.s.currentBatch); } else { if (this.s.currentInsertBatch) this.s.batches.push(this.s.currentInsertBatch); if (this.s.currentUpdateBatch) this.s.batches.push(this.s.currentUpdateBatch); if (this.s.currentRemoveBatch) this.s.batches.push(this.s.currentRemoveBatch); } // If we have no operations in the bulk raise an error if (this.s.batches.length === 0) { const emptyBatchError = toError('Invalid Operation, no operations specified'); return this._handleEarlyError(emptyBatchError, callback); } return { options, callback }; } /** * The callback format for results * @callback BulkOperationBase~resultCallback * @param {MongoError} error An error instance representing the error during the execution. * @param {BulkWriteResult} result The bulk write result. */ /** * Execute the bulk operation * * @method * @param {WriteConcern} [_writeConcern] Optional write concern. Can also be specified through options. * @param {object} [options] Optional settings. * @param {(number|string)} [options.w] The write concern. * @param {number} [options.wtimeout] The write concern timeout. * @param {boolean} [options.j=false] Specify a journal write concern. * @param {boolean} [options.fsync=false] Specify a file sync write concern. 
* @param {BulkOperationBase~resultCallback} [callback] A callback that will be invoked when bulkWrite finishes/errors * @throws {MongoError} Throws error if the bulk object has already been executed * @throws {MongoError} Throws error if the bulk object does not have any operations * @return {Promise|void} returns Promise if no callback passed */ execute(_writeConcern, options, callback) { const ret = this.bulkExecute(_writeConcern, options, callback); if (!ret || isPromiseLike(ret)) { return ret; } options = ret.options; callback = ret.callback; return executeLegacyOperation(this.s.topology, executeCommands, [this, options, callback]); } /** * Handles final options before executing command * * An internal method. Do not invoke. Will not be accessible in the future * * @ignore * @param {object} config * @param {object} config.options * @param {number} config.batch * @param {function} config.resultHandler * @param {function} callback */ finalOptionsHandler(config, callback) { const finalOptions = Object.assign({ ordered: this.isOrdered }, config.options); if (this.s.writeConcern != null) { finalOptions.writeConcern = this.s.writeConcern; } if (finalOptions.bypassDocumentValidation !== true) { delete finalOptions.bypassDocumentValidation; } // Set an operationIf if provided if (this.operationId) { config.resultHandler.operationId = this.operationId; } // Serialize functions if (this.s.options.serializeFunctions) { finalOptions.serializeFunctions = true; } // Ignore undefined if (this.s.options.ignoreUndefined) { finalOptions.ignoreUndefined = true; } // Is the bypassDocumentValidation options specific if (this.s.bypassDocumentValidation === true) { finalOptions.bypassDocumentValidation = true; } // Is the checkKeys option disabled if (this.s.checkKeys === false) { finalOptions.checkKeys = false; } if (finalOptions.retryWrites) { if (config.batch.batchType === UPDATE) { finalOptions.retryWrites = finalOptions.retryWrites && !config.batch.operations.some(op => op.multi); } if (config.batch.batchType === REMOVE) { finalOptions.retryWrites = finalOptions.retryWrites && !config.batch.operations.some(op => op.limit === 0); } } try { if (config.batch.batchType === INSERT) { this.s.topology.insert( this.s.namespace, config.batch.operations, finalOptions, config.resultHandler ); } else if (config.batch.batchType === UPDATE) { this.s.topology.update( this.s.namespace, config.batch.operations, finalOptions, config.resultHandler ); } else if (config.batch.batchType === REMOVE) { this.s.topology.remove( this.s.namespace, config.batch.operations, finalOptions, config.resultHandler ); } } catch (err) { // Force top level error err.ok = 0; // Merge top level error and return handleCallback(callback, null, mergeBatchResults(config.batch, this.s.bulkResult, err, null)); } } /** * Handles the write error before executing commands * * An internal helper method. Do not invoke directly. Will be going away in the future * * @ignore * @param {function} callback * @param {BulkWriteResult} writeResult * @param {class} self either OrderedBulkOperation or UnorderdBulkOperation */ handleWriteError(callback, writeResult) { if (this.s.bulkResult.writeErrors.length > 0) { if (this.s.bulkResult.writeErrors.length === 1) { handleCallback( callback, new BulkWriteError(toError(this.s.bulkResult.writeErrors[0]), writeResult), null ); return true; } const msg = this.s.bulkResult.writeErrors[0].errmsg ? 
this.s.bulkResult.writeErrors[0].errmsg : 'write operation failed'; handleCallback( callback, new BulkWriteError( toError({ message: msg, code: this.s.bulkResult.writeErrors[0].code, writeErrors: this.s.bulkResult.writeErrors }), writeResult ), null ); return true; } else if (writeResult.getWriteConcernError()) { handleCallback( callback, new BulkWriteError(toError(writeResult.getWriteConcernError()), writeResult), null ); return true; } } } Object.defineProperty(BulkOperationBase.prototype, 'length', { enumerable: true, get: function() { return this.s.currentIndex; } }); // Exports symbols module.exports = { Batch, BulkOperationBase, bson, INSERT: INSERT, UPDATE: UPDATE, REMOVE: REMOVE, BulkWriteError };
1
16,858
will this ensure the indexes for ordered writes as well?
mongodb-node-mongodb-native
js
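The mongodb-node-mongodb-native patch above replaces a base-offset calculation (originalZeroIndex plus the server-reported index) with a lookup into batch.originalIndexes, and the reviewer asks whether this also covers ordered writes. The sketch below is in Go rather than JavaScript, purely to illustrate the mapping concern with hypothetical types: a per-batch table of original positions translates batch-local error indexes back to the caller's ordering, which matters once operations are regrouped into per-type batches; a plain base offset only works while a batch preserves the original contiguous order, as ordered bulk writes do. The lookup key used here (the error's batch-local index) is this sketch's choice, not necessarily the driver's.

package main

import "fmt"

type batch struct {
	originalIndexes []int // original caller position of each operation in this batch
}

type writeError struct{ index int } // index within the batch, as reported by the server

// remap translates batch-local error indexes to the caller's original indexes.
func remap(b batch, errs []writeError) []int {
	out := make([]int, 0, len(errs))
	for _, e := range errs {
		out = append(out, b.originalIndexes[e.index])
	}
	return out
}

func main() {
	// An unordered bulk regrouped the caller's operations 0, 3 and 5 into one batch.
	b := batch{originalIndexes: []int{0, 3, 5}}
	errs := []writeError{{index: 1}, {index: 2}} // the server reports failures for batch ops 1 and 2
	fmt.Println(remap(b, errs))                  // [3 5]: the caller's original positions
}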
@@ -28,7 +28,11 @@ from nupic.support.lockattributes import LockAttributesMixin import functools basicTypes = ['Byte', 'Int16', 'UInt16', 'Int32', 'UInt32', 'Int64', 'UInt64', - 'Real32', 'Real64', 'Handle'] + 'Real32', 'Real64', 'Handle', 'Bool'] + +arrayTypes = ['ByteArray', 'Int16Array', 'UInt16Array', 'Int32Array', + 'UInt32Array', 'Int64Array', 'UInt64Array', 'Real32Array', + 'Real64Array', 'BoolArray'] pyRegions = ( ("nupic.regions.AnomalyRegion", "AnomalyRegion"),
1
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import os import sys import nupic.bindings.engine_internal as engine from nupic.support.lockattributes import LockAttributesMixin import functools basicTypes = ['Byte', 'Int16', 'UInt16', 'Int32', 'UInt32', 'Int64', 'UInt64', 'Real32', 'Real64', 'Handle'] pyRegions = ( ("nupic.regions.AnomalyRegion", "AnomalyRegion"), ("nupic.regions.CLAClassifierRegion", "CLAClassifierRegion"), ("nupic.regions.ImageSensor", "ImageSensor"), ("nupic.regions.KNNAnomalyClassifierRegion", "KNNAnomalyClassifierRegion"), ("nupic.regions.KNNClassifierRegion", "KNNClassifierRegion"), ("nupic.regions.PyRegion", "PyRegion"), ("nupic.regions.RecordSensor", "RecordSensor"), ("nupic.regions.SPRegion", "SPRegion"), ("nupic.regions.SVMClassifierNode", "SVMClassifierNode"), ("nupic.regions.TPRegion", "TPRegion"), ("nupic.bindings.regions.TestNode", "TestNode"), ("nupic.regions.TestRegion", "TestRegion"), ("nupic.regions.UnimportableNode", "UnimportableNode"), ("nupic.regions.PluggableEncoderSensor", "PluggableEncoderSensor"), ("nupic.regions.extra.GaborNode2", "GaborNode2")) registeredRegions = False def registerBuiltInRegions(): global registeredRegions # Initialize nupic regions if not registeredRegions: for module, className in pyRegions: engine.Network.registerPyRegion(module, className) registeredRegions = True registerBuiltInRegions() # Import all the array types from engine (there is no HandleArray) arrayTypes = [t + 'Array' for t in basicTypes[:-1]] for a in arrayTypes: exec('from %s import %s as %s' % (engine.__name__, a, a)) # Intercept the default exception handling for the purposes of stripping # parts of the stack trace that can confuse users. If you want the original # stack trace define this environment variable if not 'NTA_STANDARD_PYTHON_UNHANDLED_EXCEPTIONS' in os.environ: import traceback import cStringIO def customExceptionHandler(type, value, tb): """Catch unhandled Python exception The handler prints the original exception info including into a buffer. It then extracts the original error message (when the exception is raised inside a Py node additional stacktrace info will be appended in the end) and saves the original exception to a file called error.txt. It prints just the error message to the screen and tells the user about the error.txt file. 
""" # Print the exception info to a string IO buffer for manipulation buff = cStringIO.StringIO() traceback.print_exception(type, value, tb, file=buff) text = buff.getvalue() # get the lines skip the first one: "Traceback (most recent call last)" lines = text.split('\n')[1:] # # Extract the error message begin = 0 end = len(lines) for i, line in enumerate(lines): if line.startswith('RuntimeError:'): begin = i # # elif line.startswith('Traceback (most recent call last):'): # end = i # break # message = '\n'.join(lines[begin:end]) message = message[len('Runtime Error:'):] #stacktrace = lines[end:] # Get the stack trace if available (default to empty string) stacktrace = getattr(value, 'stackTrace', '') # Remove engine from stack trace lines = [x for x in lines if 'engine' not in x] failMessage = 'The program failed with the following error message:' dashes = '-' * len(failMessage) print print dashes print 'Traceback (most recent call last):' print '\n'.join(lines[:begin-2]) if stacktrace: print stacktrace print dashes print 'The program failed with the following error message:' print dashes print message print #sys.excepthook = customExceptionHandler # Expose the timer class directly # Do it this way instead of bringing engine.Timer # into the namespace to avoid engine # in the class name class Timer(engine.Timer): pass # Expose the os class directly # The only wrapped method is getProcessMemoryUsage() class OS(engine.OS): pass class Dimensions(engine.Dimensions): """Represent the topology of an N-dimensional region Basically, it is a list of integers such as: [4, 8, 6] In this example the topology is a 3 dimensional region with 4 x 8 x 6 nodes. You can initialize it with a list of dimensions or with no arguments and then append dimensions. """ def __init__(self, *args): """Construct a Dimensions object The constructor can be called with no arguments or with a list of integers """ # Init the base class engine.Dimensions.__init__(self, *args) def __str__(self): return self.toString() def Array(dtype, size=None, ref=False): """Factory function that creates typed Array or ArrayRef objects dtype - the data type of the array (as string). Supported types are: Byte, Int16, UInt16, Int32, UInt32, Int64, UInt64, Real32, Real64 size - the size of the array. Must be positive integer. """ def getArrayType(self): """A little function to replace the getType() method of arrays It returns a string representation of the array element type instead of the integer value (NTA_BasicType enum) returned by the origianl array """ return self._dtype # ArrayRef can't be allocated if ref: assert size is None index = basicTypes.index(dtype) if index == -1: raise Exception('Invalid data type: ' + dtype) if size and size <= 0: raise Exception('Array size must be positive') suffix = 'ArrayRef' if ref else 'Array' arrayFactory = getattr(engine, dtype + suffix) arrayFactory.getType = getArrayType if size: a = arrayFactory(size) else: a = arrayFactory() a._dtype = basicTypes[index] return a def ArrayRef(dtype): return Array(dtype, None, True) class CollectionIterator(object): def __init__(self, collection): self.collection = collection self.index = 0 def next(self): index = self.index if index == self.collection.getCount(): raise StopIteration self.index += 1 return self.collection.getByIndex(index)[0] class CollectionWrapper(object): """Wrap an nupic::Collection with a dict-like interface The optional valueWrapper is used to wrap values for adaptation purposes. 
Maintains the original documentation collection - the original collection valueWrapper - an optional callable object used to wrap values. """ def IdentityWrapper(o): return o def __init__(self, collection, valueWrapper=IdentityWrapper): self.collection = collection self.valueWrapper = valueWrapper self.__class__.__doc__ == collection.__class__.__doc__ def __iter__(self): return CollectionIterator(self.collection) def __str__(self): return str(self.collection) def __repr__(self): return repr(self.collection) def __len__(self): return self.collection.getCount() def __getitem__(self, key): if not self.collection.contains(key): raise KeyError('Key ' + key + ' not found') value = self.collection.getByName(key) value = self.valueWrapper(key, value) return value def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default def __contains__(self, key): return self.collection.contains(key) def keys(self): keys = set() for i in range(self.collection.getCount()): keys.add(self.collection.getByIndex(i)[0]) return keys def values(self): values = set() for i in range(self.collection.getCount()): p = self.collection.getByIndex(i) values.add(self.valueWrapper(p[0], p[1])) return values def items(self): items = set() for i in range(self.collection.getCount()): p = self.collection.getByIndex(i) items.add((p[0], self.valueWrapper(p[0], p[1]))) return items def __cmp__(self, other): return self.collection == other.collection def __hash__(self): return hash(self.collection) class SpecItem(object): """Wrapper that translates the data type and access code to a string The original values are an enumerated type in C++ that become just integers in Python. This class wraps the original ParameterSpec and translates the integer values to meaningful strings: that correspond to the C++ enum labels. 
It is used to wrap ParameterSpec, InputSpec and OutputSpec """ accessModes = ['Create', 'ReadOnly', 'ReadWrite'] def __init__(self, name, item): self.name = name self.item = item self.__class__.__doc__ == item.__class__.__doc__ # Translate data type to string representation self.dataType = basicTypes[item.dataType] # Translate access mode to string representation if hasattr(item, 'accessMode'): # ParameterSpec only self.accessMode = SpecItem.accessModes[item.accessMode] def __getattr__(self, name): return getattr(self.item, name) def __str__(self): d = dict(name=self.name, description=self.description, dataType=self.dataType, count=self.count) if hasattr(self.item, 'accessMode'): # ParameterSpec only self.accessMode = SpecItem.accessModes[self.item.accessMode] if hasattr(self.item, 'accessMode'): # ParameterSpec only d['accessMode'] = self.accessMode if hasattr(self.item, 'constraints'): # ParameterSpec only d['constraints'] = self.constraints if hasattr(self.item, 'defaultValue'): # ParameterSpec only d['defaultValue'] = self.defaultValue return str(d) class Spec(object): def __init__(self, spec): self.spec = spec self.__class__.__doc__ == spec.__class__.__doc__ self.description = spec.description self.singleNodeOnly = spec.singleNodeOnly self.inputs = CollectionWrapper(spec.inputs, SpecItem) self.outputs = CollectionWrapper(spec.outputs, SpecItem) self.parameters = CollectionWrapper(spec.parameters, SpecItem) self.commands = CollectionWrapper(spec.commands) def __str__(self): return self.spec.toString() def __repr__(self): return self.spec.toString() class _ArrayParameterHelper: """This class is used by Region._getParameterMethods""" def __init__(self, region, datatype): self._region = region self.datatype = basicTypes[datatype] def getParameterArray(self, paramName): # return a PyArray instead of a plain array. # PyArray constructor/class for type X is called XArray() #factoryName = self.datatype + 'Array' #if factoryName not in globals(): # import exceptions # raise exceptions.Exception("Internal error -- did not find %s constructor in engine" % factoryName) # #arrayFactory = globals()[factoryName] #a = arrayFactory(); a = Array(self.datatype) self._region.getParameterArray(paramName, a) return a class Region(LockAttributesMixin): """ @doc:place_holder(Region.description) """ #Wrapper for a network region #- Maintains original documentation #- Implement syntactic sugar properties: #name = property(getName) #type = property(getType) #spec = property(getSpec) #dimensions = property(getDimensions, setDimensions) #network = property(getNetwork) #- Makes sure that returned objects are high-level wrapper objects #- Forwards everything else to internal region def __init__(self, region, network): """Store the wraped region and hosting network The network is the high-level Network and not the internal Network. 
This is important in case the user requests the network from the region (never leak a engine object, remember) """ self._network = network self._region = region self.__class__.__doc__ == region.__class__.__doc__ # A cache for typed get/setPArameter() calls self._paramTypeCache = {} def __getattr__(self, name): if not '_region' in self.__dict__: raise AttributeError return getattr(self._region, name) def __setattr__(self, name, value): if name in ('_region', '__class__', '_network'): self.__dict__[name] = value elif name == 'dimensions': self.setDimensions(value) else: setattr(self._region, name, value) @staticmethod def getSpecFromType(nodeType): """ @doc:place_holder(Region.getSpecFromType) """ return Spec(engine.Region.getSpecFromType(nodeType)) def compute(self): """ @doc:place_holder(Region.compute) ** This line comes from the original docstring (not generated by Documentor) """ return self._region.compute() def getInputData(self, inputName): """ @doc:place_holder(Region.getInputData) """ return self._region.getInputArray(inputName) def getOutputData(self, outputName): """ @doc:place_holder(Region.getOutputData) """ return self._region.getOutputArray(outputName) def getInputNames(self): """ Returns list of input names in spec. """ inputs = self.getSpec().inputs return [inputs.getByIndex(i)[0] for i in xrange(inputs.getCount())] def getOutputNames(self): """ Returns list of output names in spec. """ outputs = self.getSpec().outputs return [outputs.getByIndex(i)[0] for i in xrange(outputs.getCount())] def executeCommand(self, args): """ @doc:place_holder(Region.executeCommand) """ return self._region.executeCommand(args) def _getSpec(self): """Spec of the region""" return Spec(self._region.getSpec()) def _getDimensions(self): """Dimensions of the region""" return Dimensions(tuple(self._region.getDimensions())) def _getNetwork(self): """Network for the region""" return self._network def __hash__(self): """Hash a region""" return self._region.__hash__() def __cmp__(self, other): """Compare regions""" return self._region == other._region def _getParameterMethods(self, paramName): """Returns functions to set/get the parameter. These are the strongly typed functions get/setParameterUInt32, etc. The return value is a pair: setfunc, getfunc If the parameter is not available on this region, setfunc/getfunc are None. """ if paramName in self._paramTypeCache: return self._paramTypeCache[paramName] try: # Catch the error here. 
We will re-throw in getParameter or # setParameter with a better error message than we could generate here paramSpec = self.getSpec().parameters.getByName(paramName) except: return (None, None) dataType = paramSpec.dataType dataTypeName = basicTypes[dataType] count = paramSpec.count if count == 1: # Dynamically generate the proper typed get/setParameter<dataType> x = 'etParameter' + dataTypeName try: g = getattr(self, 'g' + x) # get the typed getParameter method s = getattr(self, 's' + x) # get the typed setParameter method except AttributeError: raise Exception("Internal error: unknown parameter type %s" % dataTypeName) info = (s, g) else: if dataTypeName == "Byte": info = (self.setParameterString, self.getParameterString) else: helper = _ArrayParameterHelper(self, dataType) info = (self.setParameterArray, helper.getParameterArray) self._paramTypeCache[paramName] = info return info def getParameter(self, paramName): """Get parameter value""" (setter, getter) = self._getParameterMethods(paramName) if getter is None: import exceptions raise exceptions.Exception( "getParameter -- parameter name '%s' does not exist in region %s of type %s" % (paramName, self.name, self.type)) return getter(paramName) def setParameter(self, paramName, value): """Set parameter value""" (setter, getter) = self._getParameterMethods(paramName) if setter is None: import exceptions raise exceptions.Exception( "setParameter -- parameter name '%s' does not exist in region %s of type %s" % (paramName, self.name, self.type)) setter(paramName, value) def _get(self, method): """Auto forwarding of properties to get methods of internal region""" return getattr(self._region, method)() network = property(_getNetwork, doc='@property:place_holder(Region.getNetwork)') name = property(functools.partial(_get, method='getName'), doc="@property:place_holder(Region.getName)") type = property(functools.partial(_get, method='getType'), doc='@property:place_holder(Region.getType)') spec = property(_getSpec, doc='@property:place_holder(Region.getSpec)') dimensions = property(_getDimensions, engine.Region.setDimensions, doc='@property:place_holder(Region.getDimensions)') computeTimer = property(functools.partial(_get, method='getComputeTimer'), doc='@property:place_holder(Region.getComputeTimer)') executeTimer = property(functools.partial(_get, method='getExecuteTimer'), doc='@property:place_holder(Region.getExecuteTimer)') class Network(engine.Network): """ @doc:place_holder(Network.description) """ def __init__(self, *args): """Constructor - Initialize the internal engine.Network class generated by Swig - Attach docstrings to selected methods """ # Init engine.Network class engine.Network.__init__(self, *args) # Prepare documentation table. # Each item is pair of method/property, docstring # The docstring is attached later to the method or property. # The key for method items is the method object of the engine.Network class. # The key for properties is the property name docTable = ( (engine.Network.getRegions, 'Get the collection of regions in a network'), ) # Attach documentation to methods and properties for obj, docString in docTable: if isinstance(obj, str): prop = getattr(Network, obj) assert isinstance(prop, property) setattr(Network, obj, property(prop.fget, prop.fset, prop.fdel, docString)) else: obj.im_func.__doc__ = docString def _getRegions(self): """Get the collection of regions in a network This is a tricky one. The collection of regions returned from from the internal network is a collection of internal regions. 
The desired collection is a collelcion of net.Region objects that also points to this network (net.network) and not to the internal network. To achieve that a CollectionWrapper class is used with a custom makeRegion() function (see bellow) as a value wrapper. The CollectionWrapper class wraps each value in the original collection with the result of the valueWrapper. """ def makeRegion(name, r): """Wrap a engine region with a nupic.engine.Region Also passes the containing nupic.engine.Network network in _network. This function is passed a value wrapper to the CollectionWrapper """ r = Region(r, self) #r._network = self return r regions = CollectionWrapper(engine.Network.getRegions(self), makeRegion) return regions def addRegion(self, name, nodeType, nodeParams): """ @doc:place_holder(Network.addRegion) """ engine.Network.addRegion(self, name, nodeType, nodeParams) return self._getRegions()[name] def addRegionFromBundle(self, name, nodeType, dimensions, bundlePath, label): """ @doc:place_holder(Network.addRegionFromBundle) """ engine.Network.addRegionFromBundle(self, name, nodeType, dimensions, bundlePath, label) return self._getRegions()[name] def setPhases(self, name, phases): """ @doc:place_holder(Network.setPhases) """ phases = engine.UInt32Set(phases) engine.Network.setPhases(self, name, phases) def run(self, n): """ @doc:place_holder(Network.run) """ #Just forward to the internal network #This is needed for inspectors to work properly because they wrap some key #methods such as 'run'. engine.Network.run(self, n) def disableProfiling(self, *args, **kwargs): """ @doc:place_holder(Network.disableProfiling) """ engine.Network.disableProfiling(self, *args, **kwargs) def enableProfiling(self, *args, **kwargs): """ @doc:place_holder(Network.enableProfiling) """ engine.Network.enableProfiling(self, *args, **kwargs) def getCallbacks(self, *args, **kwargs): """ @doc:place_holder(Network.getCallbacks) """ engine.Network.getCallbacks(self, *args, **kwargs) def initialize(self, *args, **kwargs): """ @doc:place_holder(Network.initialize) """ engine.Network.initialize(self, *args, **kwargs) def link(self, *args, **kwargs): """ @doc:place_holder(Network.link) """ engine.Network.link(self, *args, **kwargs) def removeLink(self, *args, **kwargs): """ @doc:place_holder(Network.removeLink) """ engine.Network.removeLink(self, *args, **kwargs) def removeRegion(self, *args, **kwargs): """ @doc:place_holder(Network.removeRegion) """ engine.Network.removeRegion(self, *args, **kwargs) def resetProfiling(self, *args, **kwargs): """ @doc:place_holder(Network.resetProfiling) """ engine.Network.resetProfiling(self, *args, **kwargs) def save(self, *args, **kwargs): """ @doc:place_holder(Network.save) """ if len(args) > 0 and not isinstance(args[0], str): raise TypeError("Save path must be of type {}.".format(str)) engine.Network.save(self, *args, **kwargs) def getRegionsByType(self, regionClass): """ Gets all region instances of a given class (for example, nupic.regions.SPRegion.SPRegion). 
""" regions = [] for region in self.regions.values(): if type(region.getSelf()) is regionClass: regions.append(region) return regions @staticmethod def registerRegion(regionClass): """ Adds the module and class name for the region to the list of classes the network can use regionClass: a pointer to a subclass of PyRegion """ engine.Network.registerPyRegion(regionClass.__module__, regionClass.__name__) @staticmethod def unregisterRegion(regionName): """ Unregisters a region from the internal list of regions :param str regionName: The name of the region to unregister (ex: regionName=regionClass.__name__) """ engine.Network.unregisterPyRegion(regionName) # Syntactic sugar properties regions = property(_getRegions, doc='@property:place_holder(Network.getRegions)') minPhase = property(engine.Network.getMinPhase, doc='@property:place_holder(Network.getMinPhase)') maxPhase = property(engine.Network.getMaxPhase, doc='@property:place_holder(Network.getMaxPhase)') minEnabledPhase = property( engine.Network.getMinEnabledPhase, engine.Network.setMinEnabledPhase, doc='@property:place_holder(Network.getMinEnabledPhase)') maxEnabledPhase = property( engine.Network.getMaxEnabledPhase, engine.Network.setMaxEnabledPhase, doc='@property:place_holder(Network.getMaxEnabledPhase)') if __name__ == '__main__': n = Network() print n.regions print len(n.regions) print Network.regions.__doc__ d = Dimensions([3, 4, 5]) print len(d) print d a = Array('Byte', 5) print len(a) for i in range(len(a)): a[i] = ord('A') + i for i in range(len(a)): print a[i] r = n.addRegion('r', 'TestNode', '') print 'name:', r.name print 'node type:', r.type print 'node spec:', r.spec
1
20,687
Put `BoolArray` first?
numenta-nupic
py
@@ -0,0 +1,3 @@ +window.appMode = 'standalone'; + +import('./site');
1
1
17,790
I believe this whole file can be deleted now.
jellyfin-jellyfin-web
js
@@ -34,6 +34,17 @@ var ( Usage: "Port for listening incoming api requests", Value: 4050, } + + promiseCurrencyFlag = cli.StringFlag{ + Name: "promise.currency", + Usage: "Type of currency that will be used for issuing promises", + Value: "MYST", + } + promiseAmountFlag = cli.IntFlag{ + Name: "promise.amount", + Usage: "Amount of money that will be used for issuing a single promise", + Value: 100, + } ) // RegisterFlagsNode function register node flags to flag list
1
/* * Copyright (C) 2017 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package cmd import ( openvpn_core "github.com/mysteriumnetwork/go-openvpn/openvpn/core" "github.com/mysteriumnetwork/node/core/node" "github.com/urfave/cli" ) var ( tequilapiAddressFlag = cli.StringFlag{ Name: "tequilapi.address", Usage: "IP address of interface to listen for incoming connections", Value: "127.0.0.1", } tequilapiPortFlag = cli.IntFlag{ Name: "tequilapi.port", Usage: "Port for listening incoming api requests", Value: 4050, } ) // RegisterFlagsNode function register node flags to flag list func RegisterFlagsNode(flags *[]cli.Flag) error { if err := RegisterFlagsDirectory(flags); err != nil { return err } *flags = append(*flags, tequilapiAddressFlag, tequilapiPortFlag) RegisterFlagsNetwork(flags) openvpn_core.RegisterFlags(flags) RegisterFlagsLocation(flags) return nil } // ParseFlagsNode function fills in node options from CLI context func ParseFlagsNode(ctx *cli.Context) node.Options { return node.Options{ ParseFlagsDirectory(ctx), ctx.GlobalString(tequilapiAddressFlag.Name), ctx.GlobalInt(tequilapiPortFlag.Name), openvpn_core.ParseFlags(ctx), ParseFlagsLocation(ctx), ParseFlagsNetwork(ctx), } }
1
11,837
We should specify the format for the user, i.e. Integer, 1000 == 1 MYST (see the sketch after this record).
mysteriumnetwork-node
go
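A minimal, self-contained sketch of what the reviewer asks for: spelling out the amount's format in the flag's `Usage` text. The exact wording and the 1000 == 1 MYST convention come from the review comment, not from the patch, so treat this as an illustration rather than the project's actual change; the tiny `main` is only there to make the snippet runnable.

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

// Hypothetical clarified flag: the Usage string now states the unit convention
// suggested in the review (integer, 1000 == 1 MYST).
var promiseAmountFlag = cli.IntFlag{
	Name:  "promise.amount",
	Usage: "Amount used for issuing a single promise (integer, 1000 == 1 MYST)",
	Value: 100,
}

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{promiseAmountFlag}
	app.Action = func(ctx *cli.Context) error {
		amount := ctx.Int(promiseAmountFlag.Name)
		// With the stated convention, the default of 100 means 0.1 MYST.
		fmt.Printf("promise.amount = %d (%.3f MYST)\n", amount, float64(amount)/1000)
		return nil
	}
	_ = app.Run(os.Args)
}
```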
@@ -132,6 +132,12 @@ func runControllers(ctx context.Context, config *Config) error { } } + for _, controller := range config.Controllers { + if err := controller(ctx, sc); err != nil { + return errors.Wrap(err, "controller") + } + } + if err := sc.Start(ctx); err != nil { return err }
1
package server import ( "context" "crypto/sha256" "encoding/hex" "fmt" "io/ioutil" net2 "net" "os" "path" "path/filepath" "strconv" "strings" "time" corev1 "k8s.io/api/core/v1" "github.com/k3s-io/helm-controller/pkg/helm" "github.com/pkg/errors" "github.com/rancher/k3s/pkg/apiaddresses" "github.com/rancher/k3s/pkg/clientaccess" "github.com/rancher/k3s/pkg/daemons/config" "github.com/rancher/k3s/pkg/daemons/control" "github.com/rancher/k3s/pkg/datadir" "github.com/rancher/k3s/pkg/deploy" "github.com/rancher/k3s/pkg/node" "github.com/rancher/k3s/pkg/nodepassword" "github.com/rancher/k3s/pkg/rootlessports" "github.com/rancher/k3s/pkg/servicelb" "github.com/rancher/k3s/pkg/static" "github.com/rancher/k3s/pkg/util" "github.com/rancher/k3s/pkg/version" v1 "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1" "github.com/rancher/wrangler/pkg/leader" "github.com/rancher/wrangler/pkg/resolvehome" "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/net" ) const ( MasterRoleLabelKey = "node-role.kubernetes.io/master" ControlPlaneRoleLabelKey = "node-role.kubernetes.io/control-plane" ETCDRoleLabelKey = "node-role.kubernetes.io/etcd" ) func ResolveDataDir(dataDir string) (string, error) { dataDir, err := datadir.Resolve(dataDir) return filepath.Join(dataDir, "server"), err } func StartServer(ctx context.Context, config *Config) error { if err := setupDataDirAndChdir(&config.ControlConfig); err != nil { return err } if err := setNoProxyEnv(&config.ControlConfig); err != nil { return err } if err := control.Server(ctx, &config.ControlConfig); err != nil { return errors.Wrap(err, "starting kubernetes") } config.ControlConfig.Runtime.Handler = router(ctx, config) if config.ControlConfig.DisableAPIServer { go setETCDLabelsAndAnnotations(ctx, config) } else { go startOnAPIServerReady(ctx, config) } for _, hook := range config.StartupHooks { if err := hook(ctx, config.ControlConfig.Runtime.APIServerReady, config.ControlConfig.Runtime.KubeConfigAdmin); err != nil { return errors.Wrap(err, "startup hook") } } ip := net2.ParseIP(config.ControlConfig.BindAddress) if ip == nil { hostIP, err := net.ChooseHostInterface() if err == nil { ip = hostIP } else { ip = net2.ParseIP("127.0.0.1") } } if err := printTokens(ip.String(), &config.ControlConfig); err != nil { return err } return writeKubeConfig(config.ControlConfig.Runtime.ServerCA, config) } func startOnAPIServerReady(ctx context.Context, config *Config) { select { case <-ctx.Done(): return case <-config.ControlConfig.Runtime.APIServerReady: if err := runControllers(ctx, config); err != nil { logrus.Fatalf("failed to start controllers: %v", err) } } } func runControllers(ctx context.Context, config *Config) error { controlConfig := &config.ControlConfig sc, err := newContext(ctx, controlConfig.Runtime.KubeConfigAdmin) if err != nil { return err } if err := stageFiles(ctx, sc, controlConfig); err != nil { return err } // run migration before we set controlConfig.Runtime.Core if err := nodepassword.MigrateFile( sc.Core.Core().V1().Secret(), sc.Core.Core().V1().Node(), controlConfig.Runtime.NodePasswdFile); err != nil { logrus.Warn(errors.Wrapf(err, "error migrating node-password file")) } controlConfig.Runtime.Core = sc.Core if controlConfig.Runtime.ClusterControllerStart != nil { if err := controlConfig.Runtime.ClusterControllerStart(ctx); err != nil { return errors.Wrapf(err, "starting cluster controllers") } } if err := sc.Start(ctx); err != nil { return err } start := func(ctx context.Context) 
{ if err := coreControllers(ctx, sc, config); err != nil { panic(err) } if err := sc.Start(ctx); err != nil { panic(err) } } go setControlPlaneRoleLabel(ctx, sc.Core.Core().V1().Node(), config) go setClusterDNSConfig(ctx, config, sc.Core.Core().V1().ConfigMap()) if controlConfig.NoLeaderElect { go func() { start(ctx) <-ctx.Done() logrus.Fatal("controllers exited") }() } else { go leader.RunOrDie(ctx, "", version.Program, sc.K8s, start) } return nil } func coreControllers(ctx context.Context, sc *Context, config *Config) error { if err := node.Register(ctx, !config.ControlConfig.Skips["coredns"], sc.Core.Core().V1().Secret(), sc.Core.Core().V1().ConfigMap(), sc.Core.Core().V1().Node()); err != nil { return err } helm.Register(ctx, sc.Apply, sc.Helm.Helm().V1().HelmChart(), sc.Helm.Helm().V1().HelmChartConfig(), sc.Batch.Batch().V1().Job(), sc.Auth.Rbac().V1().ClusterRoleBinding(), sc.Core.Core().V1().ServiceAccount(), sc.Core.Core().V1().ConfigMap()) if err := servicelb.Register(ctx, sc.K8s, sc.Apply, sc.Apps.Apps().V1().DaemonSet(), sc.Apps.Apps().V1().Deployment(), sc.Core.Core().V1().Node(), sc.Core.Core().V1().Pod(), sc.Core.Core().V1().Service(), sc.Core.Core().V1().Endpoints(), !config.DisableServiceLB, config.Rootless); err != nil { return err } if err := apiaddresses.Register(ctx, config.ControlConfig.Runtime, sc.Core.Core().V1().Endpoints()); err != nil { return err } if config.Rootless { return rootlessports.Register(ctx, sc.Core.Core().V1().Service(), !config.DisableServiceLB, config.ControlConfig.HTTPSPort) } return nil } func stageFiles(ctx context.Context, sc *Context, controlConfig *config.Control) error { dataDir := filepath.Join(controlConfig.DataDir, "static") if err := static.Stage(dataDir); err != nil { return err } dataDir = filepath.Join(controlConfig.DataDir, "manifests") templateVars := map[string]string{ "%{CLUSTER_DNS}%": controlConfig.ClusterDNS.String(), "%{CLUSTER_DOMAIN}%": controlConfig.ClusterDomain, "%{DEFAULT_LOCAL_STORAGE_PATH}%": controlConfig.DefaultLocalStoragePath, } skip := controlConfig.Skips if !skip["traefik"] && isHelmChartTraefikV1(sc) { logrus.Warn("Skipping Traefik v2 deployment due to existing Traefik v1 installation") skip["traefik"] = true } if err := deploy.Stage(dataDir, templateVars, skip); err != nil { return err } return deploy.WatchFiles(ctx, sc.Apply, sc.K3s.K3s().V1().Addon(), controlConfig.Disables, dataDir) } // isHelmChartTraefikV1 checks for an existing HelmChart resource with spec.chart containing traefik-1, // as deployed by the legacy chart (https://%{KUBERNETES_API}%/static/charts/traefik-1.81.0.tgz) func isHelmChartTraefikV1(sc *Context) bool { prefix := "traefik-1." 
helmChart, err := sc.Helm.Helm().V1().HelmChart().Get(metav1.NamespaceSystem, "traefik", metav1.GetOptions{}) if err != nil { logrus.WithError(err).Info("Failed to get existing traefik HelmChart") return false } chart := path.Base(helmChart.Spec.Chart) if strings.HasPrefix(chart, prefix) { logrus.WithField("chart", chart).Info("Found existing traefik v1 HelmChart") return true } return false } func HomeKubeConfig(write, rootless bool) (string, error) { if write { if os.Getuid() == 0 && !rootless { return datadir.GlobalConfig, nil } return resolvehome.Resolve(datadir.HomeConfig) } if _, err := os.Stat(datadir.GlobalConfig); err == nil { return datadir.GlobalConfig, nil } return resolvehome.Resolve(datadir.HomeConfig) } func printTokens(advertiseIP string, config *config.Control) error { var ( nodeFile string ) if advertiseIP == "" { advertiseIP = "127.0.0.1" } if len(config.Runtime.ServerToken) > 0 { p := filepath.Join(config.DataDir, "token") if err := writeToken(config.Runtime.ServerToken, p, config.Runtime.ServerCA); err == nil { logrus.Infof("Node token is available at %s", p) nodeFile = p } // backwards compatibility np := filepath.Join(config.DataDir, "node-token") if !isSymlink(np) { if err := os.RemoveAll(np); err != nil { return err } if err := os.Symlink(p, np); err != nil { return err } } } if len(nodeFile) > 0 { printToken(config.SupervisorPort, advertiseIP, "To join node to cluster:", "agent") } return nil } func writeKubeConfig(certs string, config *Config) error { ip := config.ControlConfig.BindAddress if ip == "" { ip = "127.0.0.1" } url := fmt.Sprintf("https://%s:%d", ip, config.ControlConfig.HTTPSPort) kubeConfig, err := HomeKubeConfig(true, config.Rootless) def := true if err != nil { kubeConfig = filepath.Join(config.ControlConfig.DataDir, "kubeconfig-"+version.Program+".yaml") def = false } kubeConfigSymlink := kubeConfig if config.ControlConfig.KubeConfigOutput != "" { kubeConfig = config.ControlConfig.KubeConfigOutput } if isSymlink(kubeConfigSymlink) { if err := os.Remove(kubeConfigSymlink); err != nil { logrus.Errorf("Failed to remove kubeconfig symlink") } } if err = clientaccess.WriteClientKubeConfig(kubeConfig, url, config.ControlConfig.Runtime.ServerCA, config.ControlConfig.Runtime.ClientAdminCert, config.ControlConfig.Runtime.ClientAdminKey); err == nil { logrus.Infof("Wrote kubeconfig %s", kubeConfig) } else { logrus.Errorf("Failed to generate kubeconfig: %v", err) return err } if config.ControlConfig.KubeConfigMode != "" { mode, err := strconv.ParseInt(config.ControlConfig.KubeConfigMode, 8, 0) if err == nil { util.SetFileModeForPath(kubeConfig, os.FileMode(mode)) } else { logrus.Errorf("Failed to set %s to mode %s: %v", kubeConfig, os.FileMode(mode), err) } } else { util.SetFileModeForPath(kubeConfig, os.FileMode(0600)) } if kubeConfigSymlink != kubeConfig { if err := writeConfigSymlink(kubeConfig, kubeConfigSymlink); err != nil { logrus.Errorf("Failed to write kubeconfig symlink: %v", err) } } if def { logrus.Infof("Run: %s kubectl", filepath.Base(os.Args[0])) } return nil } func setupDataDirAndChdir(config *config.Control) error { var ( err error ) config.DataDir, err = ResolveDataDir(config.DataDir) if err != nil { return err } dataDir := config.DataDir if err := os.MkdirAll(dataDir, 0700); err != nil { return errors.Wrapf(err, "can not mkdir %s", dataDir) } if err := os.Chdir(dataDir); err != nil { return errors.Wrapf(err, "can not chdir %s", dataDir) } return nil } func printToken(httpsPort int, advertiseIP, prefix, cmd string) { ip := advertiseIP if ip 
== "" { hostIP, err := net.ChooseHostInterface() if err != nil { logrus.Errorf("Failed to choose interface: %v", err) } ip = hostIP.String() } logrus.Infof("%s %s %s -s https://%s:%d -t ${NODE_TOKEN}", prefix, version.Program, cmd, ip, httpsPort) } func FormatToken(token string, certFile string) (string, error) { if len(token) == 0 { return token, nil } prefix := "K10" if len(certFile) > 0 { bytes, err := ioutil.ReadFile(certFile) if err != nil { return "", nil } digest := sha256.Sum256(bytes) prefix = "K10" + hex.EncodeToString(digest[:]) + "::" } return prefix + token, nil } func writeToken(token, file, certs string) error { if len(token) == 0 { return nil } token, err := FormatToken(token, certs) if err != nil { return err } return ioutil.WriteFile(file, []byte(token+"\n"), 0600) } func setNoProxyEnv(config *config.Control) error { splitter := func(c rune) bool { return c == ',' } envList := []string{} envList = append(envList, strings.FieldsFunc(os.Getenv("NO_PROXY"), splitter)...) envList = append(envList, strings.FieldsFunc(os.Getenv("no_proxy"), splitter)...) envList = append(envList, ".svc", "."+config.ClusterDomain, config.ClusterIPRange.String(), config.ServiceIPRange.String(), ) os.Unsetenv("no_proxy") return os.Setenv("NO_PROXY", strings.Join(envList, ",")) } func writeConfigSymlink(kubeconfig, kubeconfigSymlink string) error { if err := os.Remove(kubeconfigSymlink); err != nil && !os.IsNotExist(err) { return fmt.Errorf("failed to remove %s file: %v", kubeconfigSymlink, err) } if err := os.MkdirAll(filepath.Dir(kubeconfigSymlink), 0755); err != nil { return fmt.Errorf("failed to create path for symlink: %v", err) } if err := os.Symlink(kubeconfig, kubeconfigSymlink); err != nil { return fmt.Errorf("failed to create symlink: %v", err) } return nil } func isSymlink(config string) bool { if fi, err := os.Lstat(config); err == nil && (fi.Mode()&os.ModeSymlink == os.ModeSymlink) { return true } return false } func setControlPlaneRoleLabel(ctx context.Context, nodes v1.NodeClient, config *Config) error { if config.DisableAgent || config.ControlConfig.DisableAPIServer { return nil } for { nodeName := os.Getenv("NODE_NAME") if nodeName == "" { logrus.Info("Waiting for control-plane node agent startup") time.Sleep(1 * time.Second) continue } node, err := nodes.Get(nodeName, metav1.GetOptions{}) if err != nil { logrus.Infof("Waiting for control-plane node %s startup: %v", nodeName, err) time.Sleep(1 * time.Second) continue } // remove etcd label if etcd is disabled var etcdRoleLabelExists bool if config.ControlConfig.DisableETCD { if _, ok := node.Labels[ETCDRoleLabelKey]; ok { delete(node.Labels, ETCDRoleLabelKey) etcdRoleLabelExists = true } } if v, ok := node.Labels[ControlPlaneRoleLabelKey]; ok && v == "true" && !etcdRoleLabelExists { break } if node.Labels == nil { node.Labels = make(map[string]string) } node.Labels[ControlPlaneRoleLabelKey] = "true" node.Labels[MasterRoleLabelKey] = "true" _, err = nodes.Update(node) if err == nil { logrus.Infof("Control-plane role label has been set successfully on node: %s", nodeName) break } select { case <-ctx.Done(): return ctx.Err() case <-time.After(time.Second): } } return nil } func setClusterDNSConfig(ctx context.Context, controlConfig *Config, configMap v1.ConfigMapClient) error { // check if configmap already exists _, err := configMap.Get("kube-system", "cluster-dns", metav1.GetOptions{}) if err == nil { logrus.Infof("Cluster dns configmap already exists") return nil } clusterDNS := controlConfig.ControlConfig.ClusterDNS clusterDomain 
:= controlConfig.ControlConfig.ClusterDomain c := &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ Kind: "ConfigMap", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ Name: "cluster-dns", Namespace: "kube-system", }, Data: map[string]string{ "clusterDNS": clusterDNS.String(), "clusterDomain": clusterDomain, }, } for { _, err = configMap.Create(c) if err == nil { logrus.Infof("Cluster dns configmap has been set successfully") break } logrus.Infof("Waiting for control-plane dns startup: %v", err) select { case <-ctx.Done(): return ctx.Err() case <-time.After(time.Second): } } return nil }
1
9,287
Do the errors returned from these controllers indicate which controller threw the error? If not, there might be some value in making the "CustomControllers" type a `map[string]func(ctx context.Context, sc *server.Context) error`, with the name of the controller as the key, and including the key in this error string (see the sketch after this record). The same would apply below as well.
k3s-io-k3s
go
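A short sketch of the reviewer's suggestion: key the custom controllers by name so a failure can be attributed to a specific controller. Only the `CustomControllers` shape comes from the comment; the local `Context` type and the `runCustomControllers` helper are stand-ins invented so the snippet compiles on its own, not the k3s API.

```go
package main

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
)

// Context stands in for the real *server.Context; it exists only to make this
// sketch self-contained.
type Context struct{}

// CustomControllers keyed by a human-readable name, as suggested in the review.
type CustomControllers map[string]func(ctx context.Context, sc *Context) error

func runCustomControllers(ctx context.Context, sc *Context, controllers CustomControllers) error {
	for name, controller := range controllers {
		if err := controller(ctx, sc); err != nil {
			// Wrapping with the map key makes it obvious which controller failed.
			return errors.Wrapf(err, "custom controller %q", name)
		}
	}
	return nil
}

func main() {
	controllers := CustomControllers{
		"noop":    func(ctx context.Context, sc *Context) error { return nil },
		"failing": func(ctx context.Context, sc *Context) error { return fmt.Errorf("boom") },
	}
	if err := runCustomControllers(context.Background(), &Context{}, controllers); err != nil {
		fmt.Println(err) // e.g. custom controller "failing": boom
	}
}
```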
@@ -2204,8 +2204,13 @@ CheckedError Parser::DoParse(const char *source, } uoffset_t toff; ECHECK(ParseTable(*root_struct_def_, nullptr, &toff)); - builder_.Finish(Offset<Table>(toff), - file_identifier_.length() ? file_identifier_.c_str() : nullptr); + if (opts.prefix_size) { + builder_.FinishSizePrefixed(Offset<Table>(toff), + file_identifier_.length() ? file_identifier_.c_str() : nullptr); + } else { + builder_.Finish(Offset<Table>(toff), + file_identifier_.length() ? file_identifier_.c_str() : nullptr); + } } else if (IsIdent("enum")) { ECHECK(ParseEnum(false, nullptr)); } else if (IsIdent("union")) {
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <list> #include <iostream> #ifdef _WIN32 #if !defined(_USE_MATH_DEFINES) #define _USE_MATH_DEFINES // For M_PI. #endif // !defined(_USE_MATH_DEFINES) #endif // _WIN32 #include <math.h> #include "flatbuffers/idl.h" #include "flatbuffers/util.h" namespace flatbuffers { const char *const kTypeNames[] = { #define FLATBUFFERS_TD(ENUM, IDLTYPE, \ CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ IDLTYPE, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD nullptr }; const char kTypeSizes[] = { #define FLATBUFFERS_TD(ENUM, IDLTYPE, \ CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ sizeof(CTYPE), FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD }; // The enums in the reflection schema should match the ones we use internally. // Compare the last element to check if these go out of sync. static_assert(BASE_TYPE_UNION == static_cast<BaseType>(reflection::Union), "enums don't match"); // Any parsing calls have to be wrapped in this macro, which automates // handling of recursive error checking a bit. It will check the received // CheckedError object, and return straight away on error. #define ECHECK(call) { auto ce = (call); if (ce.Check()) return ce; } // These two functions are called hundreds of times below, so define a short // form: #define NEXT() ECHECK(Next()) #define EXPECT(tok) ECHECK(Expect(tok)) static bool ValidateUTF8(const std::string &str) { const char *s = &str[0]; const char * const sEnd = s + str.length(); while (s < sEnd) { if (FromUTF8(&s) < 0) { return false; } } return true; } CheckedError Parser::Error(const std::string &msg) { error_ = file_being_parsed_.length() ? AbsolutePath(file_being_parsed_) : ""; #ifdef _WIN32 error_ += "(" + NumToString(line_) + ")"; // MSVC alike #else if (file_being_parsed_.length()) error_ += ":"; error_ += NumToString(line_) + ":0"; // gcc alike #endif error_ += ": error: " + msg; return CheckedError(true); } inline CheckedError NoError() { return CheckedError(false); } inline std::string OutOfRangeErrorMsg(int64_t val, const std::string &op, int64_t limit) { const std::string cause = NumToString(val) + op + NumToString(limit); return "constant does not fit (" + cause + ")"; } // Ensure that integer values we parse fit inside the declared integer type. CheckedError Parser::CheckInRange(int64_t val, int64_t min, int64_t max) { if (val < min) return Error(OutOfRangeErrorMsg(val, " < ", min)); else if (val > max) return Error(OutOfRangeErrorMsg(val, " > ", max)); else return NoError(); } // atot: templated version of atoi/atof: convert a string to an instance of T. 
template<typename T> inline CheckedError atot(const char *s, Parser &parser, T *val) { int64_t i = StringToInt(s); const int64_t min = flatbuffers::numeric_limits<T>::min(); const int64_t max = flatbuffers::numeric_limits<T>::max(); ECHECK(parser.CheckInRange(i, min, max)); *val = (T)i; return NoError(); } template<> inline CheckedError atot<uint64_t>(const char *s, Parser &parser, uint64_t *val) { (void)parser; *val = StringToUInt(s); return NoError(); } template<> inline CheckedError atot<bool>(const char *s, Parser &parser, bool *val) { (void)parser; *val = 0 != atoi(s); return NoError(); } template<> inline CheckedError atot<float>(const char *s, Parser &parser, float *val) { (void)parser; *val = static_cast<float>(strtod(s, nullptr)); return NoError(); } template<> inline CheckedError atot<double>(const char *s, Parser &parser, double *val) { (void)parser; *val = strtod(s, nullptr); return NoError(); } template<> inline CheckedError atot<Offset<void>>(const char *s, Parser &parser, Offset<void> *val) { (void)parser; *val = Offset<void>(atoi(s)); return NoError(); } std::string Namespace::GetFullyQualifiedName(const std::string &name, size_t max_components) const { // Early exit if we don't have a defined namespace. if (components.size() == 0 || !max_components) { return name; } std::stringstream stream; for (size_t i = 0; i < std::min(components.size(), max_components); i++) { if (i) { stream << "."; } stream << components[i]; } if (name.length()) stream << "." << name; return stream.str(); } // Declare tokens we'll use. Single character tokens are represented by their // ascii character code (e.g. '{'), others above 256. #define FLATBUFFERS_GEN_TOKENS(TD) \ TD(Eof, 256, "end of file") \ TD(StringConstant, 257, "string constant") \ TD(IntegerConstant, 258, "integer constant") \ TD(FloatConstant, 259, "float constant") \ TD(Identifier, 260, "identifier") #ifdef __GNUC__ __extension__ // Stop GCC complaining about trailing comma with -Wpendantic. #endif enum { #define FLATBUFFERS_TOKEN(NAME, VALUE, STRING) kToken ## NAME = VALUE, FLATBUFFERS_GEN_TOKENS(FLATBUFFERS_TOKEN) #undef FLATBUFFERS_TOKEN }; static std::string TokenToString(int t) { static const char *tokens[] = { #define FLATBUFFERS_TOKEN(NAME, VALUE, STRING) STRING, FLATBUFFERS_GEN_TOKENS(FLATBUFFERS_TOKEN) #undef FLATBUFFERS_TOKEN #define FLATBUFFERS_TD(ENUM, IDLTYPE, \ CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ IDLTYPE, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD }; if (t < 256) { // A single ascii char token. std::string s; s.append(1, static_cast<char>(t)); return s; } else { // Other tokens. return tokens[t - 256]; } } std::string Parser::TokenToStringId(int t) { return t == kTokenIdentifier ? attribute_ : TokenToString(t); } // Parses exactly nibbles worth of hex digits into a number, or error. 
CheckedError Parser::ParseHexNum(int nibbles, uint64_t *val) { for (int i = 0; i < nibbles; i++) if (!isxdigit(static_cast<const unsigned char>(cursor_[i]))) return Error("escape code must be followed by " + NumToString(nibbles) + " hex digits"); std::string target(cursor_, cursor_ + nibbles); *val = StringToUInt(target.c_str(), nullptr, 16); cursor_ += nibbles; return NoError(); } CheckedError Parser::SkipByteOrderMark() { if (static_cast<unsigned char>(*cursor_) != 0xef) return NoError(); cursor_++; if (static_cast<unsigned char>(*cursor_) != 0xbb) return Error("invalid utf-8 byte order mark"); cursor_++; if (static_cast<unsigned char>(*cursor_) != 0xbf) return Error("invalid utf-8 byte order mark"); cursor_++; return NoError(); } bool IsIdentifierStart(char c) { return isalpha(static_cast<unsigned char>(c)) || c == '_'; } CheckedError Parser::Next() { doc_comment_.clear(); bool seen_newline = false; attribute_.clear(); for (;;) { char c = *cursor_++; token_ = c; switch (c) { case '\0': cursor_--; token_ = kTokenEof; return NoError(); case ' ': case '\r': case '\t': break; case '\n': line_++; seen_newline = true; break; case '{': case '}': case '(': case ')': case '[': case ']': case ',': case ':': case ';': case '=': return NoError(); case '.': if(!isdigit(static_cast<const unsigned char>(*cursor_))) return NoError(); return Error("floating point constant can\'t start with \".\""); case '\"': case '\'': { int unicode_high_surrogate = -1; while (*cursor_ != c) { if (*cursor_ < ' ' && *cursor_ >= 0) return Error("illegal character in string constant"); if (*cursor_ == '\\') { cursor_++; if (unicode_high_surrogate != -1 && *cursor_ != 'u') { return Error( "illegal Unicode sequence (unpaired high surrogate)"); } switch (*cursor_) { case 'n': attribute_ += '\n'; cursor_++; break; case 't': attribute_ += '\t'; cursor_++; break; case 'r': attribute_ += '\r'; cursor_++; break; case 'b': attribute_ += '\b'; cursor_++; break; case 'f': attribute_ += '\f'; cursor_++; break; case '\"': attribute_ += '\"'; cursor_++; break; case '\'': attribute_ += '\''; cursor_++; break; case '\\': attribute_ += '\\'; cursor_++; break; case '/': attribute_ += '/'; cursor_++; break; case 'x': { // Not in the JSON standard cursor_++; uint64_t val; ECHECK(ParseHexNum(2, &val)); attribute_ += static_cast<char>(val); break; } case 'u': { cursor_++; uint64_t val; ECHECK(ParseHexNum(4, &val)); if (val >= 0xD800 && val <= 0xDBFF) { if (unicode_high_surrogate != -1) { return Error( "illegal Unicode sequence (multiple high surrogates)"); } else { unicode_high_surrogate = static_cast<int>(val); } } else if (val >= 0xDC00 && val <= 0xDFFF) { if (unicode_high_surrogate == -1) { return Error( "illegal Unicode sequence (unpaired low surrogate)"); } else { int code_point = 0x10000 + ((unicode_high_surrogate & 0x03FF) << 10) + (val & 0x03FF); ToUTF8(code_point, &attribute_); unicode_high_surrogate = -1; } } else { if (unicode_high_surrogate != -1) { return Error( "illegal Unicode sequence (unpaired high surrogate)"); } ToUTF8(static_cast<int>(val), &attribute_); } break; } default: return Error("unknown escape code in string constant"); } } else { // printable chars + UTF-8 bytes if (unicode_high_surrogate != -1) { return Error( "illegal Unicode sequence (unpaired high surrogate)"); } attribute_ += *cursor_++; } } if (unicode_high_surrogate != -1) { return Error( "illegal Unicode sequence (unpaired high surrogate)"); } cursor_++; if (!opts.allow_non_utf8 && !ValidateUTF8(attribute_)) { return Error("illegal UTF-8 sequence"); } 
token_ = kTokenStringConstant; return NoError(); } case '/': if (*cursor_ == '/') { const char *start = ++cursor_; while (*cursor_ && *cursor_ != '\n' && *cursor_ != '\r') cursor_++; if (*start == '/') { // documentation comment if (cursor_ != source_ && !seen_newline) return Error( "a documentation comment should be on a line on its own"); doc_comment_.push_back(std::string(start + 1, cursor_)); } break; } else if (*cursor_ == '*') { cursor_++; // TODO: make nested. while (*cursor_ != '*' || cursor_[1] != '/') { if (*cursor_ == '\n') line_++; if (!*cursor_) return Error("end of file in comment"); cursor_++; } cursor_ += 2; break; } // fall thru default: if (IsIdentifierStart(c)) { // Collect all chars of an identifier: const char *start = cursor_ - 1; while (isalnum(static_cast<unsigned char>(*cursor_)) || *cursor_ == '_') cursor_++; attribute_.append(start, cursor_); token_ = kTokenIdentifier; return NoError(); } else if (isdigit(static_cast<unsigned char>(c)) || c == '-') { const char *start = cursor_ - 1; if (c == '-' && *cursor_ == '0' && (cursor_[1] == 'x' || cursor_[1] == 'X')) { ++start; ++cursor_; attribute_.append(&c, &c + 1); c = '0'; } if (c == '0' && (*cursor_ == 'x' || *cursor_ == 'X')) { cursor_++; while (isxdigit(static_cast<unsigned char>(*cursor_))) cursor_++; attribute_.append(start + 2, cursor_); attribute_ = NumToString(static_cast<int64_t>( StringToUInt(attribute_.c_str(), nullptr, 16))); token_ = kTokenIntegerConstant; return NoError(); } while (isdigit(static_cast<unsigned char>(*cursor_))) cursor_++; if (*cursor_ == '.' || *cursor_ == 'e' || *cursor_ == 'E') { if (*cursor_ == '.') { cursor_++; while (isdigit(static_cast<unsigned char>(*cursor_))) cursor_++; } // See if this float has a scientific notation suffix. Both JSON // and C++ (through strtod() we use) have the same format: if (*cursor_ == 'e' || *cursor_ == 'E') { cursor_++; if (*cursor_ == '+' || *cursor_ == '-') cursor_++; while (isdigit(static_cast<unsigned char>(*cursor_))) cursor_++; } token_ = kTokenFloatConstant; } else { token_ = kTokenIntegerConstant; } attribute_.append(start, cursor_); return NoError(); } std::string ch; ch = c; if (c < ' ' || c > '~') ch = "code: " + NumToString(c); return Error("illegal character: " + ch); } } } // Check if a given token is next. bool Parser::Is(int t) { return t == token_; } bool Parser::IsIdent(const char *id) { return token_ == kTokenIdentifier && attribute_ == id; } // Expect a given token to be next, consume it, or error if not present. CheckedError Parser::Expect(int t) { if (t != token_) { return Error("expecting: " + TokenToString(t) + " instead got: " + TokenToStringId(token_)); } NEXT(); return NoError(); } CheckedError Parser::ParseNamespacing(std::string *id, std::string *last) { while (Is('.')) { NEXT(); *id += "."; *id += attribute_; if (last) *last = attribute_; EXPECT(kTokenIdentifier); } return NoError(); } EnumDef *Parser::LookupEnum(const std::string &id) { // Search thru parent namespaces. 
for (int components = static_cast<int>(current_namespace_->components.size()); components >= 0; components--) { auto ed = enums_.Lookup( current_namespace_->GetFullyQualifiedName(id, components)); if (ed) return ed; } return nullptr; } CheckedError Parser::ParseTypeIdent(Type &type) { std::string id = attribute_; EXPECT(kTokenIdentifier); ECHECK(ParseNamespacing(&id, nullptr)); auto enum_def = LookupEnum(id); if (enum_def) { type = enum_def->underlying_type; if (enum_def->is_union) type.base_type = BASE_TYPE_UNION; } else { type.base_type = BASE_TYPE_STRUCT; type.struct_def = LookupCreateStruct(id); } return NoError(); } // Parse any IDL type. CheckedError Parser::ParseType(Type &type) { if (token_ == kTokenIdentifier) { if (IsIdent("bool")) { type.base_type = BASE_TYPE_BOOL; NEXT(); } else if (IsIdent("byte") || IsIdent("int8")) { type.base_type = BASE_TYPE_CHAR; NEXT(); } else if (IsIdent("ubyte") || IsIdent("uint8")) { type.base_type = BASE_TYPE_UCHAR; NEXT(); } else if (IsIdent("short") || IsIdent("int16")) { type.base_type = BASE_TYPE_SHORT; NEXT(); } else if (IsIdent("ushort") || IsIdent("uint16")) { type.base_type = BASE_TYPE_USHORT; NEXT(); } else if (IsIdent("int") || IsIdent("int32")) { type.base_type = BASE_TYPE_INT; NEXT(); } else if (IsIdent("uint") || IsIdent("uint32")) { type.base_type = BASE_TYPE_UINT; NEXT(); } else if (IsIdent("long") || IsIdent("int64")) { type.base_type = BASE_TYPE_LONG; NEXT(); } else if (IsIdent("ulong") || IsIdent("uint64")) { type.base_type = BASE_TYPE_ULONG; NEXT(); } else if (IsIdent("float") || IsIdent("float32")) { type.base_type = BASE_TYPE_FLOAT; NEXT(); } else if (IsIdent("double") || IsIdent("float64")) { type.base_type = BASE_TYPE_DOUBLE; NEXT(); } else if (IsIdent("string")) { type.base_type = BASE_TYPE_STRING; NEXT(); } else { ECHECK(ParseTypeIdent(type)); } } else if (token_ == '[') { NEXT(); Type subtype; ECHECK(ParseType(subtype)); if (subtype.base_type == BASE_TYPE_VECTOR) { // We could support this, but it will complicate things, and it's // easier to work around with a struct around the inner vector. 
return Error( "nested vector types not supported (wrap in table first)."); } type = Type(BASE_TYPE_VECTOR, subtype.struct_def, subtype.enum_def); type.element = subtype.base_type; EXPECT(']'); } else { return Error("illegal type syntax"); } return NoError(); } CheckedError Parser::AddField(StructDef &struct_def, const std::string &name, const Type &type, FieldDef **dest) { auto &field = *new FieldDef(); field.value.offset = FieldIndexToOffset(static_cast<voffset_t>(struct_def.fields.vec.size())); field.name = name; field.file = struct_def.file; field.value.type = type; if (struct_def.fixed) { // statically compute the field offset auto size = InlineSize(type); auto alignment = InlineAlignment(type); // structs_ need to have a predictable format, so we need to align to // the largest scalar struct_def.minalign = std::max(struct_def.minalign, alignment); struct_def.PadLastField(alignment); field.value.offset = static_cast<voffset_t>(struct_def.bytesize); struct_def.bytesize += size; } if (struct_def.fields.Add(name, &field)) return Error("field already exists: " + name); *dest = &field; return NoError(); } CheckedError Parser::ParseField(StructDef &struct_def) { std::string name = attribute_; if (structs_.Lookup(name)) return Error("field name can not be the same as table/struct name"); std::vector<std::string> dc = doc_comment_; EXPECT(kTokenIdentifier); EXPECT(':'); Type type; ECHECK(ParseType(type)); if (struct_def.fixed && !IsScalar(type.base_type) && !IsStruct(type)) return Error("structs_ may contain only scalar or struct fields"); FieldDef *typefield = nullptr; if (type.base_type == BASE_TYPE_UNION) { // For union fields, add a second auto-generated field to hold the type, // with a special suffix. ECHECK(AddField(struct_def, name + UnionTypeFieldSuffix(), type.enum_def->underlying_type, &typefield)); } else if (type.base_type == BASE_TYPE_VECTOR && type.element == BASE_TYPE_UNION) { // Only cpp, js and ts supports the union vector feature so far. if (!SupportsVectorOfUnions()) { return Error("Vectors of unions are not yet supported in all " "the specified programming languages."); } // For vector of union fields, add a second auto-generated vector field to // hold the types, with a special suffix. 
Type union_vector(BASE_TYPE_VECTOR, nullptr, type.enum_def); union_vector.element = BASE_TYPE_UTYPE; ECHECK(AddField(struct_def, name + UnionTypeFieldSuffix(), union_vector, &typefield)); } FieldDef *field; ECHECK(AddField(struct_def, name, type, &field)); if (token_ == '=') { NEXT(); if (!IsScalar(type.base_type)) return Error("default values currently only supported for scalars"); ECHECK(ParseSingleValue(field->value)); } if (IsFloat(field->value.type.base_type)) { if (!strpbrk(field->value.constant.c_str(), ".eE")) field->value.constant += ".0"; } if (type.enum_def && IsScalar(type.base_type) && !struct_def.fixed && !type.enum_def->attributes.Lookup("bit_flags") && !type.enum_def->ReverseLookup(static_cast<int>( StringToInt(field->value.constant.c_str())))) return Error("enum " + type.enum_def->name + " does not have a declaration for this field\'s default of " + field->value.constant); field->doc_comment = dc; ECHECK(ParseMetaData(&field->attributes)); field->deprecated = field->attributes.Lookup("deprecated") != nullptr; auto hash_name = field->attributes.Lookup("hash"); if (hash_name) { switch (type.base_type) { case BASE_TYPE_INT: case BASE_TYPE_UINT: { if (FindHashFunction32(hash_name->constant.c_str()) == nullptr) return Error("Unknown hashing algorithm for 32 bit types: " + hash_name->constant); break; } case BASE_TYPE_LONG: case BASE_TYPE_ULONG: { if (FindHashFunction64(hash_name->constant.c_str()) == nullptr) return Error("Unknown hashing algorithm for 64 bit types: " + hash_name->constant); break; } default: return Error( "only int, uint, long and ulong data types support hashing."); } } auto cpp_type = field->attributes.Lookup("cpp_type"); if (cpp_type) { if (!hash_name) return Error("cpp_type can only be used with a hashed field"); } if (field->deprecated && struct_def.fixed) return Error("can't deprecate fields in a struct"); field->required = field->attributes.Lookup("required") != nullptr; if (field->required && (struct_def.fixed || IsScalar(field->value.type.base_type))) return Error("only non-scalar fields in tables may be 'required'"); field->key = field->attributes.Lookup("key") != nullptr; if (field->key) { if (struct_def.has_key) return Error("only one field may be set as 'key'"); struct_def.has_key = true; if (!IsScalar(field->value.type.base_type)) { field->required = true; if (field->value.type.base_type != BASE_TYPE_STRING) return Error("'key' field must be string or scalar type"); } } field->native_inline = field->attributes.Lookup("native_inline") != nullptr; if (field->native_inline && !IsStruct(field->value.type)) return Error("native_inline can only be defined on structs'"); auto nested = field->attributes.Lookup("nested_flatbuffer"); if (nested) { if (nested->type.base_type != BASE_TYPE_STRING) return Error( "nested_flatbuffer attribute must be a string (the root type)"); if (field->value.type.base_type != BASE_TYPE_VECTOR || field->value.type.element != BASE_TYPE_UCHAR) return Error( "nested_flatbuffer attribute may only apply to a vector of ubyte"); // This will cause an error if the root type of the nested flatbuffer // wasn't defined elsewhere. 
LookupCreateStruct(nested->constant); // Keep a pointer to StructDef in FieldDef to simplify re-use later auto nested_qualified_name = current_namespace_->GetFullyQualifiedName(nested->constant); field->nested_flatbuffer = structs_.Lookup(nested_qualified_name); } if (field->attributes.Lookup("flexbuffer")) { field->flexbuffer = true; uses_flexbuffers_ = true; if (field->value.type.base_type != BASE_TYPE_VECTOR || field->value.type.element != BASE_TYPE_UCHAR) return Error( "flexbuffer attribute may only apply to a vector of ubyte"); } if (typefield) { if (!IsScalar(typefield->value.type.base_type)) { // this is a union vector field typefield->required = field->required; } // If this field is a union, and it has a manually assigned id, // the automatically added type field should have an id as well (of N - 1). auto attr = field->attributes.Lookup("id"); if (attr) { auto id = atoi(attr->constant.c_str()); auto val = new Value(); val->type = attr->type; val->constant = NumToString(id - 1); typefield->attributes.Add("id", val); } } EXPECT(';'); return NoError(); } CheckedError Parser::ParseString(Value &val) { auto s = attribute_; EXPECT(kTokenStringConstant); val.constant = NumToString(builder_.CreateString(s).o); return NoError(); } CheckedError Parser::ParseComma() { if (!opts.protobuf_ascii_alike) EXPECT(','); return NoError(); } CheckedError Parser::ParseAnyValue(Value &val, FieldDef *field, size_t parent_fieldn, const StructDef *parent_struct_def) { switch (val.type.base_type) { case BASE_TYPE_UNION: { assert(field); std::string constant; // Find corresponding type field we may have already parsed. for (auto elem = field_stack_.rbegin(); elem != field_stack_.rbegin() + parent_fieldn; ++elem) { auto &type = elem->second->value.type; if (type.base_type == BASE_TYPE_UTYPE && type.enum_def == val.type.enum_def) { constant = elem->first.constant; break; } } if (constant.empty()) { // We haven't seen the type field yet. Sadly a lot of JSON writers // output these in alphabetical order, meaning it comes after this // value. So we scan past the value to find it, then come back here. auto type_name = field->name + UnionTypeFieldSuffix(); assert(parent_struct_def); auto type_field = parent_struct_def->fields.Lookup(type_name); assert(type_field); // Guaranteed by ParseField(). // Remember where we are in the source file, so we can come back here. auto backup = *static_cast<ParserState *>(this); ECHECK(SkipAnyJsonValue()); // The table. ECHECK(ParseComma()); auto next_name = attribute_; if (Is(kTokenStringConstant)) { NEXT(); } else { EXPECT(kTokenIdentifier); } if (next_name != type_name) return Error("missing type field after this union value: " + type_name); EXPECT(':'); Value type_val = type_field->value; ECHECK(ParseAnyValue(type_val, type_field, 0, nullptr)); constant = type_val.constant; // Got the information we needed, now rewind: *static_cast<ParserState *>(this) = backup; } uint8_t enum_idx; ECHECK(atot(constant.c_str(), *this, &enum_idx)); auto enum_val = val.type.enum_def->ReverseLookup(enum_idx); if (!enum_val) return Error("illegal type id for: " + field->name); if (enum_val->union_type.base_type == BASE_TYPE_STRUCT) { ECHECK(ParseTable(*enum_val->union_type.struct_def, &val.constant, nullptr)); if (enum_val->union_type.struct_def->fixed) { // All BASE_TYPE_UNION values are offsets, so turn this into one. 
SerializeStruct(*enum_val->union_type.struct_def, val); builder_.ClearOffsets(); val.constant = NumToString(builder_.GetSize()); } } else if (enum_val->union_type.base_type == BASE_TYPE_STRING) { ECHECK(ParseString(val)); } else { assert(false); } break; } case BASE_TYPE_STRUCT: ECHECK(ParseTable(*val.type.struct_def, &val.constant, nullptr)); break; case BASE_TYPE_STRING: { ECHECK(ParseString(val)); break; } case BASE_TYPE_VECTOR: { uoffset_t off; ECHECK(ParseVector(val.type.VectorType(), &off)); val.constant = NumToString(off); break; } case BASE_TYPE_INT: case BASE_TYPE_UINT: case BASE_TYPE_LONG: case BASE_TYPE_ULONG: { if (field && field->attributes.Lookup("hash") && (token_ == kTokenIdentifier || token_ == kTokenStringConstant)) { ECHECK(ParseHash(val, field)); } else { ECHECK(ParseSingleValue(val)); } break; } default: ECHECK(ParseSingleValue(val)); break; } return NoError(); } void Parser::SerializeStruct(const StructDef &struct_def, const Value &val) { assert(val.constant.length() == struct_def.bytesize); builder_.Align(struct_def.minalign); builder_.PushBytes(reinterpret_cast<const uint8_t *>(val.constant.c_str()), struct_def.bytesize); builder_.AddStructOffset(val.offset, builder_.GetSize()); } CheckedError Parser::ParseTableDelimiters(size_t &fieldn, const StructDef *struct_def, ParseTableDelimitersBody body, void *state) { // We allow tables both as JSON object{ .. } with field names // or vector[..] with all fields in order char terminator = '}'; bool is_nested_vector = struct_def && Is('['); if (is_nested_vector) { NEXT(); terminator = ']'; } else { EXPECT('{'); } for (;;) { if ((!opts.strict_json || !fieldn) && Is(terminator)) break; std::string name; if (is_nested_vector) { if (fieldn > struct_def->fields.vec.size()) { return Error("too many unnamed fields in nested array"); } name = struct_def->fields.vec[fieldn]->name; } else { name = attribute_; if (Is(kTokenStringConstant)) { NEXT(); } else { EXPECT(opts.strict_json ? kTokenStringConstant : kTokenIdentifier); } if (!opts.protobuf_ascii_alike || !(Is('{') || Is('['))) EXPECT(':'); } ECHECK(body(name, fieldn, struct_def, state)); if (Is(terminator)) break; ECHECK(ParseComma()); } NEXT(); if (is_nested_vector && fieldn != struct_def->fields.vec.size()) { return Error("wrong number of unnamed fields in table vector"); } return NoError(); } CheckedError Parser::ParseTable(const StructDef &struct_def, std::string *value, uoffset_t *ovalue) { size_t fieldn_outer = 0; auto err = ParseTableDelimiters(fieldn_outer, &struct_def, [](const std::string &name, size_t &fieldn, const StructDef *struct_def_inner, void *state) -> CheckedError { Parser *parser = static_cast<Parser *>(state); if (name == "$schema") { ECHECK(parser->Expect(kTokenStringConstant)); return NoError(); } auto field = struct_def_inner->fields.Lookup(name); if (!field) { if (!parser->opts.skip_unexpected_fields_in_json) { return parser->Error("unknown field: " + name); } else { ECHECK(parser->SkipAnyJsonValue()); } } else { if (parser->IsIdent("null")) { ECHECK(parser->Next()); // Ignore this field. 
} else { Value val = field->value; if (field->flexbuffer) { flexbuffers::Builder builder(1024, flexbuffers::BUILDER_FLAG_SHARE_ALL); ECHECK(parser->ParseFlexBufferValue(&builder)); builder.Finish(); auto off = parser->builder_.CreateVector(builder.GetBuffer()); val.constant = NumToString(off.o); } else if (field->nested_flatbuffer) { ECHECK(parser->ParseNestedFlatbuffer(val, field, fieldn, struct_def_inner)); } else { ECHECK(parser->ParseAnyValue(val, field, fieldn, struct_def_inner)); } // Hardcoded insertion-sort with error-check. // If fields are specified in order, then this loop exits immediately. auto elem = parser->field_stack_.rbegin(); for (; elem != parser->field_stack_.rbegin() + fieldn; ++elem) { auto existing_field = elem->second; if (existing_field == field) return parser->Error("field set more than once: " + field->name); if (existing_field->value.offset < field->value.offset) break; } // Note: elem points to before the insertion point, thus .base() points // to the correct spot. parser->field_stack_.insert(elem.base(), std::make_pair(val, field)); fieldn++; } } return NoError(); }, this); ECHECK(err); // Check if all required fields are parsed. for (auto field_it = struct_def.fields.vec.begin(); field_it != struct_def.fields.vec.end(); ++field_it) { auto required_field = *field_it; if (!required_field->required) { continue; } bool found = false; for (auto pf_it = field_stack_.end() - fieldn_outer; pf_it != field_stack_.end(); ++pf_it) { auto parsed_field = pf_it->second; if (parsed_field == required_field) { found = true; break; } } if (!found) { return Error("required field is missing: " + required_field->name + " in " + struct_def.name); } } if (struct_def.fixed && fieldn_outer != struct_def.fields.vec.size()) return Error("struct: wrong number of initializers: " + struct_def.name); auto start = struct_def.fixed ? builder_.StartStruct(struct_def.minalign) : builder_.StartTable(); for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1; size; size /= 2) { // Go through elements in reverse, since we're building the data backwards. 
for (auto it = field_stack_.rbegin(); it != field_stack_.rbegin() + fieldn_outer; ++it) { auto &field_value = it->first; auto field = it->second; if (!struct_def.sortbysize || size == SizeOf(field_value.type.base_type)) { switch (field_value.type.base_type) { #define FLATBUFFERS_TD(ENUM, IDLTYPE, \ CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ case BASE_TYPE_ ## ENUM: \ builder_.Pad(field->padding); \ if (struct_def.fixed) { \ CTYPE val; \ ECHECK(atot(field_value.constant.c_str(), *this, &val)); \ builder_.PushElement(val); \ } else { \ CTYPE val, valdef; \ ECHECK(atot(field_value.constant.c_str(), *this, &val)); \ ECHECK(atot(field->value.constant.c_str(), *this, &valdef)); \ builder_.AddElement(field_value.offset, val, valdef); \ } \ break; FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD); #undef FLATBUFFERS_TD #define FLATBUFFERS_TD(ENUM, IDLTYPE, \ CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ case BASE_TYPE_ ## ENUM: \ builder_.Pad(field->padding); \ if (IsStruct(field->value.type)) { \ SerializeStruct(*field->value.type.struct_def, field_value); \ } else { \ CTYPE val; \ ECHECK(atot(field_value.constant.c_str(), *this, &val)); \ builder_.AddOffset(field_value.offset, val); \ } \ break; FLATBUFFERS_GEN_TYPES_POINTER(FLATBUFFERS_TD); #undef FLATBUFFERS_TD } } } } for (size_t i = 0; i < fieldn_outer; i++) field_stack_.pop_back(); if (struct_def.fixed) { builder_.ClearOffsets(); builder_.EndStruct(); assert(value); // Temporarily store this struct in the value string, since it is to // be serialized in-place elsewhere. value->assign( reinterpret_cast<const char *>(builder_.GetCurrentBufferPointer()), struct_def.bytesize); builder_.PopBytes(struct_def.bytesize); assert(!ovalue); } else { auto val = builder_.EndTable(start); if (ovalue) *ovalue = val; if (value) *value = NumToString(val); } return NoError(); } CheckedError Parser::ParseVectorDelimiters(size_t &count, ParseVectorDelimitersBody body, void *state) { EXPECT('['); for (;;) { if ((!opts.strict_json || !count) && Is(']')) break; ECHECK(body(count, state)); count++; if (Is(']')) break; ECHECK(ParseComma()); } NEXT(); return NoError(); } CheckedError Parser::ParseVector(const Type &type, uoffset_t *ovalue) { size_t count = 0; std::pair<Parser *, const Type &> parser_and_type_state(this, type); auto err = ParseVectorDelimiters(count, [](size_t &, void *state) -> CheckedError { auto *parser_and_type = static_cast<std::pair<Parser *, const Type &> *>(state); auto *parser = parser_and_type->first; Value val; val.type = parser_and_type->second; ECHECK(parser->ParseAnyValue(val, nullptr, 0, nullptr)); parser->field_stack_.push_back(std::make_pair(val, nullptr)); return NoError(); }, &parser_and_type_state); ECHECK(err); builder_.StartVector(count * InlineSize(type) / InlineAlignment(type), InlineAlignment(type)); for (size_t i = 0; i < count; i++) { // start at the back, since we're building the data backwards. 
auto &val = field_stack_.back().first; switch (val.type.base_type) { #define FLATBUFFERS_TD(ENUM, IDLTYPE, \ CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ case BASE_TYPE_ ## ENUM: \ if (IsStruct(val.type)) SerializeStruct(*val.type.struct_def, val); \ else { \ CTYPE elem; \ ECHECK(atot(val.constant.c_str(), *this, &elem)); \ builder_.PushElement(elem); \ } \ break; FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD } field_stack_.pop_back(); } builder_.ClearOffsets(); *ovalue = builder_.EndVector(count); return NoError(); } CheckedError Parser::ParseNestedFlatbuffer(Value &val, FieldDef *field, size_t fieldn, const StructDef *parent_struct_def) { if (token_ == '[') {// backwards compat for 'legacy' ubyte buffers ECHECK(ParseAnyValue(val, field, fieldn, parent_struct_def)); } else { auto cursor_at_value_begin = cursor_; ECHECK(SkipAnyJsonValue()); std::string substring(cursor_at_value_begin -1 , cursor_ -1); // Create and initialize new parser Parser nested_parser; assert(field->nested_flatbuffer); nested_parser.root_struct_def_ = field->nested_flatbuffer; nested_parser.enums_ = enums_; nested_parser.opts = opts; nested_parser.uses_flexbuffers_ = uses_flexbuffers_; // Parse JSON substring into new flatbuffer builder using nested_parser if (!nested_parser.Parse(substring.c_str(), nullptr, nullptr)) { ECHECK(Error(nested_parser.error_)); } auto off = builder_.CreateVector(nested_parser.builder_.GetBufferPointer(), nested_parser.builder_.GetSize()); val.constant = NumToString(off.o); // Clean nested_parser before destruction to avoid deleting the elements in the SymbolTables nested_parser.enums_.dict.clear(); nested_parser.enums_.vec.clear(); } return NoError(); } CheckedError Parser::ParseMetaData(SymbolTable<Value> *attributes) { if (Is('(')) { NEXT(); for (;;) { auto name = attribute_; EXPECT(kTokenIdentifier); if (known_attributes_.find(name) == known_attributes_.end()) return Error("user define attributes must be declared before use: " + name); auto e = new Value(); attributes->Add(name, e); if (Is(':')) { NEXT(); ECHECK(ParseSingleValue(*e)); } if (Is(')')) { NEXT(); break; } EXPECT(','); } } return NoError(); } CheckedError Parser::TryTypedValue(int dtoken, bool check, Value &e, BaseType req, bool *destmatch) { bool match = dtoken == token_; if (match) { *destmatch = true; e.constant = attribute_; if (!check) { if (e.type.base_type == BASE_TYPE_NONE) { e.type.base_type = req; } else { return Error(std::string("type mismatch: expecting: ") + kTypeNames[e.type.base_type] + ", found: " + kTypeNames[req]); } } NEXT(); } return NoError(); } CheckedError Parser::ParseEnumFromString(Type &type, int64_t *result) { *result = 0; // Parse one or more enum identifiers, separated by spaces. const char *next = attribute_.c_str(); do { const char *divider = strchr(next, ' '); std::string word; if (divider) { word = std::string(next, divider); next = divider + strspn(divider, " "); } else { word = next; next += word.length(); } if (type.enum_def) { // The field has an enum type auto enum_val = type.enum_def->vals.Lookup(word); if (!enum_val) return Error("unknown enum value: " + word + ", for enum: " + type.enum_def->name); *result |= enum_val->value; } else { // No enum type, probably integral field. if (!IsInteger(type.base_type)) return Error("not a valid value for this field: " + word); // TODO: could check if its a valid number constant here. 
const char *dot = strrchr(word.c_str(), '.'); if (!dot) return Error("enum values need to be qualified by an enum type"); std::string enum_def_str(word.c_str(), dot); std::string enum_val_str(dot + 1, word.c_str() + word.length()); auto enum_def = LookupEnum(enum_def_str); if (!enum_def) return Error("unknown enum: " + enum_def_str); auto enum_val = enum_def->vals.Lookup(enum_val_str); if (!enum_val) return Error("unknown enum value: " + enum_val_str); *result |= enum_val->value; } } while(*next); return NoError(); } CheckedError Parser::ParseHash(Value &e, FieldDef* field) { assert(field); Value *hash_name = field->attributes.Lookup("hash"); switch (e.type.base_type) { case BASE_TYPE_INT: { auto hash = FindHashFunction32(hash_name->constant.c_str()); int32_t hashed_value = static_cast<int32_t>(hash(attribute_.c_str())); e.constant = NumToString(hashed_value); break; } case BASE_TYPE_UINT: { auto hash = FindHashFunction32(hash_name->constant.c_str()); uint32_t hashed_value = hash(attribute_.c_str()); e.constant = NumToString(hashed_value); break; } case BASE_TYPE_LONG: { auto hash = FindHashFunction64(hash_name->constant.c_str()); int64_t hashed_value = static_cast<int64_t>(hash(attribute_.c_str())); e.constant = NumToString(hashed_value); break; } case BASE_TYPE_ULONG: { auto hash = FindHashFunction64(hash_name->constant.c_str()); uint64_t hashed_value = hash(attribute_.c_str()); e.constant = NumToString(hashed_value); break; } default: assert(0); } NEXT(); return NoError(); } CheckedError Parser::TokenError() { return Error("cannot parse value starting with: " + TokenToStringId(token_)); } CheckedError Parser::ParseSingleValue(Value &e) { // First see if this could be a conversion function: if (token_ == kTokenIdentifier && *cursor_ == '(') { auto functionname = attribute_; NEXT(); EXPECT('('); ECHECK(ParseSingleValue(e)); EXPECT(')'); #define FLATBUFFERS_FN_DOUBLE(name, op) \ if (functionname == name) { \ auto x = strtod(e.constant.c_str(), nullptr); \ e.constant = NumToString(op); \ } FLATBUFFERS_FN_DOUBLE("deg", x / M_PI * 180); FLATBUFFERS_FN_DOUBLE("rad", x * M_PI / 180); FLATBUFFERS_FN_DOUBLE("sin", sin(x)); FLATBUFFERS_FN_DOUBLE("cos", cos(x)); FLATBUFFERS_FN_DOUBLE("tan", tan(x)); FLATBUFFERS_FN_DOUBLE("asin", asin(x)); FLATBUFFERS_FN_DOUBLE("acos", acos(x)); FLATBUFFERS_FN_DOUBLE("atan", atan(x)); // TODO(wvo): add more useful conversion functions here. #undef FLATBUFFERS_FN_DOUBLE // Then check if this could be a string/identifier enum value: } else if (e.type.base_type != BASE_TYPE_STRING && e.type.base_type != BASE_TYPE_BOOL && e.type.base_type != BASE_TYPE_NONE && (token_ == kTokenIdentifier || token_ == kTokenStringConstant)) { if (IsIdentifierStart(attribute_[0])) { // Enum value. int64_t val; ECHECK(ParseEnumFromString(e.type, &val)); e.constant = NumToString(val); NEXT(); } else { // Numeric constant in string. if (IsInteger(e.type.base_type)) { char *end; e.constant = NumToString(StringToInt(attribute_.c_str(), &end)); if (*end) return Error("invalid integer: " + attribute_); } else if (IsFloat(e.type.base_type)) { char *end; e.constant = NumToString(strtod(attribute_.c_str(), &end)); if (*end) return Error("invalid float: " + attribute_); } else { assert(0); // Shouldn't happen, we covered all types. 
e.constant = "0"; } NEXT(); } } else { bool match = false; ECHECK(TryTypedValue(kTokenIntegerConstant, IsScalar(e.type.base_type), e, BASE_TYPE_INT, &match)); ECHECK(TryTypedValue(kTokenFloatConstant, IsFloat(e.type.base_type), e, BASE_TYPE_FLOAT, &match)); ECHECK(TryTypedValue(kTokenStringConstant, e.type.base_type == BASE_TYPE_STRING, e, BASE_TYPE_STRING, &match)); auto istrue = IsIdent("true"); if (istrue || IsIdent("false")) { attribute_ = NumToString(istrue); ECHECK(TryTypedValue(kTokenIdentifier, IsBool(e.type.base_type), e, BASE_TYPE_BOOL, &match)); } if (!match) return TokenError(); } return NoError(); } StructDef *Parser::LookupCreateStruct(const std::string &name, bool create_if_new, bool definition) { std::string qualified_name = current_namespace_->GetFullyQualifiedName(name); // See if it exists pre-declared by an unqualified use. auto struct_def = structs_.Lookup(name); if (struct_def && struct_def->predecl) { if (definition) { // Make sure it has the current namespace, and is registered under its // qualified name. struct_def->defined_namespace = current_namespace_; structs_.Move(name, qualified_name); } return struct_def; } // See if it exists pre-declared by an qualified use. struct_def = structs_.Lookup(qualified_name); if (struct_def && struct_def->predecl) { if (definition) { // Make sure it has the current namespace. struct_def->defined_namespace = current_namespace_; } return struct_def; } if (!definition) { // Search thru parent namespaces. for (size_t components = current_namespace_->components.size(); components && !struct_def; components--) { struct_def = structs_.Lookup( current_namespace_->GetFullyQualifiedName(name, components - 1)); } } if (!struct_def && create_if_new) { struct_def = new StructDef(); if (definition) { structs_.Add(qualified_name, struct_def); struct_def->name = name; struct_def->defined_namespace = current_namespace_; } else { // Not a definition. // Rather than failing, we create a "pre declared" StructDef, due to // circular references, and check for errors at the end of parsing. // It is defined in the root namespace, since we don't know what the // final namespace will be. // TODO: maybe safer to use special namespace? structs_.Add(name, struct_def); struct_def->name = name; struct_def->defined_namespace = empty_namespace_; } } return struct_def; } CheckedError Parser::ParseEnum(bool is_union, EnumDef **dest) { std::vector<std::string> enum_comment = doc_comment_; NEXT(); std::string enum_name = attribute_; EXPECT(kTokenIdentifier); auto &enum_def = *new EnumDef(); enum_def.name = enum_name; enum_def.file = file_being_parsed_; enum_def.doc_comment = enum_comment; enum_def.is_union = is_union; enum_def.defined_namespace = current_namespace_; if (enums_.Add(current_namespace_->GetFullyQualifiedName(enum_name), &enum_def)) return Error("enum already exists: " + enum_name); if (is_union) { enum_def.underlying_type.base_type = BASE_TYPE_UTYPE; enum_def.underlying_type.enum_def = &enum_def; } else { if (opts.proto_mode) { enum_def.underlying_type.base_type = BASE_TYPE_INT; } else { // Give specialized error message, since this type spec used to // be optional in the first FlatBuffers release. if (!Is(':')) { return Error("must specify the underlying integer type for this" " enum (e.g. \': short\', which was the default)."); } else { NEXT(); } // Specify the integer type underlying this enum. 
ECHECK(ParseType(enum_def.underlying_type)); if (!IsInteger(enum_def.underlying_type.base_type)) return Error("underlying enum type must be integral"); } // Make this type refer back to the enum it was derived from. enum_def.underlying_type.enum_def = &enum_def; } ECHECK(ParseMetaData(&enum_def.attributes)); EXPECT('{'); if (is_union) enum_def.vals.Add("NONE", new EnumVal("NONE", 0)); for (;;) { if (opts.proto_mode && attribute_ == "option") { ECHECK(ParseProtoOption()); } else { auto value_name = attribute_; auto full_name = value_name; std::vector<std::string> value_comment = doc_comment_; EXPECT(kTokenIdentifier); if (is_union) { ECHECK(ParseNamespacing(&full_name, &value_name)); if (opts.union_value_namespacing) { // Since we can't namespace the actual enum identifiers, turn // namespace parts into part of the identifier. value_name = full_name; std::replace(value_name.begin(), value_name.end(), '.', '_'); } } auto prevsize = enum_def.vals.vec.size(); auto value = enum_def.vals.vec.size() ? enum_def.vals.vec.back()->value + 1 : 0; auto &ev = *new EnumVal(value_name, value); if (enum_def.vals.Add(value_name, &ev)) return Error("enum value already exists: " + value_name); ev.doc_comment = value_comment; if (is_union) { if (Is(':')) { NEXT(); ECHECK(ParseType(ev.union_type)); if (ev.union_type.base_type != BASE_TYPE_STRUCT && ev.union_type.base_type != BASE_TYPE_STRING) return Error("union value type may only be table/struct/string"); enum_def.uses_type_aliases = true; } else { ev.union_type = Type(BASE_TYPE_STRUCT, LookupCreateStruct(full_name)); } } if (Is('=')) { NEXT(); ev.value = StringToInt(attribute_.c_str()); EXPECT(kTokenIntegerConstant); if (!opts.proto_mode && prevsize && enum_def.vals.vec[prevsize - 1]->value >= ev.value) return Error("enum values must be specified in ascending order"); } if (is_union) { if (ev.value < 0 || ev.value >= 256) return Error("union enum value must fit in a ubyte"); } if (opts.proto_mode && Is('[')) { NEXT(); // ignore attributes on enums. while (token_ != ']') NEXT(); NEXT(); } } if (!Is(opts.proto_mode ? ';' : ',')) break; NEXT(); if (Is('}')) break; } EXPECT('}'); if (enum_def.attributes.Lookup("bit_flags")) { for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { if (static_cast<size_t>((*it)->value) >= SizeOf(enum_def.underlying_type.base_type) * 8) return Error("bit flag out of range of underlying integral type"); (*it)->value = 1LL << (*it)->value; } } if (dest) *dest = &enum_def; types_.Add(current_namespace_->GetFullyQualifiedName(enum_def.name), new Type(BASE_TYPE_UNION, nullptr, &enum_def)); return NoError(); } CheckedError Parser::StartStruct(const std::string &name, StructDef **dest) { auto &struct_def = *LookupCreateStruct(name, true, true); if (!struct_def.predecl) return Error("datatype already exists: " + name); struct_def.predecl = false; struct_def.name = name; struct_def.file = file_being_parsed_; // Move this struct to the back of the vector just in case it was predeclared, // to preserve declaration order. 
*std::remove(structs_.vec.begin(), structs_.vec.end(), &struct_def) = &struct_def; *dest = &struct_def; return NoError(); } CheckedError Parser::CheckClash(std::vector<FieldDef*> &fields, StructDef *struct_def, const char *suffix, BaseType basetype) { auto len = strlen(suffix); for (auto it = fields.begin(); it != fields.end(); ++it) { auto &fname = (*it)->name; if (fname.length() > len && fname.compare(fname.length() - len, len, suffix) == 0 && (*it)->value.type.base_type != BASE_TYPE_UTYPE) { auto field = struct_def->fields.Lookup( fname.substr(0, fname.length() - len)); if (field && field->value.type.base_type == basetype) return Error("Field " + fname + " would clash with generated functions for field " + field->name); } } return NoError(); } bool Parser::SupportsVectorOfUnions() const { return opts.lang_to_generate != 0 && (opts.lang_to_generate & ~(IDLOptions::kCpp | IDLOptions::kJs | IDLOptions::kTs | IDLOptions::kPhp)) == 0; } Namespace *Parser::UniqueNamespace(Namespace *ns) { for (auto it = namespaces_.begin(); it != namespaces_.end(); ++it) { if (ns->components == (*it)->components) { delete ns; return *it; } } namespaces_.push_back(ns); return ns; } static bool compareFieldDefs(const FieldDef *a, const FieldDef *b) { auto a_id = atoi(a->attributes.Lookup("id")->constant.c_str()); auto b_id = atoi(b->attributes.Lookup("id")->constant.c_str()); return a_id < b_id; } CheckedError Parser::ParseDecl() { std::vector<std::string> dc = doc_comment_; bool fixed = IsIdent("struct"); if (!fixed && !IsIdent("table")) return Error("declaration expected"); NEXT(); std::string name = attribute_; EXPECT(kTokenIdentifier); StructDef *struct_def; ECHECK(StartStruct(name, &struct_def)); struct_def->doc_comment = dc; struct_def->fixed = fixed; ECHECK(ParseMetaData(&struct_def->attributes)); struct_def->sortbysize = struct_def->attributes.Lookup("original_order") == nullptr && !fixed; EXPECT('{'); while (token_ != '}') ECHECK(ParseField(*struct_def)); auto force_align = struct_def->attributes.Lookup("force_align"); if (fixed && force_align) { auto align = static_cast<size_t>(atoi(force_align->constant.c_str())); if (force_align->type.base_type != BASE_TYPE_INT || align < struct_def->minalign || align > FLATBUFFERS_MAX_ALIGNMENT || align & (align - 1)) return Error("force_align must be a power of two integer ranging from the" "struct\'s natural alignment to " + NumToString(FLATBUFFERS_MAX_ALIGNMENT)); struct_def->minalign = align; } struct_def->PadLastField(struct_def->minalign); // Check if this is a table that has manual id assignments auto &fields = struct_def->fields.vec; if (!struct_def->fixed && fields.size()) { size_t num_id_fields = 0; for (auto it = fields.begin(); it != fields.end(); ++it) { if ((*it)->attributes.Lookup("id")) num_id_fields++; } // If any fields have ids.. if (num_id_fields) { // Then all fields must have them. if (num_id_fields != fields.size()) return Error( "either all fields or no fields must have an 'id' attribute"); // Simply sort by id, then the fields are the same as if no ids had // been specified. std::sort(fields.begin(), fields.end(), compareFieldDefs); // Verify we have a contiguous set, and reassign vtable offsets. 
for (int i = 0; i < static_cast<int>(fields.size()); i++) { if (i != atoi(fields[i]->attributes.Lookup("id")->constant.c_str())) return Error("field id\'s must be consecutive from 0, id " + NumToString(i) + " missing or set twice"); fields[i]->value.offset = FieldIndexToOffset(static_cast<voffset_t>(i)); } } } ECHECK(CheckClash(fields, struct_def, UnionTypeFieldSuffix(), BASE_TYPE_UNION)); ECHECK(CheckClash(fields, struct_def, "Type", BASE_TYPE_UNION)); ECHECK(CheckClash(fields, struct_def, "_length", BASE_TYPE_VECTOR)); ECHECK(CheckClash(fields, struct_def, "Length", BASE_TYPE_VECTOR)); ECHECK(CheckClash(fields, struct_def, "_byte_vector", BASE_TYPE_STRING)); ECHECK(CheckClash(fields, struct_def, "ByteVector", BASE_TYPE_STRING)); EXPECT('}'); types_.Add(current_namespace_->GetFullyQualifiedName(struct_def->name), new Type(BASE_TYPE_STRUCT, struct_def, nullptr)); return NoError(); } CheckedError Parser::ParseService() { std::vector<std::string> service_comment = doc_comment_; NEXT(); auto service_name = attribute_; EXPECT(kTokenIdentifier); auto &service_def = *new ServiceDef(); service_def.name = service_name; service_def.file = file_being_parsed_; service_def.doc_comment = service_comment; service_def.defined_namespace = current_namespace_; if (services_.Add(current_namespace_->GetFullyQualifiedName(service_name), &service_def)) return Error("service already exists: " + service_name); ECHECK(ParseMetaData(&service_def.attributes)); EXPECT('{'); do { auto rpc_name = attribute_; EXPECT(kTokenIdentifier); EXPECT('('); Type reqtype, resptype; ECHECK(ParseTypeIdent(reqtype)); EXPECT(')'); EXPECT(':'); ECHECK(ParseTypeIdent(resptype)); if (reqtype.base_type != BASE_TYPE_STRUCT || reqtype.struct_def->fixed || resptype.base_type != BASE_TYPE_STRUCT || resptype.struct_def->fixed) return Error("rpc request and response types must be tables"); auto &rpc = *new RPCCall(); rpc.name = rpc_name; rpc.request = reqtype.struct_def; rpc.response = resptype.struct_def; if (service_def.calls.Add(rpc_name, &rpc)) return Error("rpc already exists: " + rpc_name); ECHECK(ParseMetaData(&rpc.attributes)); EXPECT(';'); } while (token_ != '}'); NEXT(); return NoError(); } bool Parser::SetRootType(const char *name) { root_struct_def_ = structs_.Lookup(name); if (!root_struct_def_) root_struct_def_ = structs_.Lookup( current_namespace_->GetFullyQualifiedName(name)); return root_struct_def_ != nullptr; } void Parser::MarkGenerated() { // This function marks all existing definitions as having already // been generated, which signals no code for included files should be // generated. for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) { (*it)->generated = true; } for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) { if (!(*it)->predecl) { (*it)->generated = true; } } for (auto it = services_.vec.begin(); it != services_.vec.end(); ++it) { (*it)->generated = true; } } CheckedError Parser::ParseNamespace() { NEXT(); auto ns = new Namespace(); namespaces_.push_back(ns); // Store it here to not leak upon error. if (token_ != ';') { for (;;) { ns->components.push_back(attribute_); EXPECT(kTokenIdentifier); if (Is('.')) NEXT() else break; } } namespaces_.pop_back(); current_namespace_ = UniqueNamespace(ns); EXPECT(';'); return NoError(); } static bool compareEnumVals(const EnumVal *a, const EnumVal* b) { return a->value < b->value; } // Best effort parsing of .proto declarations, with the aim to turn them // in the closest corresponding FlatBuffer equivalent. 
// We parse everything as identifiers instead of keywords, since we don't // want protobuf keywords to become invalid identifiers in FlatBuffers. CheckedError Parser::ParseProtoDecl() { bool isextend = IsIdent("extend"); if (IsIdent("package")) { // These are identical in syntax to FlatBuffer's namespace decl. ECHECK(ParseNamespace()); } else if (IsIdent("message") || isextend) { std::vector<std::string> struct_comment = doc_comment_; NEXT(); StructDef *struct_def = nullptr; Namespace *parent_namespace = nullptr; if (isextend) { if (Is('.')) NEXT(); // qualified names may start with a . ? auto id = attribute_; EXPECT(kTokenIdentifier); ECHECK(ParseNamespacing(&id, nullptr)); struct_def = LookupCreateStruct(id, false); if (!struct_def) return Error("cannot extend unknown message type: " + id); } else { std::string name = attribute_; EXPECT(kTokenIdentifier); ECHECK(StartStruct(name, &struct_def)); // Since message definitions can be nested, we create a new namespace. auto ns = new Namespace(); // Copy of current namespace. *ns = *current_namespace_; // But with current message name. ns->components.push_back(name); parent_namespace = current_namespace_; current_namespace_ = UniqueNamespace(ns); } struct_def->doc_comment = struct_comment; ECHECK(ParseProtoFields(struct_def, isextend, false)); if (!isextend) { current_namespace_ = parent_namespace; } if (Is(';')) NEXT(); } else if (IsIdent("enum")) { // These are almost the same, just with different terminator: EnumDef *enum_def; ECHECK(ParseEnum(false, &enum_def)); if (Is(';')) NEXT(); // Protobuf allows them to be specified in any order, so sort afterwards. auto &v = enum_def->vals.vec; std::sort(v.begin(), v.end(), compareEnumVals); // Temp: remove any duplicates, as .fbs files can't handle them. for (auto it = v.begin(); it != v.end(); ) { if (it != v.begin() && it[0]->value == it[-1]->value) it = v.erase(it); else ++it; } } else if (IsIdent("syntax")) { // Skip these. NEXT(); EXPECT('='); EXPECT(kTokenStringConstant); EXPECT(';'); } else if (IsIdent("option")) { // Skip these. ECHECK(ParseProtoOption()); EXPECT(';'); } else if (IsIdent("service")) { // Skip these. NEXT(); EXPECT(kTokenIdentifier); ECHECK(ParseProtoCurliesOrIdent()); } else { return Error("don\'t know how to parse .proto declaration starting with " + TokenToStringId(token_)); } return NoError(); } CheckedError Parser::ParseProtoFields(StructDef *struct_def, bool isextend, bool inside_oneof) { EXPECT('{'); while (token_ != '}') { if (IsIdent("message") || IsIdent("extend") || IsIdent("enum")) { // Nested declarations. ECHECK(ParseProtoDecl()); } else if (IsIdent("extensions")) { // Skip these. NEXT(); EXPECT(kTokenIntegerConstant); if (Is(kTokenIdentifier)) { NEXT(); // to NEXT(); // num } EXPECT(';'); } else if (IsIdent("option")) { // Skip these. ECHECK(ParseProtoOption()); EXPECT(';'); } else if (IsIdent("reserved")) { // Skip these. NEXT(); EXPECT(kTokenIntegerConstant); while (Is(',')) { NEXT(); EXPECT(kTokenIntegerConstant); } EXPECT(';'); } else { std::vector<std::string> field_comment = doc_comment_; // Parse the qualifier. bool required = false; bool repeated = false; bool oneof = false; if (!inside_oneof) { if (IsIdent("optional")) { // This is the default. NEXT(); } else if (IsIdent("required")) { required = true; NEXT(); } else if (IsIdent("repeated")) { repeated = true; NEXT(); } else if (IsIdent("oneof")) { oneof = true; NEXT(); } else { // can't error, proto3 allows decls without any of the above. 
} } StructDef *anonymous_struct = nullptr; Type type; if (IsIdent("group") || oneof) { if (!oneof) NEXT(); auto name = "Anonymous" + NumToString(anonymous_counter++); ECHECK(StartStruct(name, &anonymous_struct)); type = Type(BASE_TYPE_STRUCT, anonymous_struct); } else { ECHECK(ParseTypeFromProtoType(&type)); } // Repeated elements get mapped to a vector. if (repeated) { type.element = type.base_type; type.base_type = BASE_TYPE_VECTOR; } std::string name = attribute_; EXPECT(kTokenIdentifier); if (!oneof) { // Parse the field id. Since we're just translating schemas, not // any kind of binary compatibility, we can safely ignore these, and // assign our own. EXPECT('='); EXPECT(kTokenIntegerConstant); } FieldDef *field = nullptr; if (isextend) { // We allow a field to be re-defined when extending. // TODO: are there situations where that is problematic? field = struct_def->fields.Lookup(name); } if (!field) ECHECK(AddField(*struct_def, name, type, &field)); field->doc_comment = field_comment; if (!IsScalar(type.base_type)) field->required = required; // See if there's a default specified. if (Is('[')) { NEXT(); for (;;) { auto key = attribute_; ECHECK(ParseProtoKey()); EXPECT('='); auto val = attribute_; ECHECK(ParseProtoCurliesOrIdent()); if (key == "default") { // Temp: skip non-numeric defaults (enums). auto numeric = strpbrk(val.c_str(), "0123456789-+."); if (IsScalar(type.base_type) && numeric == val.c_str()) field->value.constant = val; } else if (key == "deprecated") { field->deprecated = val == "true"; } if (!Is(',')) break; NEXT(); } EXPECT(']'); } if (anonymous_struct) { ECHECK(ParseProtoFields(anonymous_struct, false, oneof)); if (Is(';')) NEXT(); } else { EXPECT(';'); } } } NEXT(); return NoError(); } CheckedError Parser::ParseProtoKey() { if (token_ == '(') { NEXT(); // Skip "(a.b)" style custom attributes. while (token_ == '.' || token_ == kTokenIdentifier) NEXT(); EXPECT(')'); while (Is('.')) { NEXT(); EXPECT(kTokenIdentifier); } } else { EXPECT(kTokenIdentifier); } return NoError(); } CheckedError Parser::ParseProtoCurliesOrIdent() { if (Is('{')) { NEXT(); for (int nesting = 1; nesting; ) { if (token_ == '{') nesting++; else if (token_ == '}') nesting--; NEXT(); } } else { NEXT(); // Any single token. } return NoError(); } CheckedError Parser::ParseProtoOption() { NEXT(); ECHECK(ParseProtoKey()); EXPECT('='); ECHECK(ParseProtoCurliesOrIdent()); return NoError(); } // Parse a protobuf type, and map it to the corresponding FlatBuffer one. 
CheckedError Parser::ParseTypeFromProtoType(Type *type) { struct type_lookup { const char *proto_type; BaseType fb_type, element; }; static type_lookup lookup[] = { { "float", BASE_TYPE_FLOAT, BASE_TYPE_NONE }, { "double", BASE_TYPE_DOUBLE, BASE_TYPE_NONE }, { "int32", BASE_TYPE_INT, BASE_TYPE_NONE }, { "int64", BASE_TYPE_LONG, BASE_TYPE_NONE }, { "uint32", BASE_TYPE_UINT, BASE_TYPE_NONE }, { "uint64", BASE_TYPE_ULONG, BASE_TYPE_NONE }, { "sint32", BASE_TYPE_INT, BASE_TYPE_NONE }, { "sint64", BASE_TYPE_LONG, BASE_TYPE_NONE }, { "fixed32", BASE_TYPE_UINT, BASE_TYPE_NONE }, { "fixed64", BASE_TYPE_ULONG, BASE_TYPE_NONE }, { "sfixed32", BASE_TYPE_INT, BASE_TYPE_NONE }, { "sfixed64", BASE_TYPE_LONG, BASE_TYPE_NONE }, { "bool", BASE_TYPE_BOOL, BASE_TYPE_NONE }, { "string", BASE_TYPE_STRING, BASE_TYPE_NONE }, { "bytes", BASE_TYPE_VECTOR, BASE_TYPE_UCHAR }, { nullptr, BASE_TYPE_NONE, BASE_TYPE_NONE } }; for (auto tl = lookup; tl->proto_type; tl++) { if (attribute_ == tl->proto_type) { type->base_type = tl->fb_type; type->element = tl->element; NEXT(); return NoError(); } } if (Is('.')) NEXT(); // qualified names may start with a . ? ECHECK(ParseTypeIdent(*type)); return NoError(); } CheckedError Parser::SkipAnyJsonValue() { switch (token_) { case '{': { size_t fieldn_outer = 0; return ParseTableDelimiters(fieldn_outer, nullptr, [](const std::string &, size_t &fieldn, const StructDef *, void *state) -> CheckedError { auto *parser = static_cast<Parser *>(state); ECHECK(parser->SkipAnyJsonValue()); fieldn++; return NoError(); }, this); } case '[': { size_t count = 0; return ParseVectorDelimiters(count, [](size_t &, void *state) -> CheckedError { return static_cast<Parser *>(state)->SkipAnyJsonValue(); }, this); } case kTokenStringConstant: case kTokenIntegerConstant: case kTokenFloatConstant: NEXT(); break; default: if (IsIdent("true") || IsIdent("false") || IsIdent("null")) { NEXT(); } else return TokenError(); } return NoError(); } CheckedError Parser::ParseFlexBufferValue(flexbuffers::Builder *builder) { switch (token_) { case '{': { std::pair<Parser *, flexbuffers::Builder *> parser_and_builder_state( this, builder); auto start = builder->StartMap(); size_t fieldn_outer = 0; auto err = ParseTableDelimiters(fieldn_outer, nullptr, [](const std::string &name, size_t &fieldn, const StructDef *, void *state) -> CheckedError { auto *parser_and_builder = static_cast<std::pair<Parser *, flexbuffers::Builder *> *>( state); auto *parser = parser_and_builder->first; auto *current_builder = parser_and_builder->second; current_builder->Key(name); ECHECK(parser->ParseFlexBufferValue(current_builder)); fieldn++; return NoError(); }, &parser_and_builder_state); ECHECK(err); builder->EndMap(start); break; } case '[':{ auto start = builder->StartVector(); size_t count = 0; std::pair<Parser *, flexbuffers::Builder *> parser_and_builder_state( this, builder); ECHECK(ParseVectorDelimiters(count, [](size_t &, void *state) -> CheckedError { auto *parser_and_builder = static_cast<std::pair<Parser *, flexbuffers::Builder *> *>( state); return parser_and_builder->first->ParseFlexBufferValue( parser_and_builder->second); }, &parser_and_builder_state)); builder->EndVector(start, false, false); break; } case kTokenStringConstant: builder->String(attribute_); EXPECT(kTokenStringConstant); break; case kTokenIntegerConstant: builder->Int(StringToInt(attribute_.c_str())); EXPECT(kTokenIntegerConstant); break; case kTokenFloatConstant: builder->Double(strtod(attribute_.c_str(), nullptr)); EXPECT(kTokenFloatConstant); break; 
default: if (IsIdent("true")) { builder->Bool(true); NEXT(); } else if (IsIdent("false")) { builder->Bool(false); NEXT(); } else if (IsIdent("null")) { builder->Null(); NEXT(); } else return TokenError(); } return NoError(); } bool Parser::ParseFlexBuffer(const char *source, const char *source_filename, flexbuffers::Builder *builder) { auto ok = !StartParseFile(source, source_filename).Check() && !ParseFlexBufferValue(builder).Check(); if (ok) builder->Finish(); return ok; } bool Parser::Parse(const char *source, const char **include_paths, const char *source_filename) { return !ParseRoot(source, include_paths, source_filename).Check(); } CheckedError Parser::StartParseFile(const char *source, const char *source_filename) { file_being_parsed_ = source_filename ? source_filename : ""; source_ = cursor_ = source; line_ = 1; error_.clear(); ECHECK(SkipByteOrderMark()); NEXT(); if (Is(kTokenEof)) return Error("input file is empty"); return NoError(); } CheckedError Parser::ParseRoot(const char *source, const char **include_paths, const char *source_filename) { ECHECK(DoParse(source, include_paths, source_filename, nullptr)); // Check that all types were defined. for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) { if ((*it)->predecl) { return Error("type referenced but not defined (check namespace): " + (*it)->name); } } // This check has to happen here and not earlier, because only now do we // know for sure what the type of these are. for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) { auto &enum_def = **it; if (enum_def.is_union) { for (auto val_it = enum_def.vals.vec.begin(); val_it != enum_def.vals.vec.end(); ++val_it) { auto &val = **val_it; if (!SupportsVectorOfUnions() && val.union_type.struct_def && val.union_type.struct_def->fixed) return Error( "only tables can be union elements in the generated language: " + val.name); } } } return NoError(); } CheckedError Parser::DoParse(const char *source, const char **include_paths, const char *source_filename, const char *include_filename) { if (source_filename && included_files_.find(source_filename) == included_files_.end()) { included_files_[source_filename] = include_filename ? include_filename : ""; files_included_per_file_[source_filename] = std::set<std::string>(); } if (!include_paths) { static const char *current_directory[] = { "", nullptr }; include_paths = current_directory; } field_stack_.clear(); builder_.Clear(); // Start with a blank namespace just in case this file doesn't have one. current_namespace_ = empty_namespace_; ECHECK(StartParseFile(source, source_filename)); // Includes must come before type declarations: for (;;) { // Parse pre-include proto statements if any: if (opts.proto_mode && (attribute_ == "option" || attribute_ == "syntax" || attribute_ == "package")) { ECHECK(ParseProtoDecl()); } else if (IsIdent("native_include")) { NEXT(); vector_emplace_back(&native_included_files_, attribute_); EXPECT(kTokenStringConstant); } else if (IsIdent("include") || (opts.proto_mode && IsIdent("import"))) { NEXT(); if (opts.proto_mode && attribute_ == "public") NEXT(); auto name = flatbuffers::PosixPath(attribute_.c_str()); EXPECT(kTokenStringConstant); // Look for the file in include_paths. 
std::string filepath; for (auto paths = include_paths; paths && *paths; paths++) { filepath = flatbuffers::ConCatPathFileName(*paths, name); if(FileExists(filepath.c_str())) break; } if (filepath.empty()) return Error("unable to locate include file: " + name); if (source_filename) files_included_per_file_[source_filename].insert(filepath); if (included_files_.find(filepath) == included_files_.end()) { // We found an include file that we have not parsed yet. // Load it and parse it. std::string contents; if (!LoadFile(filepath.c_str(), true, &contents)) return Error("unable to load include file: " + name); ECHECK(DoParse(contents.c_str(), include_paths, filepath.c_str(), name.c_str())); // We generally do not want to output code for any included files: if (!opts.generate_all) MarkGenerated(); // Reset these just in case the included file had them, and the // parent doesn't. root_struct_def_ = nullptr; file_identifier_.clear(); file_extension_.clear(); // This is the easiest way to continue this file after an include: // instead of saving and restoring all the state, we simply start the // file anew. This will cause it to encounter the same include // statement again, but this time it will skip it, because it was // entered into included_files_. // This is recursive, but only go as deep as the number of include // statements. return DoParse(source, include_paths, source_filename, include_filename); } EXPECT(';'); } else { break; } } // Now parse all other kinds of declarations: while (token_ != kTokenEof) { if (opts.proto_mode) { ECHECK(ParseProtoDecl()); } else if (IsIdent("namespace")) { ECHECK(ParseNamespace()); } else if (token_ == '{') { if (!root_struct_def_) return Error("no root type set to parse json with"); if (builder_.GetSize()) { return Error("cannot have more than one json object in a file"); } uoffset_t toff; ECHECK(ParseTable(*root_struct_def_, nullptr, &toff)); builder_.Finish(Offset<Table>(toff), file_identifier_.length() ? 
file_identifier_.c_str() : nullptr); } else if (IsIdent("enum")) { ECHECK(ParseEnum(false, nullptr)); } else if (IsIdent("union")) { ECHECK(ParseEnum(true, nullptr)); } else if (IsIdent("root_type")) { NEXT(); auto root_type = attribute_; EXPECT(kTokenIdentifier); ECHECK(ParseNamespacing(&root_type, nullptr)); if (!SetRootType(root_type.c_str())) return Error("unknown root type: " + root_type); if (root_struct_def_->fixed) return Error("root type must be a table"); EXPECT(';'); } else if (IsIdent("file_identifier")) { NEXT(); file_identifier_ = attribute_; EXPECT(kTokenStringConstant); if (file_identifier_.length() != FlatBufferBuilder::kFileIdentifierLength) return Error("file_identifier must be exactly " + NumToString(FlatBufferBuilder::kFileIdentifierLength) + " characters"); EXPECT(';'); } else if (IsIdent("file_extension")) { NEXT(); file_extension_ = attribute_; EXPECT(kTokenStringConstant); EXPECT(';'); } else if(IsIdent("include")) { return Error("includes must come before declarations"); } else if(IsIdent("attribute")) { NEXT(); auto name = attribute_; EXPECT(kTokenStringConstant); EXPECT(';'); known_attributes_[name] = false; } else if (IsIdent("rpc_service")) { ECHECK(ParseService()); } else { ECHECK(ParseDecl()); } } return NoError(); } std::set<std::string> Parser::GetIncludedFilesRecursive( const std::string &file_name) const { std::set<std::string> included_files; std::list<std::string> to_process; if (file_name.empty()) return included_files; to_process.push_back(file_name); while (!to_process.empty()) { std::string current = to_process.front(); to_process.pop_front(); included_files.insert(current); // Workaround the lack of const accessor in C++98 maps. auto &new_files = (*const_cast<std::map<std::string, std::set<std::string>> *>( &files_included_per_file_))[current]; for (auto it = new_files.begin(); it != new_files.end(); ++it) { if (included_files.find(*it) == included_files.end()) to_process.push_back(*it); } } return included_files; } // Schema serialization functionality: template<typename T> bool compareName(const T* a, const T* b) { return a->defined_namespace->GetFullyQualifiedName(a->name) < b->defined_namespace->GetFullyQualifiedName(b->name); } template<typename T> void AssignIndices(const std::vector<T *> &defvec) { // Pre-sort these vectors, such that we can set the correct indices for them. auto vec = defvec; std::sort(vec.begin(), vec.end(), compareName<T>); for (int i = 0; i < static_cast<int>(vec.size()); i++) vec[i]->index = i; } void Parser::Serialize() { builder_.Clear(); AssignIndices(structs_.vec); AssignIndices(enums_.vec); std::vector<Offset<reflection::Object>> object_offsets; for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) { auto offset = (*it)->Serialize(&builder_, *this); object_offsets.push_back(offset); (*it)->serialized_location = offset.o; } std::vector<Offset<reflection::Enum>> enum_offsets; for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) { auto offset = (*it)->Serialize(&builder_, *this); enum_offsets.push_back(offset); (*it)->serialized_location = offset.o; } auto schema_offset = reflection::CreateSchema( builder_, builder_.CreateVectorOfSortedTables(&object_offsets), builder_.CreateVectorOfSortedTables(&enum_offsets), builder_.CreateString(file_identifier_), builder_.CreateString(file_extension_), root_struct_def_ ? 
root_struct_def_->serialized_location : 0); builder_.Finish(schema_offset, reflection::SchemaIdentifier()); } Offset<reflection::Object> StructDef::Serialize(FlatBufferBuilder *builder, const Parser &parser) const { std::vector<Offset<reflection::Field>> field_offsets; for (auto it = fields.vec.begin(); it != fields.vec.end(); ++it) { field_offsets.push_back( (*it)->Serialize(builder, static_cast<uint16_t>(it - fields.vec.begin()), parser)); } auto qualified_name = defined_namespace->GetFullyQualifiedName(name); return reflection::CreateObject(*builder, builder->CreateString(qualified_name), builder->CreateVectorOfSortedTables( &field_offsets), fixed, static_cast<int>(minalign), static_cast<int>(bytesize), SerializeAttributes(builder, parser), parser.opts.binary_schema_comments ? builder->CreateVectorOfStrings( doc_comment) : 0); } Offset<reflection::Field> FieldDef::Serialize(FlatBufferBuilder *builder, uint16_t id, const Parser &parser) const { return reflection::CreateField(*builder, builder->CreateString(name), value.type.Serialize(builder), id, value.offset, IsInteger(value.type.base_type) ? StringToInt(value.constant.c_str()) : 0, IsFloat(value.type.base_type) ? strtod(value.constant.c_str(), nullptr) : 0.0, deprecated, required, key, SerializeAttributes(builder, parser), parser.opts.binary_schema_comments ? builder->CreateVectorOfStrings(doc_comment) : 0); // TODO: value.constant is almost always "0", we could save quite a bit of // space by sharing it. Same for common values of value.type. } Offset<reflection::Enum> EnumDef::Serialize(FlatBufferBuilder *builder, const Parser &parser) const { std::vector<Offset<reflection::EnumVal>> enumval_offsets; for (auto it = vals.vec.begin(); it != vals.vec.end(); ++it) { enumval_offsets.push_back((*it)->Serialize(builder)); } auto qualified_name = defined_namespace->GetFullyQualifiedName(name); return reflection::CreateEnum(*builder, builder->CreateString(qualified_name), builder->CreateVector(enumval_offsets), is_union, underlying_type.Serialize(builder), SerializeAttributes(builder, parser), parser.opts.binary_schema_comments ? builder->CreateVectorOfStrings(doc_comment) : 0); } Offset<reflection::EnumVal> EnumVal::Serialize(FlatBufferBuilder *builder) const { return reflection::CreateEnumVal(*builder, builder->CreateString(name), value, union_type.struct_def ? union_type.struct_def-> serialized_location : 0, union_type.Serialize(builder)); } Offset<reflection::Type> Type::Serialize(FlatBufferBuilder *builder) const { return reflection::CreateType(*builder, static_cast<reflection::BaseType>(base_type), static_cast<reflection::BaseType>(element), struct_def ? struct_def->index : (enum_def ? enum_def->index : -1)); } flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset< reflection::KeyValue>>> Definition::SerializeAttributes(FlatBufferBuilder *builder, const Parser &parser) const { std::vector<flatbuffers::Offset<reflection::KeyValue>> attrs; for (auto kv = attributes.dict.begin(); kv != attributes.dict.end(); ++kv) { auto it = parser.known_attributes_.find(kv->first); assert(it != parser.known_attributes_.end()); if (!it->second) { // Custom attribute. 
attrs.push_back( reflection::CreateKeyValue(*builder, builder->CreateString(kv->first), builder->CreateString( kv->second->constant))); } } if (attrs.size()) { return builder->CreateVectorOfSortedTables(&attrs); } else { return 0; } } std::string Parser::ConformTo(const Parser &base) { for (auto sit = structs_.vec.begin(); sit != structs_.vec.end(); ++sit) { auto &struct_def = **sit; auto qualified_name = struct_def.defined_namespace->GetFullyQualifiedName(struct_def.name); auto struct_def_base = base.structs_.Lookup(qualified_name); if (!struct_def_base) continue; for (auto fit = struct_def.fields.vec.begin(); fit != struct_def.fields.vec.end(); ++fit) { auto &field = **fit; auto field_base = struct_def_base->fields.Lookup(field.name); if (field_base) { if (field.value.offset != field_base->value.offset) return "offsets differ for field: " + field.name; if (field.value.constant != field_base->value.constant) return "defaults differ for field: " + field.name; if (!EqualByName(field.value.type, field_base->value.type)) return "types differ for field: " + field.name; } else { // Doesn't have to exist, deleting fields is fine. // But we should check if there is a field that has the same offset // but is incompatible (in the case of field renaming). for (auto fbit = struct_def_base->fields.vec.begin(); fbit != struct_def_base->fields.vec.end(); ++fbit) { field_base = *fbit; if (field.value.offset == field_base->value.offset) { if (!EqualByName(field.value.type, field_base->value.type)) return "field renamed to different type: " + field.name; break; } } } } } for (auto eit = enums_.vec.begin(); eit != enums_.vec.end(); ++eit) { auto &enum_def = **eit; auto qualified_name = enum_def.defined_namespace->GetFullyQualifiedName(enum_def.name); auto enum_def_base = base.enums_.Lookup(qualified_name); if (!enum_def_base) continue; for (auto evit = enum_def.vals.vec.begin(); evit != enum_def.vals.vec.end(); ++evit) { auto &enum_val = **evit; auto enum_val_base = enum_def_base->vals.Lookup(enum_val.name); if (enum_val_base) { if (enum_val.value != enum_val_base->value) return "values differ for enum: " + enum_val.name; } } } return ""; } } // namespace flatbuffers
1
12450
maybe at least pull the file identifier arg out of the if?
google-flatbuffers
java
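The review comment on this record ("maybe at least pull the file identifier arg out of the if?") refers to the builder_.Finish(Offset<Table>(toff), file_identifier_.length() ? file_identifier_.c_str() : nullptr) call in Parser::DoParse in the old file above. The patch being reviewed is not included in this excerpt, so the C++ below is only a hedged sketch of what the suggestion amounts to: compute the file identifier argument once, outside the branch, instead of repeating the ternary in each Finish call. The size_prefixed flag and the FinishSizePrefixed() variant are assumptions about that patch, not code taken from the file above.

#include <string>
#include "flatbuffers/flatbuffers.h"

// Sketch only: hoist the file identifier argument out of the if/else so the
// ternary is written once. size_prefixed and FinishSizePrefixed() are
// assumptions about the patch under review, which is not shown in this record.
inline void FinishRootTable(flatbuffers::FlatBufferBuilder &builder,
                            flatbuffers::uoffset_t root,
                            const std::string &file_identifier_,
                            bool size_prefixed) {
  // Empty identifier means "no file identifier", matching the ternary used in
  // Parser::DoParse above.
  const char *identifier =
      file_identifier_.length() ? file_identifier_.c_str() : nullptr;
  if (size_prefixed) {
    builder.FinishSizePrefixed(flatbuffers::Offset<flatbuffers::Table>(root),
                               identifier);
  } else {
    builder.Finish(flatbuffers::Offset<flatbuffers::Table>(root), identifier);
  }
}

Writing the ternary once keeps both Finish variants in sync if the empty-identifier handling ever changes, which is presumably the point of the reviewer's remark.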
@@ -1028,17 +1028,13 @@ class CommandDispatcher: raise cmdexc.CommandError("Can't move tab to position {}!".format( new_idx + 1)) - tab = self._current_widget() cur_idx = self._current_index() - icon = self._tabbed_browser.tabIcon(cur_idx) - label = self._tabbed_browser.page_title(cur_idx) cmdutils.check_overflow(cur_idx, 'int') cmdutils.check_overflow(new_idx, 'int') self._tabbed_browser.setUpdatesEnabled(False) try: color = self._tabbed_browser.tab_indicator_color(cur_idx) - self._tabbed_browser.removeTab(cur_idx) - self._tabbed_browser.insertTab(new_idx, tab, icon, label) + self._tabbed_browser.tabBar().moveTab(cur_idx, new_idx) self._set_current_index(new_idx) self._tabbed_browser.set_tab_indicator_color(new_idx, color) finally:
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Command dispatcher for TabbedBrowser.""" import os import os.path import shlex import functools from PyQt5.QtWidgets import QApplication, QTabBar from PyQt5.QtCore import Qt, QUrl, QEvent, QUrlQuery from PyQt5.QtGui import QKeyEvent from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog try: from PyQt5.QtWebKitWidgets import QWebPage except ImportError: QWebPage = None try: from PyQt5.QtWebEngineWidgets import QWebEnginePage except ImportError: QWebEnginePage = None import pygments import pygments.lexers import pygments.formatters from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners from qutebrowser.config import config, configexc from qutebrowser.browser import (urlmarks, browsertab, inspector, navigate, webelem, downloads) try: from qutebrowser.browser.webkit import mhtml except ImportError: # Failing imports on QtWebEngine, only used in QtWebKit commands. # FIXME:qtwebengine don't import this anymore at all pass from qutebrowser.keyinput import modeman from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils, objreg, utils, typing) from qutebrowser.utils.usertypes import KeyMode from qutebrowser.misc import editor, guiprocess from qutebrowser.completion.models import instances, sortfilter class CommandDispatcher: """Command dispatcher for TabbedBrowser. Contains all commands which are related to the current tab. We can't simply add these commands to BrowserTab directly and use currentWidget() for TabbedBrowser.cmd because at the time cmdutils.register() decorators are run, currentWidget() will return None. Attributes: _editor: The ExternalEditor object. _win_id: The window ID the CommandDispatcher is associated with. _tabbed_browser: The TabbedBrowser used. """ def __init__(self, win_id, tabbed_browser): self._win_id = win_id self._tabbed_browser = tabbed_browser def __repr__(self): return utils.get_repr(self) def _new_tabbed_browser(self): """Get a tabbed-browser from a new window.""" from qutebrowser.mainwindow import mainwindow new_window = mainwindow.MainWindow() new_window.show() return new_window.tabbed_browser def _count(self): """Convenience method to get the widget count.""" return self._tabbed_browser.count() def _set_current_index(self, idx): """Convenience method to set the current widget index.""" cmdutils.check_overflow(idx, 'int') self._tabbed_browser.setCurrentIndex(idx) def _current_index(self): """Convenience method to get the current widget index.""" return self._tabbed_browser.currentIndex() def _current_url(self): """Convenience method to get the current url.""" try: return self._tabbed_browser.current_url() except qtutils.QtValueError as e: msg = "Current URL is invalid" if e.reason: msg += " ({})".format(e.reason) msg += "!" 
raise cmdexc.CommandError(msg) def _current_title(self): """Convenience method to get the current title.""" return self._current_widget().title() def _current_widget(self): """Get the currently active widget from a command.""" widget = self._tabbed_browser.currentWidget() if widget is None: raise cmdexc.CommandError("No WebView available yet!") return widget def _open(self, url, tab=False, background=False, window=False, explicit=True): """Helper function to open a page. Args: url: The URL to open as QUrl. tab: Whether to open in a new tab. background: Whether to open in the background. window: Whether to open in a new window """ urlutils.raise_cmdexc_if_invalid(url) tabbed_browser = self._tabbed_browser cmdutils.check_exclusive((tab, background, window), 'tbw') if window: tabbed_browser = self._new_tabbed_browser() tabbed_browser.tabopen(url) elif tab: tabbed_browser.tabopen(url, background=False, explicit=explicit) elif background: tabbed_browser.tabopen(url, background=True, explicit=explicit) else: widget = self._current_widget() widget.openurl(url) def _cntwidget(self, count=None): """Return a widget based on a count/idx. Args: count: The tab index, or None. Return: The current widget if count is None. The widget with the given tab ID if count is given. None if no widget was found. """ if count is None: return self._tabbed_browser.currentWidget() elif 1 <= count <= self._count(): cmdutils.check_overflow(count + 1, 'int') return self._tabbed_browser.widget(count - 1) else: return None def _tab_focus_last(self): """Select the tab which was last focused.""" try: tab = objreg.get('last-focused-tab', scope='window', window=self._win_id) except KeyError: raise cmdexc.CommandError("No last focused tab!") idx = self._tabbed_browser.indexOf(tab) if idx == -1: raise cmdexc.CommandError("Last focused tab vanished!") self._set_current_index(idx) def _get_selection_override(self, prev, next_, opposite): """Helper function for tab_close to get the tab to select. Args: prev: Force selecting the tab before the current tab. next_: Force selecting the tab after the current tab. opposite: Force selecting the tab in the opposite direction of what's configured in 'tabs->select-on-remove'. Return: QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change should be made. """ cmdutils.check_exclusive((prev, next_, opposite), 'pno') if prev: return QTabBar.SelectLeftTab elif next_: return QTabBar.SelectRightTab elif opposite: conf_selection = config.get('tabs', 'select-on-remove') if conf_selection == QTabBar.SelectLeftTab: return QTabBar.SelectRightTab elif conf_selection == QTabBar.SelectRightTab: return QTabBar.SelectLeftTab elif conf_selection == QTabBar.SelectPreviousTab: raise cmdexc.CommandError( "-o is not supported with 'tabs->select-on-remove' set to " "'last-used'!") else: # pragma: no cover raise ValueError("Invalid select-on-remove value " "{!r}!".format(conf_selection)) return None @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def tab_close(self, prev=False, next_=False, opposite=False, count=None): """Close the current/[count]th tab. Args: prev: Force selecting the tab before the current tab. next_: Force selecting the tab after the current tab. opposite: Force selecting the tab in the opposite direction of what's configured in 'tabs->select-on-remove'. 
count: The tab index to close, or None """ tab = self._cntwidget(count) if tab is None: return tabbar = self._tabbed_browser.tabBar() selection_override = self._get_selection_override(prev, next_, opposite) if selection_override is None: self._tabbed_browser.close_tab(tab) else: old_selection_behavior = tabbar.selectionBehaviorOnRemove() tabbar.setSelectionBehaviorOnRemove(selection_override) self._tabbed_browser.close_tab(tab) tabbar.setSelectionBehaviorOnRemove(old_selection_behavior) @cmdutils.register(instance='command-dispatcher', name='open', maxsplit=0, scope='window') @cmdutils.argument('url', completion=usertypes.Completion.url) @cmdutils.argument('count', count=True) def openurl(self, url=None, implicit=False, bg=False, tab=False, window=False, count=None): """Open a URL in the current/[count]th tab. If the URL contains newlines, each line gets opened in its own tab. Args: url: The URL to open. bg: Open in a new background tab. tab: Open in a new tab. window: Open in a new window. implicit: If opening a new tab, treat the tab as implicit (like clicking on a link). count: The tab index to open the URL in, or None. """ if url is None: urls = [config.get('general', 'default-page')] else: urls = self._parse_url_input(url) for i, cur_url in enumerate(urls): if not window and i > 0: tab = False bg = True if tab or bg or window: self._open(cur_url, tab, bg, window, not implicit) else: curtab = self._cntwidget(count) if curtab is None: if count is None: # We want to open a URL in the current tab, but none # exists yet. self._tabbed_browser.tabopen(cur_url) else: # Explicit count with a tab that doesn't exist. return else: curtab.openurl(cur_url) def _parse_url(self, url, *, force_search=False): """Parse a URL or quickmark or search query. Args: url: The URL to parse. force_search: Whether to force a search even if the content can be interpreted as a URL or a path. Return: A URL that can be opened. """ try: return objreg.get('quickmark-manager').get(url) except urlmarks.Error: try: return urlutils.fuzzy_url(url, force_search=force_search) except urlutils.InvalidUrlError as e: # We don't use cmdexc.CommandError here as this can be # called async from edit_url message.error(str(e)) return None def _parse_url_input(self, url): """Parse a URL or newline-separated list of URLs. Args: url: The URL or list to parse. Return: A list of URLs that can be opened. """ force_search = False urllist = [u for u in url.split('\n') if u.strip()] if (len(urllist) > 1 and not urlutils.is_url(urllist[0]) and urlutils.get_path_if_valid(urllist[0], check_exists=True) is None): urllist = [url] force_search = True for cur_url in urllist: parsed = self._parse_url(cur_url, force_search=force_search) if parsed is not None: yield parsed @cmdutils.register(instance='command-dispatcher', name='reload', scope='window') @cmdutils.argument('count', count=True) def reloadpage(self, force=False, count=None): """Reload the current/[count]th tab. Args: count: The tab index to reload, or None. force: Bypass the page cache. """ tab = self._cntwidget(count) if tab is not None: tab.reload(force=force) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def stop(self, count=None): """Stop loading in the current/[count]th tab. Args: count: The tab index to stop, or None. 
""" tab = self._cntwidget(count) if tab is not None: tab.stop() @cmdutils.register(instance='command-dispatcher', name='print', scope='window') @cmdutils.argument('count', count=True) @cmdutils.argument('pdf', flag='f', metavar='file') def printpage(self, preview=False, count=None, *, pdf=None): """Print the current/[count]th tab. Args: preview: Show preview instead of printing. count: The tab index to print, or None. pdf: The file path to write the PDF to. """ tab = self._cntwidget(count) if tab is None: return try: if pdf: tab.printing.check_pdf_support() else: tab.printing.check_printer_support() except browsertab.WebTabError as e: raise cmdexc.CommandError(e) if preview: diag = QPrintPreviewDialog() diag.setAttribute(Qt.WA_DeleteOnClose) diag.setWindowFlags(diag.windowFlags() | Qt.WindowMaximizeButtonHint | Qt.WindowMinimizeButtonHint) diag.paintRequested.connect(tab.printing.to_printer) diag.exec_() elif pdf: pdf = os.path.expanduser(pdf) directory = os.path.dirname(pdf) if directory and not os.path.exists(directory): os.mkdir(directory) tab.printing.to_pdf(pdf) log.misc.debug("Print to file: {}".format(pdf)) else: diag = QPrintDialog() diag.setAttribute(Qt.WA_DeleteOnClose) diag.open(lambda: tab.printing.to_printer(diag.printer())) @cmdutils.register(instance='command-dispatcher', scope='window') def tab_clone(self, bg=False, window=False): """Duplicate the current tab. Args: bg: Open in a background tab. window: Open in a new window. Return: The new QWebView. """ cmdutils.check_exclusive((bg, window), 'bw') curtab = self._current_widget() cur_title = self._tabbed_browser.page_title(self._current_index()) # The new tab could be in a new tabbed_browser (e.g. because of # tabs-are-windows being set) if window: new_tabbed_browser = self._new_tabbed_browser() else: new_tabbed_browser = self._tabbed_browser newtab = new_tabbed_browser.tabopen(background=bg, explicit=True) new_tabbed_browser = objreg.get('tabbed-browser', scope='window', window=newtab.win_id) idx = new_tabbed_browser.indexOf(newtab) new_tabbed_browser.set_page_title(idx, cur_title) if config.get('tabs', 'show-favicons'): new_tabbed_browser.setTabIcon(idx, curtab.icon()) if config.get('tabs', 'tabs-are-windows'): new_tabbed_browser.window().setWindowIcon(curtab.icon()) newtab.data.keep_icon = True newtab.history.deserialize(curtab.history.serialize()) newtab.zoom.set_factor(curtab.zoom.factor()) return newtab @cmdutils.register(instance='command-dispatcher', scope='window') def tab_detach(self): """Detach the current tab to its own window.""" if self._count() < 2: raise cmdexc.CommandError("Cannot detach one tab.") url = self._current_url() self._open(url, window=True) cur_widget = self._current_widget() self._tabbed_browser.close_tab(cur_widget, add_undo=False) def _back_forward(self, tab, bg, window, count, forward): """Helper function for :back/:forward.""" history = self._current_widget().history # Catch common cases before e.g. 
cloning tab if not forward and not history.can_go_back(): raise cmdexc.CommandError("At beginning of history.") elif forward and not history.can_go_forward(): raise cmdexc.CommandError("At end of history.") if tab or bg or window: widget = self.tab_clone(bg, window) else: widget = self._current_widget() for _ in range(count): if forward: if not widget.history.can_go_forward(): raise cmdexc.CommandError("At end of history.") widget.history.forward() else: if not widget.history.can_go_back(): raise cmdexc.CommandError("At beginning of history.") widget.history.back() @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def back(self, tab=False, bg=False, window=False, count=1): """Go back in the history of the current tab. Args: tab: Go back in a new tab. bg: Go back in a background tab. window: Go back in a new window. count: How many pages to go back. """ self._back_forward(tab, bg, window, count, forward=False) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def forward(self, tab=False, bg=False, window=False, count=1): """Go forward in the history of the current tab. Args: tab: Go forward in a new tab. bg: Go forward in a background tab. window: Go forward in a new window. count: How many pages to go forward. """ self._back_forward(tab, bg, window, count, forward=True) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('where', choices=['prev', 'next', 'up', 'increment', 'decrement']) @cmdutils.argument('count', count=True) def navigate(self, where: str, tab=False, bg=False, window=False, count=1): """Open typical prev/next links or navigate using the URL path. This tries to automatically click on typical _Previous Page_ or _Next Page_ links using some heuristics. Alternatively it can navigate by changing the current URL. Args: where: What to open. - `prev`: Open a _previous_ link. - `next`: Open a _next_ link. - `up`: Go up a level in the current URL. - `increment`: Increment the last number in the URL. - `decrement`: Decrement the last number in the URL. tab: Open in a new tab. bg: Open in a background tab. window: Open in a new window. count: For `increment` and `decrement`, the number to change the URL by. For `up`, the number of levels to go up in the URL. """ # save the pre-jump position in the special ' mark self.set_mark("'") cmdutils.check_exclusive((tab, bg, window), 'tbw') widget = self._current_widget() url = self._current_url().adjusted(QUrl.RemoveFragment) handlers = { 'prev': functools.partial(navigate.prevnext, prev=True), 'next': functools.partial(navigate.prevnext, prev=False), 'up': navigate.path_up, 'decrement': functools.partial(navigate.incdec, inc_or_dec='decrement'), 'increment': functools.partial(navigate.incdec, inc_or_dec='increment'), } try: if where in ['prev', 'next']: handler = handlers[where] handler(browsertab=widget, win_id=self._win_id, baseurl=url, tab=tab, background=bg, window=window) elif where in ['up', 'increment', 'decrement']: new_url = handlers[where](url, count) self._open(new_url, tab, bg, window) else: # pragma: no cover raise ValueError("Got called with invalid value {} for " "`where'.".format(where)) except navigate.Error as e: raise cmdexc.CommandError(e) @cmdutils.register(instance='command-dispatcher', hide=True, scope='window') @cmdutils.argument('count', count=True) def scroll_px(self, dx: int, dy: int, count=1): """Scroll the current tab by 'count * dx/dy' pixels. 
Args: dx: How much to scroll in x-direction. dy: How much to scroll in y-direction. count: multiplier """ dx *= count dy *= count cmdutils.check_overflow(dx, 'int') cmdutils.check_overflow(dy, 'int') self._current_widget().scroller.delta(dx, dy) @cmdutils.register(instance='command-dispatcher', hide=True, scope='window') @cmdutils.argument('count', count=True) def scroll(self, direction: typing.Union[str, int], count=1): """Scroll the current tab in the given direction. Args: direction: In which direction to scroll (up/down/left/right/top/bottom). count: multiplier """ tab = self._current_widget() funcs = { 'up': tab.scroller.up, 'down': tab.scroller.down, 'left': tab.scroller.left, 'right': tab.scroller.right, 'top': tab.scroller.top, 'bottom': tab.scroller.bottom, 'page-up': tab.scroller.page_up, 'page-down': tab.scroller.page_down, } try: func = funcs[direction] except KeyError: expected_values = ', '.join(sorted(funcs)) raise cmdexc.CommandError("Invalid value {!r} for direction - " "expected one of: {}".format( direction, expected_values)) if direction in ['top', 'bottom']: func() else: func(count=count) @cmdutils.register(instance='command-dispatcher', hide=True, scope='window') @cmdutils.argument('count', count=True) @cmdutils.argument('horizontal', flag='x') def scroll_perc(self, perc: float=None, horizontal=False, count=None): """Scroll to a specific percentage of the page. The percentage can be given either as argument or as count. If no percentage is given, the page is scrolled to the end. Args: perc: Percentage to scroll. horizontal: Scroll horizontally instead of vertically. count: Percentage to scroll. """ # save the pre-jump position in the special ' mark self.set_mark("'") if perc is None and count is None: perc = 100 elif count is not None: perc = count if horizontal: x = perc y = None else: x = None y = perc self._current_widget().scroller.to_perc(x, y) @cmdutils.register(instance='command-dispatcher', hide=True, scope='window') @cmdutils.argument('count', count=True) @cmdutils.argument('top_navigate', metavar='ACTION', choices=('prev', 'decrement')) @cmdutils.argument('bottom_navigate', metavar='ACTION', choices=('next', 'increment')) def scroll_page(self, x: float, y: float, *, top_navigate: str=None, bottom_navigate: str=None, count=1): """Scroll the frame page-wise. Args: x: How many pages to scroll to the right. y: How many pages to scroll down. bottom_navigate: :navigate action (next, increment) to run when scrolling down at the bottom of the page. top_navigate: :navigate action (prev, decrement) to run when scrolling up at the top of the page. 
count: multiplier """ tab = self._current_widget() if not tab.url().isValid(): # See https://github.com/The-Compiler/qutebrowser/issues/701 return if bottom_navigate is not None and tab.scroller.at_bottom(): self.navigate(bottom_navigate) return elif top_navigate is not None and tab.scroller.at_top(): self.navigate(top_navigate) return try: tab.scroller.delta_page(count * x, count * y) except OverflowError: raise cmdexc.CommandError( "Numeric argument is too large for internal int " "representation.") def _yank_url(self, what): """Helper method for yank() to get the URL to copy.""" assert what in ['url', 'pretty-url'], what flags = QUrl.RemovePassword if what == 'pretty-url': flags |= QUrl.DecodeReserved else: flags |= QUrl.FullyEncoded url = QUrl(self._current_url()) url_query = QUrlQuery() url_query_str = url.query() if '&' not in url_query_str and ';' in url_query_str: url_query.setQueryDelimiters('=', ';') url_query.setQuery(url_query_str) for key in dict(url_query.queryItems()): if key in config.get('general', 'yank-ignored-url-parameters'): url_query.removeQueryItem(key) url.setQuery(url_query) return url.toString(flags) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('what', choices=['selection', 'url', 'pretty-url', 'title', 'domain']) def yank(self, what='url', sel=False, keep=False): """Yank something to the clipboard or primary selection. Args: what: What to yank. - `url`: The current URL. - `pretty-url`: The URL in pretty decoded form. - `title`: The current page's title. - `domain`: The current scheme, domain, and port number. - `selection`: The selection under the cursor. sel: Use the primary selection instead of the clipboard. keep: Stay in visual mode after yanking the selection. """ if what == 'title': s = self._tabbed_browser.page_title(self._current_index()) elif what == 'domain': port = self._current_url().port() s = '{}://{}{}'.format(self._current_url().scheme(), self._current_url().host(), ':' + str(port) if port > -1 else '') elif what in ['url', 'pretty-url']: s = self._yank_url(what) what = 'URL' # For printing elif what == 'selection': caret = self._current_widget().caret s = caret.selection() if not caret.has_selection() or not s: message.info("Nothing to yank") return else: # pragma: no cover raise ValueError("Invalid value {!r} for `what'.".format(what)) if sel and utils.supports_selection(): target = "primary selection" else: sel = False target = "clipboard" utils.set_clipboard(s, selection=sel) if what != 'selection': message.info("Yanked {} to {}: {}".format(what, target, s)) else: message.info("{} {} yanked to {}".format( len(s), "char" if len(s) == 1 else "chars", target)) if not keep: modeman.leave(self._win_id, KeyMode.caret, "yank selected", maybe=True) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def zoom_in(self, count=1): """Increase the zoom level for the current tab. Args: count: How many steps to zoom in. """ tab = self._current_widget() try: perc = tab.zoom.offset(count) except ValueError as e: raise cmdexc.CommandError(e) message.info("Zoom level: {}%".format(perc)) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def zoom_out(self, count=1): """Decrease the zoom level for the current tab. Args: count: How many steps to zoom out. 
""" tab = self._current_widget() try: perc = tab.zoom.offset(-count) except ValueError as e: raise cmdexc.CommandError(e) message.info("Zoom level: {}%".format(perc)) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def zoom(self, zoom: int=None, count=None): """Set the zoom level for the current tab. The zoom can be given as argument or as [count]. If neither is given, the zoom is set to the default zoom. If both are given, use [count]. Args: zoom: The zoom percentage to set. count: The zoom percentage to set. """ level = count if count is not None else zoom if level is None: level = config.get('ui', 'default-zoom') tab = self._current_widget() try: tab.zoom.set_factor(float(level) / 100) except ValueError: raise cmdexc.CommandError("Can't zoom {}%!".format(level)) message.info("Zoom level: {}%".format(level)) @cmdutils.register(instance='command-dispatcher', scope='window') def tab_only(self, prev=False, next_=False): """Close all tabs except for the current one. Args: prev: Keep tabs before the current. next_: Keep tabs after the current. """ cmdutils.check_exclusive((prev, next_), 'pn') cur_idx = self._tabbed_browser.currentIndex() assert cur_idx != -1 for i, tab in enumerate(self._tabbed_browser.widgets()): if (i == cur_idx or (prev and i < cur_idx) or (next_ and i > cur_idx)): continue else: self._tabbed_browser.close_tab(tab) @cmdutils.register(instance='command-dispatcher', scope='window') def undo(self): """Re-open a closed tab (optionally skipping [count] closed tabs).""" try: self._tabbed_browser.undo() except IndexError: raise cmdexc.CommandError("Nothing to undo!") @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def tab_prev(self, count=1): """Switch to the previous tab, or switch [count] tabs back. Args: count: How many tabs to switch back. """ if self._count() == 0: # Running :tab-prev after last tab was closed # See https://github.com/The-Compiler/qutebrowser/issues/1448 return newidx = self._current_index() - count if newidx >= 0: self._set_current_index(newidx) elif config.get('tabs', 'wrap'): self._set_current_index(newidx % self._count()) else: raise cmdexc.CommandError("First tab") @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def tab_next(self, count=1): """Switch to the next tab, or switch [count] tabs forward. Args: count: How many tabs to switch forward. """ if self._count() == 0: # Running :tab-next after last tab was closed # See https://github.com/The-Compiler/qutebrowser/issues/1448 return newidx = self._current_index() + count if newidx < self._count(): self._set_current_index(newidx) elif config.get('tabs', 'wrap'): self._set_current_index(newidx % self._count()) else: raise cmdexc.CommandError("Last tab") @cmdutils.register(instance='command-dispatcher', scope='window', deprecated="Use :open {clipboard}") def paste(self, sel=False, tab=False, bg=False, window=False): """Open a page from the clipboard. If the pasted text contains newlines, each line gets opened in its own tab. Args: sel: Use the primary selection instead of the clipboard. tab: Open in a new tab. bg: Open in a background tab. window: Open in new window. 
""" force_search = False if not utils.supports_selection(): sel = False try: text = utils.get_clipboard(selection=sel) except utils.ClipboardError as e: raise cmdexc.CommandError(e) text_urls = [u for u in text.split('\n') if u.strip()] if (len(text_urls) > 1 and not urlutils.is_url(text_urls[0]) and urlutils.get_path_if_valid( text_urls[0], check_exists=True) is None): force_search = True text_urls = [text] for i, text_url in enumerate(text_urls): if not window and i > 0: tab = False bg = True try: url = urlutils.fuzzy_url(text_url, force_search=force_search) except urlutils.InvalidUrlError as e: raise cmdexc.CommandError(e) self._open(url, tab, bg, window) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('index', completion=usertypes.Completion.tab) def buffer(self, index): """Select tab by index or url/title best match. Focuses window if necessary. Args: index: The [win_id/]index of the tab to focus. Or a substring in which case the closest match will be focused. """ index_parts = index.split('/', 1) try: for part in index_parts: int(part) except ValueError: model = instances.get(usertypes.Completion.tab) sf = sortfilter.CompletionFilterModel(source=model) sf.set_pattern(index) if sf.count() > 0: index = sf.data(sf.first_item()) index_parts = index.split('/', 1) else: raise cmdexc.CommandError( "No matching tab for: {}".format(index)) if len(index_parts) == 2: win_id = int(index_parts[0]) idx = int(index_parts[1]) elif len(index_parts) == 1: idx = int(index_parts[0]) active_win = objreg.get('app').activeWindow() if active_win is None: # Not sure how you enter a command without an active window... raise cmdexc.CommandError( "No window specified and couldn't find active window!") win_id = active_win.win_id if win_id not in objreg.window_registry: raise cmdexc.CommandError( "There's no window with id {}!".format(win_id)) tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) if not 0 < idx <= tabbed_browser.count(): raise cmdexc.CommandError( "There's no tab with index {}!".format(idx)) window = objreg.window_registry[win_id] window.activateWindow() window.raise_() tabbed_browser.setCurrentIndex(idx-1) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('index', choices=['last']) @cmdutils.argument('count', count=True) def tab_focus(self, index: typing.Union[str, int]=None, count=None): """Select the tab given as argument/[count]. If neither count nor index are given, it behaves like tab-next. If both are given, use count. Args: index: The tab index to focus, starting with 1. The special value `last` focuses the last focused tab (regardless of count). Negative indices count from the end, such that -1 is the last tab. count: The tab index to focus, starting with 1. """ if index == 'last': self._tab_focus_last() return index = count if count is not None else index if index is None: self.tab_next() return if index < 0: index = self._count() + index + 1 if 1 <= index <= self._count(): self._set_current_index(index - 1) else: raise cmdexc.CommandError("There's no tab with index {}!".format( index)) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('index', choices=['+', '-']) @cmdutils.argument('count', count=True) def tab_move(self, index: typing.Union[str, int]=None, count=None): """Move the current tab according to the argument and [count]. If neither is given, move it to the first position. 
Args: index: `+` or `-` to move relative to the current tab by count, or a default of 1 space. A tab index to move to that index. count: If moving relatively: Offset. If moving absolutely: New position (default: 0). This overrides the index argument, if given. """ if index in ['+', '-']: # relative moving new_idx = self._current_index() delta = 1 if count is None else count if index == '-': new_idx -= delta elif index == '+': # pragma: no branch new_idx += delta if config.get('tabs', 'wrap'): new_idx %= self._count() else: # absolute moving if count is not None: new_idx = count - 1 elif index is not None: new_idx = index - 1 if index >= 0 else index + self._count() else: new_idx = 0 if not 0 <= new_idx < self._count(): raise cmdexc.CommandError("Can't move tab to position {}!".format( new_idx + 1)) tab = self._current_widget() cur_idx = self._current_index() icon = self._tabbed_browser.tabIcon(cur_idx) label = self._tabbed_browser.page_title(cur_idx) cmdutils.check_overflow(cur_idx, 'int') cmdutils.check_overflow(new_idx, 'int') self._tabbed_browser.setUpdatesEnabled(False) try: color = self._tabbed_browser.tab_indicator_color(cur_idx) self._tabbed_browser.removeTab(cur_idx) self._tabbed_browser.insertTab(new_idx, tab, icon, label) self._set_current_index(new_idx) self._tabbed_browser.set_tab_indicator_color(new_idx, color) finally: self._tabbed_browser.setUpdatesEnabled(True) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0, no_replace_variables=True) def spawn(self, cmdline, userscript=False, verbose=False, detach=False): """Spawn a command in a shell. Args: userscript: Run the command as a userscript. You can use an absolute path, or store the userscript in one of those locations: - `~/.local/share/qutebrowser/userscripts` (or `$XDG_DATA_DIR`) - `/usr/share/qutebrowser/userscripts` verbose: Show notifications when the command started/exited. detach: Whether the command should be detached from qutebrowser. cmdline: The commandline to execute. """ try: cmd, *args = shlex.split(cmdline) except ValueError as e: raise cmdexc.CommandError("Error while splitting command: " "{}".format(e)) args = runners.replace_variables(self._win_id, args) log.procs.debug("Executing {} with args {}, userscript={}".format( cmd, args, userscript)) if userscript: # ~ expansion is handled by the userscript module. self._run_userscript(cmd, *args, verbose=verbose) else: cmd = os.path.expanduser(cmd) proc = guiprocess.GUIProcess(what='command', verbose=verbose, parent=self._tabbed_browser) if detach: proc.start_detached(cmd, args) else: proc.start(cmd, args) @cmdutils.register(instance='command-dispatcher', scope='window') def home(self): """Open main startpage in current tab.""" self.openurl(config.get('general', 'startpage')[0]) def _run_userscript(self, cmd, *args, verbose=False): """Run a userscript given as argument. Args: cmd: The userscript to run. args: Arguments to pass to the userscript. verbose: Show notifications when the command started/exited. """ env = { 'QUTE_MODE': 'command', } idx = self._current_index() if idx != -1: env['QUTE_TITLE'] = self._tabbed_browser.page_title(idx) tab = self._tabbed_browser.currentWidget() if tab is not None and tab.caret.has_selection(): env['QUTE_SELECTED_TEXT'] = tab.caret.selection() try: env['QUTE_SELECTED_HTML'] = tab.caret.selection(html=True) except browsertab.UnsupportedOperationError: pass # FIXME:qtwebengine: If tab is None, run_async will fail! 
try: url = self._tabbed_browser.current_url() except qtutils.QtValueError: pass else: env['QUTE_URL'] = url.toString(QUrl.FullyEncoded) try: userscripts.run_async(tab, cmd, *args, win_id=self._win_id, env=env, verbose=verbose) except userscripts.Error as e: raise cmdexc.CommandError(e) @cmdutils.register(instance='command-dispatcher', scope='window') def quickmark_save(self): """Save the current page as a quickmark.""" quickmark_manager = objreg.get('quickmark-manager') quickmark_manager.prompt_save(self._current_url()) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0) @cmdutils.argument('name', completion=usertypes.Completion.quickmark_by_name) def quickmark_load(self, name, tab=False, bg=False, window=False): """Load a quickmark. Args: name: The name of the quickmark to load. tab: Load the quickmark in a new tab. bg: Load the quickmark in a new background tab. window: Load the quickmark in a new window. """ try: url = objreg.get('quickmark-manager').get(name) except urlmarks.Error as e: raise cmdexc.CommandError(str(e)) self._open(url, tab, bg, window) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0) @cmdutils.argument('name', completion=usertypes.Completion.quickmark_by_name) def quickmark_del(self, name=None): """Delete a quickmark. Args: name: The name of the quickmark to delete. If not given, delete the quickmark for the current page (choosing one arbitrarily if there are more than one). """ quickmark_manager = objreg.get('quickmark-manager') if name is None: url = self._current_url() try: name = quickmark_manager.get_by_qurl(url) except urlmarks.DoesNotExistError as e: raise cmdexc.CommandError(str(e)) try: quickmark_manager.delete(name) except KeyError: raise cmdexc.CommandError("Quickmark '{}' not found!".format(name)) @cmdutils.register(instance='command-dispatcher', scope='window') def bookmark_add(self, url=None, title=None, toggle=False): """Save the current page as a bookmark, or a specific url. If no url and title are provided, then save the current page as a bookmark. If a url and title have been provided, then save the given url as a bookmark with the provided title. You can view all saved bookmarks on the link:qute://bookmarks[bookmarks page]. Args: url: url to save as a bookmark. If None, use url of current page. title: title of the new bookmark. toggle: remove the bookmark instead of raising an error if it already exists. """ if url and not title: raise cmdexc.CommandError('Title must be provided if url has ' 'been provided') bookmark_manager = objreg.get('bookmark-manager') if url is None: url = self._current_url() else: try: url = urlutils.fuzzy_url(url) except urlutils.InvalidUrlError as e: raise cmdexc.CommandError(e) if not title: title = self._current_title() try: was_added = bookmark_manager.add(url, title, toggle=toggle) except urlmarks.Error as e: raise cmdexc.CommandError(str(e)) else: msg = "Bookmarked {}!" if was_added else "Removed bookmark {}!" message.info(msg.format(url.toDisplayString())) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0) @cmdutils.argument('url', completion=usertypes.Completion.bookmark_by_url) def bookmark_load(self, url, tab=False, bg=False, window=False, delete=False): """Load a bookmark. Args: url: The url of the bookmark to load. tab: Load the bookmark in a new tab. bg: Load the bookmark in a new background tab. window: Load the bookmark in a new window. delete: Whether to delete the bookmark afterwards. 
""" try: qurl = urlutils.fuzzy_url(url) except urlutils.InvalidUrlError as e: raise cmdexc.CommandError(e) self._open(qurl, tab, bg, window) if delete: self.bookmark_del(url) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0) @cmdutils.argument('url', completion=usertypes.Completion.bookmark_by_url) def bookmark_del(self, url=None): """Delete a bookmark. Args: url: The url of the bookmark to delete. If not given, use the current page's url. """ if url is None: url = self._current_url().toString(QUrl.RemovePassword | QUrl.FullyEncoded) try: objreg.get('bookmark-manager').delete(url) except KeyError: raise cmdexc.CommandError("Bookmark '{}' not found!".format(url)) @cmdutils.register(instance='command-dispatcher', hide=True, scope='window') def follow_selected(self, *, tab=False): """Follow the selected text. Args: tab: Load the selected link in a new tab. """ try: self._current_widget().caret.follow_selected(tab=tab) except browsertab.WebTabError as e: raise cmdexc.CommandError(str(e)) @cmdutils.register(instance='command-dispatcher', name='inspector', scope='window') def toggle_inspector(self): """Toggle the web inspector. Note: Due a bug in Qt, the inspector will show incorrect request headers in the network tab. """ tab = self._current_widget() # FIXME:qtwebengine have a proper API for this page = tab._widget.page() # pylint: disable=protected-access try: if tab.data.inspector is None: tab.data.inspector = inspector.create() tab.data.inspector.inspect(page) else: tab.data.inspector.toggle(page) except inspector.WebInspectorError as e: raise cmdexc.CommandError(e) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('dest_old', hide=True) def download(self, url=None, dest_old=None, *, mhtml_=False, dest=None): """Download a given URL, or current page if no URL given. The form `:download [url] [dest]` is deprecated, use `:download --dest [dest] [url]` instead. Args: url: The URL to download. If not given, download the current page. dest_old: (deprecated) Same as dest. dest: The file path to write the download to, or None to ask. mhtml_: Download the current page and all assets as mhtml file. """ if dest_old is not None: message.warning(":download [url] [dest] is deprecated - use " ":download --dest [dest] [url]") if dest is not None: raise cmdexc.CommandError("Can't give two destinations for the" " download.") dest = dest_old # FIXME:qtwebengine do this with the QtWebEngine download manager? download_manager = objreg.get('qtnetwork-download-manager', scope='window', window=self._win_id) if url: if mhtml_: raise cmdexc.CommandError("Can only download the current page" " as mhtml.") url = urlutils.qurl_from_user_input(url) urlutils.raise_cmdexc_if_invalid(url) if dest is None: target = None else: target = downloads.FileDownloadTarget(dest) download_manager.get(url, target=target) elif mhtml_: self._download_mhtml(dest) else: qnam = self._current_widget().networkaccessmanager() if dest is None: target = None else: target = downloads.FileDownloadTarget(dest) download_manager.get(self._current_url(), qnam=qnam, target=target) def _download_mhtml(self, dest=None): """Download the current page as an MHTML file, including all assets. Args: dest: The file path to write the download to. 
""" tab = self._current_widget() if tab.backend == usertypes.Backend.QtWebEngine: raise cmdexc.CommandError("Download --mhtml is not implemented " "with QtWebEngine yet") if dest is None: suggested_fn = self._current_title() + ".mht" suggested_fn = utils.sanitize_filename(suggested_fn) filename = downloads.immediate_download_path() if filename is not None: mhtml.start_download_checked(filename, tab=tab) else: question = downloads.get_filename_question( suggested_filename=suggested_fn, url=tab.url(), parent=tab) question.answered.connect(functools.partial( mhtml.start_download_checked, tab=tab)) message.global_bridge.ask(question, blocking=False) else: mhtml.start_download_checked(dest, tab=tab) @cmdutils.register(instance='command-dispatcher', scope='window') def view_source(self): """Show the source of the current page.""" # pylint: disable=no-member # WORKAROUND for https://bitbucket.org/logilab/pylint/issue/491/ tab = self._current_widget() if tab.data.viewing_source: raise cmdexc.CommandError("Already viewing source!") def show_source_cb(source): """Show source as soon as it's ready.""" lexer = pygments.lexers.HtmlLexer() formatter = pygments.formatters.HtmlFormatter(full=True, linenos='table') highlighted = pygments.highlight(source, lexer, formatter) try: current_url = self._current_url() except cmdexc.CommandError as e: message.error(str(e)) return new_tab = self._tabbed_browser.tabopen(explicit=True) new_tab.set_html(highlighted, current_url) new_tab.data.viewing_source = True tab.dump_async(show_source_cb) @cmdutils.register(instance='command-dispatcher', scope='window', debug=True) def debug_dump_page(self, dest, plain=False): """Dump the current page's content to a file. Args: dest: Where to write the file to. plain: Write plain text instead of HTML. """ tab = self._current_widget() dest = os.path.expanduser(dest) def callback(data): try: with open(dest, 'w', encoding='utf-8') as f: f.write(data) except OSError as e: message.error('Could not write page: {}'.format(e)) else: message.info("Dumped page to {}.".format(dest)) tab.dump_async(callback, plain=plain) @cmdutils.register(instance='command-dispatcher', name='help', scope='window') @cmdutils.argument('topic', completion=usertypes.Completion.helptopic) def show_help(self, tab=False, bg=False, window=False, topic=None): r"""Show help about a command or setting. Args: tab: Open in a new tab. bg: Open in a background tab. window: Open in a new window. topic: The topic to show help for. - :__command__ for commands. - __section__\->__option__ for settings. """ if topic is None: path = 'index.html' elif topic.startswith(':'): command = topic[1:] if command not in cmdutils.cmd_dict: raise cmdexc.CommandError("Invalid command {}!".format( command)) path = 'commands.html#{}'.format(command) elif '->' in topic: parts = topic.split('->') if len(parts) != 2: raise cmdexc.CommandError("Invalid help topic {}!".format( topic)) try: config.get(*parts) except configexc.NoSectionError: raise cmdexc.CommandError("Invalid section {}!".format( parts[0])) except configexc.NoOptionError: raise cmdexc.CommandError("Invalid option {}!".format( parts[1])) path = 'settings.html#{}'.format(topic.replace('->', '-')) else: raise cmdexc.CommandError("Invalid help topic {}!".format(topic)) url = QUrl('qute://help/{}'.format(path)) self._open(url, tab, bg, window) @cmdutils.register(instance='command-dispatcher', scope='window') def messages(self, level='error', plain=False, tab=False, bg=False, window=False): """Show a log of past messages. 
Args: level: Include messages with `level` or higher severity. Valid values: vdebug, debug, info, warning, error, critical. plain: Whether to show plaintext (as opposed to html). tab: Open in a new tab. bg: Open in a background tab. window: Open in a new window. """ if level.upper() not in log.LOG_LEVELS: raise cmdexc.CommandError("Invalid log level {}!".format(level)) if plain: url = QUrl('qute://plainlog?level={}'.format(level)) else: url = QUrl('qute://log?level={}'.format(level)) self._open(url, tab, bg, window) def _open_editor_cb(self, elem): """Open editor after the focus elem was found in open_editor.""" if elem is None: message.error("No element focused!") return if not elem.is_editable(strict=True): message.error("Focused element is not editable!") return text = elem.value() ed = editor.ExternalEditor(self._tabbed_browser) ed.editing_finished.connect(functools.partial( self.on_editing_finished, elem)) ed.edit(text) @cmdutils.register(instance='command-dispatcher', hide=True, scope='window') def open_editor(self): """Open an external editor with the currently selected form field. The editor which should be launched can be configured via the `general -> editor` config option. """ tab = self._current_widget() tab.elements.find_focused(self._open_editor_cb) def on_editing_finished(self, elem, text): """Write the editor text into the form field and clean up tempfile. Callback for GUIProcess when the editor was closed. Args: elem: The WebElementWrapper which was modified. text: The new text to insert. """ try: elem.set_value(text) except webelem.Error as e: raise cmdexc.CommandError(str(e)) @cmdutils.register(instance='command-dispatcher', deprecated="Use :insert-text {primary}", modes=[KeyMode.insert], hide=True, scope='window', backend=usertypes.Backend.QtWebKit) def paste_primary(self): """Paste the primary selection at cursor position.""" try: self.insert_text(utils.get_clipboard(selection=True)) except utils.SelectionUnsupportedError: self.insert_text(utils.get_clipboard()) @cmdutils.register(instance='command-dispatcher', maxsplit=0, scope='window') def insert_text(self, text): """Insert text at cursor position. Args: text: The text to insert. """ tab = self._current_widget() def _insert_text_cb(elem): if elem is None: message.error("No element focused!") return try: elem.insert_text(text) except webelem.Error as e: message.error(str(e)) return tab.elements.find_focused(_insert_text_cb) @cmdutils.register(instance='command-dispatcher', scope='window', hide=True) @cmdutils.argument('filter_', choices=['id']) def click_element(self, filter_: str, value, *, target: usertypes.ClickTarget= usertypes.ClickTarget.normal): """Click the element matching the given filter. The given filter needs to result in exactly one element, otherwise, an error is shown. Args: filter_: How to filter the elements. id: Get an element based on its ID. value: The value to filter for. target: How to open the clicked element (normal/tab/tab-bg/window). 
""" tab = self._current_widget() def single_cb(elem): """Click a single element.""" if elem is None: message.error("No element found with id {}!".format(value)) return try: elem.click(target) except webelem.Error as e: message.error(str(e)) return # def multiple_cb(elems): # """Click multiple elements (with only one expected).""" # if not elems: # message.error("No element found!") # return # elif len(elems) != 1: # message.error("{} elements found!".format(len(elems))) # return # elems[0].click(target) handlers = { 'id': (tab.elements.find_id, single_cb), } handler, callback = handlers[filter_] handler(value, callback) def _search_cb(self, found, *, tab, old_scroll_pos, options, text, prev): """Callback called from search/search_next/search_prev. Args: found: Whether the text was found. tab: The AbstractTab in which the search was made. old_scroll_pos: The scroll position (QPoint) before the search. options: The options (dict) the search was made with. text: The text searched for. prev: Whether we're searching backwards (i.e. :search-prev) """ # :search/:search-next without reverse -> down # :search/:search-next with reverse -> up # :search-prev without reverse -> up # :search-prev with reverse -> down going_up = options['reverse'] ^ prev if found: # Check if the scroll position got smaller and show info. if not going_up and tab.scroller.pos_px().y() < old_scroll_pos.y(): message.info("Search hit BOTTOM, continuing at TOP") elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y(): message.info("Search hit TOP, continuing at BOTTOM") else: message.warning("Text '{}' not found on page!".format(text)) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0) def search(self, text="", reverse=False): """Search for a text on the current page. With no text, clear results. Args: text: The text to search for. reverse: Reverse search direction. """ self.set_mark("'") tab = self._current_widget() tab.search.clear() options = { 'ignore_case': config.get('general', 'ignore-case'), 'reverse': reverse, } self._tabbed_browser.search_text = text self._tabbed_browser.search_options = dict(options) if text: cb = functools.partial(self._search_cb, tab=tab, old_scroll_pos=tab.scroller.pos_px(), options=options, text=text, prev=False) else: cb = None options['result_cb'] = cb tab.search.search(text, **options) @cmdutils.register(instance='command-dispatcher', hide=True, scope='window') @cmdutils.argument('count', count=True) def search_next(self, count=1): """Continue the search to the ([count]th) next term. Args: count: How many elements to ignore. """ tab = self._current_widget() window_text = self._tabbed_browser.search_text window_options = self._tabbed_browser.search_options if window_text is None: raise cmdexc.CommandError("No search done yet.") self.set_mark("'") if window_text is not None and window_text != tab.search.text: tab.search.clear() tab.search.search(window_text, **window_options) count -= 1 if count == 0: return cb = functools.partial(self._search_cb, tab=tab, old_scroll_pos=tab.scroller.pos_px(), options=window_options, text=window_text, prev=False) for _ in range(count - 1): tab.search.next_result() tab.search.next_result(result_cb=cb) @cmdutils.register(instance='command-dispatcher', hide=True, scope='window') @cmdutils.argument('count', count=True) def search_prev(self, count=1): """Continue the search to the ([count]th) previous term. Args: count: How many elements to ignore. 
""" tab = self._current_widget() window_text = self._tabbed_browser.search_text window_options = self._tabbed_browser.search_options if window_text is None: raise cmdexc.CommandError("No search done yet.") self.set_mark("'") if window_text is not None and window_text != tab.search.text: tab.search.clear() tab.search.search(window_text, **window_options) count -= 1 if count == 0: return cb = functools.partial(self._search_cb, tab=tab, old_scroll_pos=tab.scroller.pos_px(), options=window_options, text=window_text, prev=True) for _ in range(count - 1): tab.search.prev_result() tab.search.prev_result(result_cb=cb) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_next_line(self, count=1): """Move the cursor or selection to the next line. Args: count: How many lines to move. """ self._current_widget().caret.move_to_next_line(count) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_prev_line(self, count=1): """Move the cursor or selection to the prev line. Args: count: How many lines to move. """ self._current_widget().caret.move_to_prev_line(count) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_next_char(self, count=1): """Move the cursor or selection to the next char. Args: count: How many lines to move. """ self._current_widget().caret.move_to_next_char(count) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_prev_char(self, count=1): """Move the cursor or selection to the previous char. Args: count: How many chars to move. """ self._current_widget().caret.move_to_prev_char(count) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_end_of_word(self, count=1): """Move the cursor or selection to the end of the word. Args: count: How many words to move. """ self._current_widget().caret.move_to_end_of_word(count) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_next_word(self, count=1): """Move the cursor or selection to the next word. Args: count: How many words to move. """ self._current_widget().caret.move_to_next_word(count) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_prev_word(self, count=1): """Move the cursor or selection to the previous word. Args: count: How many words to move. 
""" self._current_widget().caret.move_to_prev_word(count) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') def move_to_start_of_line(self): """Move the cursor or selection to the start of the line.""" self._current_widget().caret.move_to_start_of_line() @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') def move_to_end_of_line(self): """Move the cursor or selection to the end of line.""" self._current_widget().caret.move_to_end_of_line() @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_start_of_next_block(self, count=1): """Move the cursor or selection to the start of next block. Args: count: How many blocks to move. """ self._current_widget().caret.move_to_start_of_next_block(count) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_start_of_prev_block(self, count=1): """Move the cursor or selection to the start of previous block. Args: count: How many blocks to move. """ self._current_widget().caret.move_to_start_of_prev_block(count) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_end_of_next_block(self, count=1): """Move the cursor or selection to the end of next block. Args: count: How many blocks to move. """ self._current_widget().caret.move_to_end_of_next_block(count) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_end_of_prev_block(self, count=1): """Move the cursor or selection to the end of previous block. Args: count: How many blocks to move. """ self._current_widget().caret.move_to_end_of_prev_block(count) @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') def move_to_start_of_document(self): """Move the cursor or selection to the start of the document.""" self._current_widget().caret.move_to_start_of_document() @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') def move_to_end_of_document(self): """Move the cursor or selection to the end of the document.""" self._current_widget().caret.move_to_end_of_document() @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') def toggle_selection(self): """Toggle caret selection mode.""" self._current_widget().caret.toggle_selection() @cmdutils.register(instance='command-dispatcher', hide=True, modes=[KeyMode.caret], scope='window') def drop_selection(self): """Drop selection and keep selection mode enabled.""" self._current_widget().caret.drop_selection() @cmdutils.register(instance='command-dispatcher', scope='window', debug=True) @cmdutils.argument('count', count=True) def debug_webaction(self, action, count=1): """Execute a webaction. See http://doc.qt.io/qt-5/qwebpage.html#WebAction-enum for the available actions. Args: action: The action to execute, e.g. MoveToNextChar. count: How many times to repeat the action. 
""" tab = self._current_widget() if tab.backend == usertypes.Backend.QtWebKit: assert QWebPage is not None member = getattr(QWebPage, action, None) base = QWebPage.WebAction elif tab.backend == usertypes.Backend.QtWebEngine: assert QWebEnginePage is not None member = getattr(QWebEnginePage, action, None) base = QWebEnginePage.WebAction if not isinstance(member, base): raise cmdexc.CommandError("{} is not a valid web action!".format( action)) for _ in range(count): # This whole command is backend-specific anyways, so it makes no # sense to introduce some API for this. # pylint: disable=protected-access tab._widget.triggerPageAction(member) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0, no_cmd_split=True) def jseval(self, js_code, quiet=False, *, world: typing.Union[usertypes.JsWorld, int]=None): """Evaluate a JavaScript string. Args: js_code: The string to evaluate. quiet: Don't show resulting JS object. world: Ignored on QtWebKit. On QtWebEngine, a world ID or name to run the snippet in. """ if world is None: world = usertypes.JsWorld.jseval if quiet: jseval_cb = None else: def jseval_cb(out): if out is None: # Getting the actual error (if any) seems to be difficult. # The error does end up in # BrowserPage.javaScriptConsoleMessage(), but # distinguishing between :jseval errors and errors from the # webpage is not trivial... message.info('No output or error') else: # The output can be a string, number, dict, array, etc. But # *don't* output too much data, as this will make # qutebrowser hang out = str(out) if len(out) > 5000: out = out[:5000] + ' [...trimmed...]' message.info(out) widget = self._current_widget() widget.run_js_async(js_code, callback=jseval_cb, world=world) @cmdutils.register(instance='command-dispatcher', scope='window') def fake_key(self, keystring, global_=False): """Send a fake keypress or key string to the website or qutebrowser. :fake-key xy - sends the keychain 'xy' :fake-key <Ctrl-x> - sends Ctrl-x :fake-key <Escape> - sends the escape key Args: keystring: The keystring to send. global_: If given, the keys are sent to the qutebrowser UI. """ try: keyinfos = utils.parse_keystring(keystring) except utils.KeyParseError as e: raise cmdexc.CommandError(str(e)) for keyinfo in keyinfos: press_event = QKeyEvent(QEvent.KeyPress, keyinfo.key, keyinfo.modifiers, keyinfo.text) release_event = QKeyEvent(QEvent.KeyRelease, keyinfo.key, keyinfo.modifiers, keyinfo.text) if global_: window = QApplication.focusWindow() if window is None: raise cmdexc.CommandError("No focused window!") QApplication.postEvent(window, press_event) QApplication.postEvent(window, release_event) else: try: tab = objreg.get('tab', scope='tab', tab='current') except objreg.RegistryUnavailableError: raise cmdexc.CommandError("No focused webview!") tab = self._current_widget() tab.send_event(press_event) tab.send_event(release_event) @cmdutils.register(instance='command-dispatcher', scope='window', debug=True, backend=usertypes.Backend.QtWebKit) def debug_clear_ssl_errors(self): """Clear remembered SSL error answers.""" self._current_widget().clear_ssl_errors() @cmdutils.register(instance='command-dispatcher', scope='window') def edit_url(self, url=None, bg=False, tab=False, window=False): """Navigate to a url formed in an external editor. The editor which should be launched can be configured via the `general -> editor` config option. Args: url: URL to edit; defaults to the current page url. bg: Open in a new background tab. tab: Open in a new tab. window: Open in a new window. 
""" cmdutils.check_exclusive((tab, bg, window), 'tbw') old_url = self._current_url().toString() ed = editor.ExternalEditor(self._tabbed_browser) # Passthrough for openurl args (e.g. -t, -b, -w) ed.editing_finished.connect(functools.partial( self._open_if_changed, old_url=old_url, bg=bg, tab=tab, window=window)) ed.edit(url or old_url) @cmdutils.register(instance='command-dispatcher', scope='window', hide=True) def set_mark(self, key): """Set a mark at the current scroll position in the current tab. Args: key: mark identifier; capital indicates a global mark """ self._tabbed_browser.set_mark(key) @cmdutils.register(instance='command-dispatcher', scope='window', hide=True) def jump_mark(self, key): """Jump to the mark named by `key`. Args: key: mark identifier; capital indicates a global mark """ self._tabbed_browser.jump_mark(key) def _open_if_changed(self, url=None, old_url=None, bg=False, tab=False, window=False): """Open a URL unless it's already open in the tab. Args: old_url: The original URL to compare against. url: The URL to open. bg: Open in a new background tab. tab: Open in a new tab. window: Open in a new window. """ if bg or tab or window or url != old_url: self.openurl(url=url, bg=bg, tab=tab, window=window)
1
17,098
You can most likely remove that (and the try/finally) too, as we shouldn't have any flickering from removing/inserting tabs anymore now.
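For illustration only, a minimal sketch of what the suggested cleanup could look like, assuming the tab_move code shown above: _move_tab is a hypothetical helper name, and every attribute it touches (_tabbed_browser, _set_current_index, widget, tabIcon, page_title, tab_indicator_color, removeTab, insertTab, set_tab_indicator_color) already appears in the file above.

    def _move_tab(self, cur_idx, new_idx):
        """Hypothetical helper: move a tab without toggling setUpdatesEnabled.

        Sketch of the reviewer's suggestion -- the removeTab/insertTab pair
        is no longer wrapped in setUpdatesEnabled(False) and try/finally,
        on the assumption that removing/re-inserting tabs no longer causes
        visible flickering.
        """
        tab = self._tabbed_browser.widget(cur_idx)
        icon = self._tabbed_browser.tabIcon(cur_idx)
        label = self._tabbed_browser.page_title(cur_idx)
        color = self._tabbed_browser.tab_indicator_color(cur_idx)
        self._tabbed_browser.removeTab(cur_idx)
        self._tabbed_browser.insertTab(new_idx, tab, icon, label)
        self._set_current_index(new_idx)
        self._tabbed_browser.set_tab_indicator_color(new_idx, color)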
qutebrowser-qutebrowser
py
@@ -19,12 +19,15 @@ const localHooks = { * * @param {String} key Hook name. * @param {Function} callback Hook callback + * @return {Object} */ addLocalHook(key, callback) { if (!this._localHooks[key]) { this._localHooks[key] = []; } this._localHooks[key].push(callback); + + return this; }, /**
1
import {arrayEach} from './../helpers/array'; import {defineGetter} from './../helpers/object'; const MIXIN_NAME = 'localHooks'; /** * Mixin object to extend objects functionality for local hooks. * * @type {Object} */ const localHooks = { /** * Internal hooks storage. */ _localHooks: Object.create(null), /** * Add hook to the collection. * * @param {String} key Hook name. * @param {Function} callback Hook callback */ addLocalHook(key, callback) { if (!this._localHooks[key]) { this._localHooks[key] = []; } this._localHooks[key].push(callback); }, /** * Run hooks. * * @param {String} key Hook name. * @param {*} params */ runLocalHooks(key, ...params) { if (this._localHooks[key]) { arrayEach(this._localHooks[key], (callback) => callback.apply(this, params)); } }, /** * Clear all added hooks. */ clearLocalHooks() { this._localHooks = {}; }, }; defineGetter(localHooks, 'MIXIN_NAME', MIXIN_NAME, { writable: false, enumerable: false, }); export default localHooks;
1
14,795
Could you add `s` to `@return`?
handsontable-handsontable
js
@@ -57,11 +57,10 @@ module Selenium is_relative = Regexp.last_match(1).strip == '1' when /^Path=(.+)$/ path = Regexp.last_match(1).strip + p = path_for(name, is_relative, path) + @profile_paths[name] = p if p end end - - p = path_for(name, is_relative, path) - @profile_paths[name] = p if p end def path_for(name, is_relative, path)
1
# encoding: utf-8 # # Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. module Selenium module WebDriver module Firefox # @api private class ProfilesIni def initialize @ini_path = File.join(Util.app_data_path, 'profiles.ini') @profile_paths = {} parse if File.exist?(@ini_path) end def [](name) path = @profile_paths[name] path && Profile.new(path) end def refresh @profile_paths.clear parse end private def parse string = File.read @ini_path name = nil is_relative = nil path = nil string.split("\n").each do |line| case line when /^\[Profile/ name, path = nil if path_for(name, is_relative, path) when /^Name=(.+)$/ name = Regexp.last_match(1).strip when /^IsRelative=(.+)$/ is_relative = Regexp.last_match(1).strip == '1' when /^Path=(.+)$/ path = Regexp.last_match(1).strip end end p = path_for(name, is_relative, path) @profile_paths[name] = p if p end def path_for(name, is_relative, path) return unless [name, path].any? is_relative ? File.join(Util.app_data_path, path) : path end end # ProfilesIni end # Firefox end # WebDriver end # Selenium
1
13,901
Moving this code inside the last case statement doesn't seem right. Why are we defining `name` and `is_relative` variables there if we aren't using them anywhere?
SeleniumHQ-selenium
js
@@ -290,9 +290,16 @@ func (at *addrTable) getAddrTable(key string) *serviceTable { // filterResourceType implement filter. Proxy cares "Service" and "ServiceList" type func filterResourceType(msg model.Message) []v1.Service { svcs := make([]v1.Service, 0) - content, ok := msg.Content.([]byte) - if !ok { - return svcs + var content []byte + var err error + switch msg.Content.(type) { + case []byte: + content = msg.GetContent().([]byte) + default: + content, err = json.Marshal(msg.GetContent()) + if err != nil { + klog.Errorf("marshal message to edgemesh failed, err: %v", err) + } } switch getReourceType(msg.GetResource()) { case constants.ResourceTypeService:
1
package proxy import ( "encoding/json" "fmt" "math/rand" "net" "strconv" "strings" "sync" "syscall" "time" "github.com/kubeedge/beehive/pkg/core/model" "github.com/kubeedge/kubeedge/common/constants" "github.com/kubeedge/kubeedge/edge/pkg/metamanager/client" "github.com/kubeedge/kubeedge/edgemesh/pkg/proxy/poll" vdev "github.com/kubeedge/kubeedge/edgemesh/pkg/proxy/virtualdevice" v1 "k8s.io/api/core/v1" "k8s.io/klog" ) type conntrack struct { lconn net.Conn rconn net.Conn connNum uint32 } type listener struct { ln net.Listener serviceName string fd int32 } type serviceTable struct { ip string ports []int32 targetPort []int32 lns []net.Listener } type addrTable struct { sync.Map } const ( tcpBufSize = 8192 defaultNetworkPrefix = "9.251." defaultIpPoolSize = 20 defaultTcpClientTimeout = time.Second * 2 defaultTcpReconnectTimes = 3 firstPort = 0 ) var ( epoll *poll.Epoll addrByService *addrTable unused []string serve sync.Map metaClient client.CoreInterface ipPoolSize uint16 ) // Init: init the proxy. create virtual device and assign ips, etc.. func Init() { go func() { unused = make([]string, 0) addrByService = &addrTable{} metaClient = client.New() //create virtual network device for { err := vdev.CreateDevice() if err == nil { break } klog.Warningf("[L4 Proxy] create Device is failed : %s", err) //there may have some exception need to be fixed on OS time.Sleep(2 * time.Second) } //configure vir ip ipPoolSize = 0 expandIpPool() //open epoll ep, err := poll.CreatePoll(pollCallback) if err != nil { vdev.DestroyDevice() klog.Errorf("[L4 Proxy] epoll is open failed : %s", err) return } epoll = ep go epoll.Loop() klog.Infof("[L4 Proxy] proxy is running now") }() } // expandIpPool expand ip pool if virtual ip is not enough func expandIpPool() { // make sure the subnet can't be "255.255" if ipPoolSize > 0xfffe { return } for idx := ipPoolSize + 1; idx <= ipPoolSize+defaultIpPoolSize; idx++ { ip := defaultNetworkPrefix + getSubNet(idx) if err := vdev.AddIP(ip); err != nil { vdev.DestroyDevice() klog.Errorf("[L4 Proxy] Add ip is failed : %s please checkout the env", err) return } unused = append(unused, ip) } ipPoolSize = defaultIpPoolSize } // pollCallback process the connection from client func pollCallback(fd int32) { value, ok := serve.Load(fd) if !ok { return } listen, ok := value.(listener) if !ok { return } ln := listen.ln serviceName := listen.serviceName conn, err := ln.Accept() if err != nil { return } getAndSetSocket(conn, false) go startTcpServer(conn, serviceName) } // startTcpServer implement L4 proxy to the real server func startTcpServer(conn net.Conn, svcName string) { portString := strings.Split(conn.LocalAddr().String(), ":") // portString is a standard form, such as "172.17.0.1:8080" localPort, _ := strconv.ParseInt(portString[1], 10, 32) addr, err := doLoadBalance(svcName, localPort) if err != nil { klog.Warningf("[L4 Proxy] %s call svc : %s encountered an error: %s", conn.RemoteAddr().String(), svcName, err) conn.Close() return } var proxyClient net.Conn for retry := 0; retry < defaultTcpReconnectTimes; retry++ { proxyClient, err = net.DialTimeout("tcp", addr.String(), defaultTcpClientTimeout) if err == nil { break } } // Error when connecting to server ,maybe timeout or any other error if err != nil { klog.Warningf("[L4 Proxy] %s call svc : %s to %s encountered an error: %s", conn.RemoteAddr().String(), svcName, addr.String(), err) conn.Close() return } ctk := &conntrack{ lconn: conn, rconn: proxyClient, } klog.Infof("[L4 Proxy] start a proxy server : %s,%s", svcName, 
addr.String()) go func() { ctk.processServerProxy() }() go func() { ctk.processClientProxy() }() } // doLoadBalance implement the loadbalance function func doLoadBalance(svcName string, lport int64) (net.Addr, error) { svc := strings.Split(svcName, ".") namespace, name := svc[0], svc[1] pods, err := metaClient.Services(namespace).GetPods(name) if err != nil { klog.Errorf("[L4 Proxy] get svc error : %s", err) } // checkout the status of pods runPods := make([]v1.Pod, 0, len(pods)) for i := 0; i < len(pods); i++ { if pods[i].Status.Phase == v1.PodRunning { runPods = append(runPods, pods[i]) } } // support random LB for the early version rand.Seed(time.Now().UnixNano()) idx := rand.Uint32() % uint32(len(runPods)) hostIP := runPods[idx].Status.HostIP st := addrByService.getAddrTable(svcName) index := 0 for i, p := range st.ports { if p == int32(lport) { index = i break } } // kubeedge edgemesh support bridge net ,so, use hostport to access tp := st.targetPort[index] targetPort := int32(0) for _, value := range runPods[idx].Spec.Containers { for _, v := range value.Ports { if v.ContainerPort == tp { targetPort = v.HostPort break } } } return &net.TCPAddr{ IP: net.ParseIP(hostIP), Port: int(targetPort), }, nil } // GetServiceServer returns the proxy IP by given service name func GetServiceServer(svcName string) string { st := addrByService.getAddrTable(svcName) if st == nil { klog.Warningf("[L4 Proxy] Serivce %s is not ready for Proxy.", svcName) return "Proxy-abnormal" } return st.ip } //getSubNet Implement uint16 convert to "uint8.uint8" func getSubNet(subNet uint16) string { arg1 := uint64(subNet & 0x00ff) arg2 := uint64((subNet & 0xff00) >> 8) return strconv.FormatUint(arg2, 10) + "." + strconv.FormatUint(arg1, 10) } //getAndSetSocket get file description and set socket blocking func getAndSetSocket(ln interface{}, nonblock bool) int { fd := int(-1) switch network := ln.(type) { case *net.TCPListener: file, err := network.File() if err != nil { klog.Infof("[L4 Proxy] get fd %s", err) } else { fd = int(file.Fd()) } case *net.TCPConn: file, err := network.File() if err != nil { klog.Infof("[L4 Proxy] get fd %s", err) } else { fd = int(file.Fd()) } default: klog.Infof("[L4 Proxy] unknow conn") } err := syscall.SetNonblock(fd, nonblock) if err != nil { klog.Errorf("[L4 Proxy] Set Nonblock : %s", err) } return fd } // addAddrTable is a thread-safe operation to add to map func (at *addrTable) addAddrTable(key string, value *serviceTable) { at.Store(key, value) } // addAddrTable is a thread-safe operation to del from map func (at *addrTable) delAddrTable(key string) { at.Delete(key) } // addAddrTable is a thread-safe operation to get from map func (at *addrTable) getAddrTable(key string) *serviceTable { value, ok := at.Load(key) if !ok { return nil } st, ok := value.(*serviceTable) if !ok { return nil } return st } // filterResourceType implement filter. Proxy cares "Service" and "ServiceList" type func filterResourceType(msg model.Message) []v1.Service { svcs := make([]v1.Service, 0) content, ok := msg.Content.([]byte) if !ok { return svcs } switch getReourceType(msg.GetResource()) { case constants.ResourceTypeService: s, err := handleServiceMessage(content) if err != nil { break } svcs = append(svcs, *s) case constants.ResourceTypeServiceList: ss, err := handleServiceMessageList(content) if err != nil { break } svcs = append(svcs, ss...) 
default: klog.Infof("[L4 Proxy] process other resource: %s", msg.Router.Resource) } return svcs } // MsgProcess process from metaManager and start a proxy server func MsgProcess(msg model.Message) { svcs := filterResourceType(msg) if len(svcs) == 0 { return } klog.Infof("[L4 Proxy] proxy process svcs : %d resource: %s\n", len(svcs), msg.Router.Resource) for _, svc := range svcs { svcName := svc.Namespace + "." + svc.Name if !IsL4Proxy(&svc) { // when server protocol update to http delServer(svcName) continue } klog.Infof("[L4 Proxy] proxy process svc : %s,%s", msg.GetOperation(), svcName) port := make([]int32, 0) targetPort := make([]int32, 0) for _, p := range svc.Spec.Ports { // this version will support TCP only if p.Protocol == "TCP" { port = append(port, p.Port) // this version will not support string type targetPort = append(targetPort, p.TargetPort.IntVal) } } if len(port) == 0 || len(targetPort) == 0 { continue } switch msg.GetOperation() { case "insert": addServer(svcName, port) case "delete": delServer(svcName) case "update": updateServer(svcName, port) default: klog.Infof("[L4 proxy] Unknown operation") } st := addrByService.getAddrTable(svcName) if st != nil { st.targetPort = targetPort } } } // addServer : add the proxy server func addServer(svcName string, ports []int32) { var ip string st := addrByService.getAddrTable(svcName) if st != nil { if len(ports) == 0 && len(st.ports) == 0 { unused = append(unused, st.ip) addrByService.delAddrTable(svcName) return } ip = st.ip } else { if len(ports) == 0 { return } if len(unused) == 0 { expandIpPool() } ip = unused[0] unused = unused[1:] } lns := make([]net.Listener, 0) for _, port := range ports { addr := ip + ":" + strconv.FormatUint(uint64(port), 10) klog.Infof("[L4 Proxy] Start listen %s,%d for proxy", addr, port) ln, err := net.Listen("tcp", addr) if err != nil { klog.Errorf("[L4 Proxy] %s", err) continue } lns = append(lns, ln) server := listener{ ln: ln, serviceName: svcName, fd: int32(getAndSetSocket(ln, true)), } serve.Store(server.fd, server) epoll.EpollCtrlAdd(server.fd) } if st != nil { st.lns = append(st.lns, lns...) st.ports = append(st.ports, ports...) } else { st = &serviceTable{ ip: ip, lns: lns, ports: ports, } addrByService.addAddrTable(svcName, st) } } // delServer implement delete the proxy server func delServer(svcName string) { st := addrByService.getAddrTable(svcName) if st == nil { return } unused = append(unused, st.ip) for _, ln := range st.lns { fd := getAndSetSocket(ln, true) epoll.EpollCtrlDel(fd) ln.Close() } addrByService.delAddrTable(svcName) } // updateServer implement update the proxy server func updateServer(svcName string, ports []int32) error { st := addrByService.getAddrTable(svcName) if st == nil { // if not exist addServer(svcName, ports) } else { oldports := make([]int32, len(st.ports)) copy(oldports, st.ports) for idx, oldport := range oldports { update := true for k, newport := range ports { if oldport == newport { ports = append(ports[:k], ports[k+1:]...) update = false break } } if update { fd := getAndSetSocket(st.lns[idx], true) epoll.EpollCtrlDel(fd) st.lns[idx].Close() st.lns = append(st.lns[:idx], st.lns[idx+1:]...) st.ports = append(st.ports[:idx], st.ports[idx+1:]...) 
} } addServer(svcName, ports) } return nil } //handleMessageFromMetaManager convert []byte to k8s Service struct func handleServiceMessage(content []byte) (*v1.Service, error) { var s v1.Service err := json.Unmarshal(content, &s) if err != nil { return nil, fmt.Errorf("[L4 Proxy] unmarshal message to Service failed, err: %v", err) } return &s, nil } //handleMessageFromMetaManager convert []byte to k8s Service struct func handleServiceMessageList(content []byte) ([]v1.Service, error) { var s []v1.Service err := json.Unmarshal(content, &s) if err != nil { return nil, fmt.Errorf("[L4 Proxy] unmarshal message to Service failed, err: %v", err) } return s, nil } // getReourceType returns the reourceType as a string func getReourceType(reource string) string { str := strings.Split(reource, "/") if len(str) == 3 { return str[1] } else if len(str) == 5 { return str[3] } else { return reource } } // processServerProxy process up link traffic func (c *conntrack) processClientProxy() { buf := make([]byte, tcpBufSize) for { n, err := c.lconn.Read(buf) //service caller closess the connection if n == 0 { c.lconn.Close() c.rconn.Close() break } if err != nil { c.lconn.Close() c.rconn.Close() break } _, rerr := c.rconn.Write(buf[:n]) if rerr != nil { c.lconn.Close() c.rconn.Close() break } } } // processServerProxy process down link traffic func (c *conntrack) processServerProxy() { buf := make([]byte, tcpBufSize) for { n, err := c.rconn.Read(buf) if n == 0 { c.rconn.Close() c.lconn.Close() break } if err != nil { c.rconn.Close() c.lconn.Close() break } _, rerr := c.lconn.Write(buf[:n]) if rerr != nil { c.rconn.Close() c.lconn.Close() break } } } //isL4Proxy Determine whether to use L4 proxy func IsL4Proxy(svc *v1.Service) bool { if len(svc.Spec.Ports) == 0 { return false } // In the defination of k8s-Service, we can use Service.Spec.Ports.Name to // indicate whether the Service enables L4 proxy mode. According to our // current L7 mode only support the http protocol. Other 7-layer protocols // are automatically degraded to tcp until supported port := svc.Spec.Ports[firstPort] switch port.Name { case "websocket", "grpc", "https", "tcp": return true case "http", "udp": return false default: return true } }
1
14,912
Will it cause any bugs without this change? I've seen this code block (L293-302) many times... ;-)
kubeedge-kubeedge
go
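The review comment above notes that this Content-conversion block recurs throughout the codebase. Purely as an illustrative sketch (not part of the record or the actual patch), the repetition could be factored into a helper; the sketch assumes only the `model.Message.GetContent()` accessor already used in the patch:

```go
package proxy

import (
	"encoding/json"

	"github.com/kubeedge/beehive/pkg/core/model"
)

// getContentBytes is a hypothetical helper that centralizes the repeated
// "Content may be raw bytes or a structured object" conversion: raw bytes
// are passed through unchanged, anything else is marshalled to JSON.
func getContentBytes(msg model.Message) ([]byte, error) {
	switch c := msg.GetContent().(type) {
	case []byte:
		return c, nil
	default:
		return json.Marshal(c)
	}
}
```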
@@ -82,6 +82,11 @@ namespace NLog /// </summary> public LogFactory Factory { get; private set; } + /// <summary> + /// Properties added with <see cref="WithProperty"/> or <see cref="SetProperty"/> + /// </summary> + public IDictionary<string, object> Properties => _contextProperties ?? new Dictionary<string, object>(); + /// <summary> /// Gets a value indicating whether logging is enabled for the specified level. /// </summary>
1
// // Copyright (c) 2004-2019 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // namespace NLog { using System; using System.Collections.Generic; using System.ComponentModel; #if NET4_5 using System.Threading.Tasks; #endif using JetBrains.Annotations; using NLog.Internal; /// <summary> /// Provides logging interface and utility functions. /// </summary> [CLSCompliant(true)] public partial class Logger : ILogger { internal static readonly Type DefaultLoggerType = typeof(Logger); private Logger _contextLogger; private Dictionary<string, object> _contextProperties; private LoggerConfiguration _configuration; private volatile bool _isTraceEnabled; private volatile bool _isDebugEnabled; private volatile bool _isInfoEnabled; private volatile bool _isWarnEnabled; private volatile bool _isErrorEnabled; private volatile bool _isFatalEnabled; /// <summary> /// Initializes a new instance of the <see cref="Logger"/> class. /// </summary> protected internal Logger() { _contextLogger = this; } /// <summary> /// Occurs when logger configuration changes. /// </summary> public event EventHandler<EventArgs> LoggerReconfigured; /// <summary> /// Gets the name of the logger. /// </summary> public string Name { get; private set; } /// <summary> /// Gets the factory that created this logger. /// </summary> public LogFactory Factory { get; private set; } /// <summary> /// Gets a value indicating whether logging is enabled for the specified level. 
/// </summary> /// <param name="level">Log level to be checked.</param> /// <returns>A value of <see langword="true" /> if logging is enabled for the specified level, otherwise it returns <see langword="false" />.</returns> public bool IsEnabled(LogLevel level) { if (level == null) { throw new InvalidOperationException("Log level must be defined"); } return GetTargetsForLevel(level) != null; } /// <summary> /// Creates new logger that automatically appends the specified property to all log events (without changing current logger) /// </summary> /// <param name="propertyKey">Property Name</param> /// <param name="propertyValue">Property Value</param> /// <returns>New Logger object that automatically appends specified property</returns> public Logger WithProperty(string propertyKey, object propertyValue) { if (string.IsNullOrEmpty(propertyKey)) throw new ArgumentException(nameof(propertyKey)); Logger newLogger = Factory.CreateNewLogger(GetType()) ?? new Logger(); newLogger.Initialize(Name, _configuration, Factory); newLogger._contextProperties = CopyOnWrite(propertyKey, propertyValue); newLogger._contextLogger = _contextLogger; // Use the LoggerConfiguration of the parent Logger return newLogger; } /// <summary> /// Updates the specified context property for the current logger. The logger will append it for all log events /// </summary> /// <remarks> /// Will affect all locations/contexts that makes use of the same named logger object. /// </remarks> /// <param name="propertyKey">Property Name</param> /// <param name="propertyValue">Property Value</param> public void SetProperty(string propertyKey, object propertyValue) { if (string.IsNullOrEmpty(propertyKey)) throw new ArgumentException(nameof(propertyKey)); _contextProperties = CopyOnWrite(propertyKey, propertyValue); } private Dictionary<string, object> CopyOnWrite(string propertyKey, object propertyValue) { var contextProperties = _contextProperties; contextProperties = contextProperties != null ? new Dictionary<string, object>(contextProperties) : new Dictionary<string, object>(); contextProperties[propertyKey] = propertyValue; return contextProperties; } /// <summary> /// Writes the specified diagnostic message. /// </summary> /// <param name="logEvent">Log event.</param> public void Log(LogEventInfo logEvent) { var targetsForLevel = IsEnabled(logEvent.Level) ? GetTargetsForLevel(logEvent.Level) : null; if (targetsForLevel != null) { if (logEvent.LoggerName == null) logEvent.LoggerName = Name; WriteToTargets(logEvent, targetsForLevel); } } /// <summary> /// Writes the specified diagnostic message. /// </summary> /// <param name="wrapperType">The name of the type that wraps Logger.</param> /// <param name="logEvent">Log event.</param> public void Log(Type wrapperType, LogEventInfo logEvent) { var targetsForLevel = IsEnabled(logEvent.Level) ? GetTargetsForLevel(logEvent.Level) : null; if (targetsForLevel != null) { if (logEvent.LoggerName == null) logEvent.LoggerName = Name; WriteToTargets(wrapperType, logEvent, targetsForLevel); } } #region Log() overloads /// <overloads> /// Writes the diagnostic message at the specified level using the specified format provider and format parameters. /// </overloads> /// <summary> /// Writes the diagnostic message at the specified level. 
/// </summary> /// <typeparam name="T">Type of the value.</typeparam> /// <param name="level">The log level.</param> /// <param name="value">The value to be written.</param> public void Log<T>(LogLevel level, T value) { if (IsEnabled(level)) { WriteToTargets(level, null, value); } } /// <summary> /// Writes the diagnostic message at the specified level. /// </summary> /// <typeparam name="T">Type of the value.</typeparam> /// <param name="level">The log level.</param> /// <param name="formatProvider">An IFormatProvider that supplies culture-specific formatting information.</param> /// <param name="value">The value to be written.</param> public void Log<T>(LogLevel level, IFormatProvider formatProvider, T value) { if (IsEnabled(level)) { WriteToTargets(level, formatProvider, value); } } /// <summary> /// Writes the diagnostic message at the specified level. /// </summary> /// <param name="level">The log level.</param> /// <param name="messageFunc">A function returning message to be written. Function is not evaluated if logging is not enabled.</param> public void Log(LogLevel level, LogMessageGenerator messageFunc) { if (IsEnabled(level)) { if (messageFunc == null) { throw new ArgumentNullException(nameof(messageFunc)); } WriteToTargets(level, null, messageFunc()); } } /// <summary> /// Writes the diagnostic message and exception at the specified level. /// </summary> /// <param name="level">The log level.</param> /// <param name="message">A <see langword="string" /> to be written.</param> /// <param name="exception">An exception to be logged.</param> /// <remarks>This method was marked as obsolete before NLog 4.3.11 and it may be removed in a future release.</remarks> [Obsolete("Use Log(LogLevel, String, Exception) method instead. Marked obsolete before v4.3.11")] public void LogException(LogLevel level, [Localizable(false)] string message, Exception exception) { Log(level, message, exception); } /// <summary> /// Writes the diagnostic message at the specified level using the specified parameters and formatting them with the supplied format provider. /// </summary> /// <param name="level">The log level.</param> /// <param name="formatProvider">An IFormatProvider that supplies culture-specific formatting information.</param> /// <param name="message">A <see langword="string" /> containing format items.</param> /// <param name="args">Arguments to format.</param> [MessageTemplateFormatMethod("message")] public void Log(LogLevel level, IFormatProvider formatProvider, [Localizable(false)] string message, params object[] args) { if (IsEnabled(level)) { WriteToTargets(level, formatProvider, message, args); } } /// <summary> /// Writes the diagnostic message at the specified level. /// </summary> /// <param name="level">The log level.</param> /// <param name="message">Log message.</param> public void Log(LogLevel level, [Localizable(false)] string message) { if (IsEnabled(level)) { WriteToTargets(level, null, message); } } /// <summary> /// Writes the diagnostic message at the specified level using the specified parameters. 
/// </summary> /// <param name="level">The log level.</param> /// <param name="message">A <see langword="string" /> containing format items.</param> /// <param name="args">Arguments to format.</param> [MessageTemplateFormatMethod("message")] public void Log(LogLevel level, [Localizable(false)] string message, params object[] args) { if (IsEnabled(level)) { WriteToTargets(level, message, args); } } /// <summary> /// Writes the diagnostic message and exception at the specified level. /// </summary> /// <param name="level">The log level.</param> /// <param name="message">A <see langword="string" /> to be written.</param> /// <param name="exception">An exception to be logged.</param> /// <remarks>This method was marked as obsolete before NLog 4.3.11 and it may be removed in a future release.</remarks> [Obsolete("Use Log(LogLevel level, Exception exception, [Localizable(false)] string message, params object[] args) instead. Marked obsolete before v4.3.11")] public void Log(LogLevel level, [Localizable(false)] string message, Exception exception) { if (IsEnabled(level)) { WriteToTargets(level, exception, message, null); } } /// <summary> /// Writes the diagnostic message and exception at the specified level. /// </summary> /// <param name="level">The log level.</param> /// <param name="message">A <see langword="string" /> to be written.</param> /// <param name="args">Arguments to format.</param> /// <param name="exception">An exception to be logged.</param> [MessageTemplateFormatMethod("message")] public void Log(LogLevel level, Exception exception, [Localizable(false)] string message, params object[] args) { if (IsEnabled(level)) { WriteToTargets(level, exception, message, args); } } /// <summary> /// Writes the diagnostic message and exception at the specified level. /// </summary> /// <param name="level">The log level.</param> /// <param name="formatProvider">An IFormatProvider that supplies culture-specific formatting information.</param> /// <param name="message">A <see langword="string" /> to be written.</param> /// <param name="args">Arguments to format.</param> /// <param name="exception">An exception to be logged.</param> [MessageTemplateFormatMethod("message")] public void Log(LogLevel level, Exception exception, IFormatProvider formatProvider, [Localizable(false)] string message, params object[] args) { if (IsEnabled(level)) { WriteToTargets(level, exception, formatProvider, message, args); } } /// <summary> /// Writes the diagnostic message at the specified level using the specified parameter and formatting it with the supplied format provider. /// </summary> /// <typeparam name="TArgument">The type of the argument.</typeparam> /// <param name="level">The log level.</param> /// <param name="formatProvider">An IFormatProvider that supplies culture-specific formatting information.</param> /// <param name="message">A <see langword="string" /> containing one format item.</param> /// <param name="argument">The argument to format.</param> [MessageTemplateFormatMethod("message")] public void Log<TArgument>(LogLevel level, IFormatProvider formatProvider, [Localizable(false)] string message, TArgument argument) { if (IsEnabled(level)) { WriteToTargets(level, formatProvider, message, new object[] { argument }); } } /// <summary> /// Writes the diagnostic message at the specified level using the specified parameter. 
/// </summary> /// <typeparam name="TArgument">The type of the argument.</typeparam> /// <param name="level">The log level.</param> /// <param name="message">A <see langword="string" /> containing one format item.</param> /// <param name="argument">The argument to format.</param> [MessageTemplateFormatMethod("message")] public void Log<TArgument>(LogLevel level, [Localizable(false)] string message, TArgument argument) { if (IsEnabled(level)) { WriteToTargets(level, message, new object[] { argument }); } } /// <summary> /// Writes the diagnostic message at the specified level using the specified arguments formatting it with the supplied format provider. /// </summary> /// <typeparam name="TArgument1">The type of the first argument.</typeparam> /// <typeparam name="TArgument2">The type of the second argument.</typeparam> /// <param name="level">The log level.</param> /// <param name="formatProvider">An IFormatProvider that supplies culture-specific formatting information.</param> /// <param name="message">A <see langword="string" /> containing one format item.</param> /// <param name="argument1">The first argument to format.</param> /// <param name="argument2">The second argument to format.</param> [MessageTemplateFormatMethod("message")] public void Log<TArgument1, TArgument2>(LogLevel level, IFormatProvider formatProvider, [Localizable(false)] string message, TArgument1 argument1, TArgument2 argument2) { if (IsEnabled(level)) { WriteToTargets(level, formatProvider, message, new object[] { argument1, argument2 }); } } /// <summary> /// Writes the diagnostic message at the specified level using the specified parameters. /// </summary> /// <typeparam name="TArgument1">The type of the first argument.</typeparam> /// <typeparam name="TArgument2">The type of the second argument.</typeparam> /// <param name="level">The log level.</param> /// <param name="message">A <see langword="string" /> containing one format item.</param> /// <param name="argument1">The first argument to format.</param> /// <param name="argument2">The second argument to format.</param> [MessageTemplateFormatMethod("message")] public void Log<TArgument1, TArgument2>(LogLevel level, [Localizable(false)] string message, TArgument1 argument1, TArgument2 argument2) { if (IsEnabled(level)) { WriteToTargets(level, message, new object[] { argument1, argument2 }); } } /// <summary> /// Writes the diagnostic message at the specified level using the specified arguments formatting it with the supplied format provider. 
/// </summary> /// <typeparam name="TArgument1">The type of the first argument.</typeparam> /// <typeparam name="TArgument2">The type of the second argument.</typeparam> /// <typeparam name="TArgument3">The type of the third argument.</typeparam> /// <param name="level">The log level.</param> /// <param name="formatProvider">An IFormatProvider that supplies culture-specific formatting information.</param> /// <param name="message">A <see langword="string" /> containing one format item.</param> /// <param name="argument1">The first argument to format.</param> /// <param name="argument2">The second argument to format.</param> /// <param name="argument3">The third argument to format.</param> [MessageTemplateFormatMethod("message")] public void Log<TArgument1, TArgument2, TArgument3>(LogLevel level, IFormatProvider formatProvider, [Localizable(false)] string message, TArgument1 argument1, TArgument2 argument2, TArgument3 argument3) { if (IsEnabled(level)) { WriteToTargets(level, formatProvider, message, new object[] { argument1, argument2, argument3 }); } } /// <summary> /// Writes the diagnostic message at the specified level using the specified parameters. /// </summary> /// <typeparam name="TArgument1">The type of the first argument.</typeparam> /// <typeparam name="TArgument2">The type of the second argument.</typeparam> /// <typeparam name="TArgument3">The type of the third argument.</typeparam> /// <param name="level">The log level.</param> /// <param name="message">A <see langword="string" /> containing one format item.</param> /// <param name="argument1">The first argument to format.</param> /// <param name="argument2">The second argument to format.</param> /// <param name="argument3">The third argument to format.</param> [MessageTemplateFormatMethod("message")] public void Log<TArgument1, TArgument2, TArgument3>(LogLevel level, [Localizable(false)] string message, TArgument1 argument1, TArgument2 argument2, TArgument3 argument3) { if (IsEnabled(level)) { WriteToTargets(level, message, new object[] { argument1, argument2, argument3 }); } } private LogEventInfo PrepareLogEventInfo(LogEventInfo logEvent) { if (logEvent.FormatProvider == null) { logEvent.FormatProvider = Factory.DefaultCultureInfo; } if (_contextProperties != null) { foreach (var property in _contextProperties) { if (!logEvent.Properties.ContainsKey(property.Key)) { logEvent.Properties[property.Key] = property.Value; } } } return logEvent; } #endregion /// <summary> /// Runs the provided action. If the action throws, the exception is logged at <c>Error</c> level. The exception is not propagated outside of this method. /// </summary> /// <param name="action">Action to execute.</param> public void Swallow(Action action) { try { action(); } catch (Exception e) { Error(e); } } /// <summary> /// Runs the provided function and returns its result. If an exception is thrown, it is logged at <c>Error</c> level. /// The exception is not propagated outside of this method; a default value is returned instead. /// </summary> /// <typeparam name="T">Return type of the provided function.</typeparam> /// <param name="func">Function to run.</param> /// <returns>Result returned by the provided function or the default value of type <typeparamref name="T"/> in case of exception.</returns> public T Swallow<T>(Func<T> func) { return Swallow(func, default(T)); } /// <summary> /// Runs the provided function and returns its result. If an exception is thrown, it is logged at <c>Error</c> level. 
/// The exception is not propagated outside of this method; a fallback value is returned instead. /// </summary> /// <typeparam name="T">Return type of the provided function.</typeparam> /// <param name="func">Function to run.</param> /// <param name="fallback">Fallback value to return in case of exception.</param> /// <returns>Result returned by the provided function or fallback value in case of exception.</returns> public T Swallow<T>(Func<T> func, T fallback) { try { return func(); } catch (Exception e) { Error(e); return fallback; } } #if NET4_5 /// <summary> /// Logs an exception is logged at <c>Error</c> level if the provided task does not run to completion. /// </summary> /// <param name="task">The task for which to log an error if it does not run to completion.</param> /// <remarks>This method is useful in fire-and-forget situations, where application logic does not depend on completion of task. This method is avoids C# warning CS4014 in such situations.</remarks> public async void Swallow(Task task) { try { await task; } catch (Exception e) { Error(e); } } /// <summary> /// Returns a task that completes when a specified task to completes. If the task does not run to completion, an exception is logged at <c>Error</c> level. The returned task always runs to completion. /// </summary> /// <param name="task">The task for which to log an error if it does not run to completion.</param> /// <returns>A task that completes in the <see cref="TaskStatus.RanToCompletion"/> state when <paramref name="task"/> completes.</returns> public async Task SwallowAsync(Task task) { try { await task; } catch (Exception e) { Error(e); } } /// <summary> /// Runs async action. If the action throws, the exception is logged at <c>Error</c> level. The exception is not propagated outside of this method. /// </summary> /// <param name="asyncAction">Async action to execute.</param> public async Task SwallowAsync(Func<Task> asyncAction) { try { await asyncAction(); } catch (Exception e) { Error(e); } } /// <summary> /// Runs the provided async function and returns its result. If the task does not run to completion, an exception is logged at <c>Error</c> level. /// The exception is not propagated outside of this method; a default value is returned instead. /// </summary> /// <typeparam name="TResult">Return type of the provided function.</typeparam> /// <param name="asyncFunc">Async function to run.</param> /// <returns>A task that represents the completion of the supplied task. If the supplied task ends in the <see cref="TaskStatus.RanToCompletion"/> state, the result of the new task will be the result of the supplied task; otherwise, the result of the new task will be the default value of type <typeparamref name="TResult"/>.</returns> public async Task<TResult> SwallowAsync<TResult>(Func<Task<TResult>> asyncFunc) { return await SwallowAsync(asyncFunc, default(TResult)); } /// <summary> /// Runs the provided async function and returns its result. If the task does not run to completion, an exception is logged at <c>Error</c> level. /// The exception is not propagated outside of this method; a fallback value is returned instead. /// </summary> /// <typeparam name="TResult">Return type of the provided function.</typeparam> /// <param name="asyncFunc">Async function to run.</param> /// <param name="fallback">Fallback value to return if the task does not end in the <see cref="TaskStatus.RanToCompletion"/> state.</param> /// <returns>A task that represents the completion of the supplied task. 
If the supplied task ends in the <see cref="TaskStatus.RanToCompletion"/> state, the result of the new task will be the result of the supplied task; otherwise, the result of the new task will be the fallback value.</returns> public async Task<TResult> SwallowAsync<TResult>(Func<Task<TResult>> asyncFunc, TResult fallback) { try { return await asyncFunc(); } catch (Exception e) { Error(e); return fallback; } } #endif internal void Initialize(string name, LoggerConfiguration loggerConfiguration, LogFactory factory) { Name = name; Factory = factory; SetConfiguration(loggerConfiguration); } private void WriteToTargets(LogLevel level, [Localizable(false)] string message, object[] args) { WriteToTargets(level, Factory.DefaultCultureInfo, message, args); } private void WriteToTargets(LogLevel level, IFormatProvider formatProvider, [Localizable(false)] string message, object[] args) { var targetsForLevel = GetTargetsForLevel(level); if (targetsForLevel != null) { var logEvent = LogEventInfo.Create(level, Name, formatProvider, message, args); WriteToTargets(logEvent, targetsForLevel); } } private void WriteToTargets(LogLevel level, IFormatProvider formatProvider, [Localizable(false)] string message) { var targetsForLevel = GetTargetsForLevel(level); if (targetsForLevel != null) { // please note that this overload calls the overload of LogEventInfo.Create with object[] parameter on purpose - // to avoid unnecessary string.Format (in case of calling Create(LogLevel, string, IFormatProvider, object)) var logEvent = LogEventInfo.Create(level, Name, formatProvider, message, (object[])null); WriteToTargets(logEvent, targetsForLevel); } } private void WriteToTargets<T>(LogLevel level, IFormatProvider formatProvider, T value) { var targetsForLevel = GetTargetsForLevel(level); if (targetsForLevel != null) { var logEvent = LogEventInfo.Create(level, Name, formatProvider, value); WriteToTargets(logEvent, targetsForLevel); } } private void WriteToTargets(LogLevel level, Exception ex, [Localizable(false)] string message, object[] args) { var targetsForLevel = GetTargetsForLevel(level); if (targetsForLevel != null) { var logEvent = LogEventInfo.Create(level, Name, ex, Factory.DefaultCultureInfo, message, args); WriteToTargets(logEvent, targetsForLevel); } } private void WriteToTargets(LogLevel level, Exception ex, IFormatProvider formatProvider, [Localizable(false)] string message, object[] args) { var targetsForLevel = GetTargetsForLevel(level); if (targetsForLevel != null) { var logEvent = LogEventInfo.Create(level, Name, ex, formatProvider, message, args); WriteToTargets(logEvent, targetsForLevel); } } private void WriteToTargets([NotNull] LogEventInfo logEvent, [NotNull] TargetWithFilterChain targetsForLevel) { LoggerImpl.Write(DefaultLoggerType, targetsForLevel, PrepareLogEventInfo(logEvent), Factory); } private void WriteToTargets(Type wrapperType, [NotNull] LogEventInfo logEvent, [NotNull] TargetWithFilterChain targetsForLevel) { LoggerImpl.Write(wrapperType ?? 
DefaultLoggerType, targetsForLevel, PrepareLogEventInfo(logEvent), Factory); } internal void SetConfiguration(LoggerConfiguration newConfiguration) { _configuration = newConfiguration; // pre-calculate 'enabled' flags _isTraceEnabled = IsEnabled(LogLevel.Trace); _isDebugEnabled = IsEnabled(LogLevel.Debug); _isInfoEnabled = IsEnabled(LogLevel.Info); _isWarnEnabled = IsEnabled(LogLevel.Warn); _isErrorEnabled = IsEnabled(LogLevel.Error); _isFatalEnabled = IsEnabled(LogLevel.Fatal); OnLoggerReconfigured(EventArgs.Empty); } private TargetWithFilterChain GetTargetsForLevel(LogLevel level) { if (ReferenceEquals(_contextLogger, this)) return _configuration.GetTargetsForLevel(level); else return _contextLogger.GetTargetsForLevel(level); // Use the LoggerConfiguration of the parent Logger } /// <summary> /// Raises the event when the logger is reconfigured. /// </summary> /// <param name="e">Event arguments</param> protected virtual void OnLoggerReconfigured(EventArgs e) { LoggerReconfigured?.Invoke(this, e); } } }
1
19,342
You are opening a door to race-condition hell by returning an unprotected dictionary. I recommend returning an `IReadOnlyDictionary` instead, on the platforms where it is available.
NLog-NLog
.cs
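A hedged sketch of the direction the review comment above suggests (illustrative only, not the actual NLog change); it assumes a target framework where `IReadOnlyDictionary<,>` and `ReadOnlyDictionary<,>` exist, and the `PropertyBag` class name is a placeholder rather than NLog's API:

```csharp
using System.Collections.Generic;
using System.Collections.ObjectModel;

public class PropertyBag
{
    private Dictionary<string, object> _contextProperties;

    // Expose a read-only view so callers cannot mutate the internal dictionary
    // and race against the owner's own copy-on-write updates.
    public IReadOnlyDictionary<string, object> Properties =>
        new ReadOnlyDictionary<string, object>(
            _contextProperties ?? new Dictionary<string, object>());

    public void SetProperty(string key, object value)
    {
        // Copy-on-write, mirroring the pattern in the original Logger class.
        var copy = _contextProperties != null
            ? new Dictionary<string, object>(_contextProperties)
            : new Dictionary<string, object>();
        copy[key] = value;
        _contextProperties = copy;
    }
}
```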
@@ -4367,7 +4367,7 @@ describe('Cursor', function() { } ); - it('should return a promise when no callback supplied to forEach method', function(done) { + it.skip('should return a promise when no callback supplied to forEach method', function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false });
1
'use strict'; const test = require('./shared').assert; const setupDatabase = require('./shared').setupDatabase; const fs = require('fs'); const expect = require('chai').expect; const Long = require('bson').Long; const sinon = require('sinon'); const Buffer = require('safe-buffer').Buffer; const Writable = require('stream').Writable; const core = require('../../lib/core'); const ReadPreference = core.ReadPreference; describe('Cursor', function() { before(function() { return setupDatabase(this.configuration, [ 'cursorkilltest1', 'cursor_session_tests', 'cursor_session_tests2' ]); }); /** * @ignore * @api private */ it('cursorShouldBeAbleToResetOnToArrayRunningQueryAgain', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_to_a', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); var cursor = collection.find({}); cursor.toArray(function(err) { test.equal(null, err); // Should fail if called again (cursor should be closed) cursor.toArray(function(err) { test.equal(null, err); // Should fail if called again (cursor should be closed) cursor.each(function(err, item) { test.equal(null, err); // Let's close the db if (!item) { client.close(done); } }); }); }); }); }); }); } }); /** * @ignore * @api private */ it('cursor should close after first next operation', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('close_on_next', function(err, collection) { test.equal(null, err); collection.insert( [{ a: 1 }, { a: 1 }, { a: 1 }], configuration.writeConcernMax(), function(err) { test.equal(null, err); var cursor = collection.find({}); cursor.batchSize(2); cursor.next(function(err) { test.equal(null, err); cursor.close(); client.close(done); }); } ); }); }); } }); /** * @ignore * @api private */ it('cursor should trigger getMore', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('trigger_get_more', function(err, collection) { test.equal(null, err); collection.insert( [{ a: 1 }, { a: 1 }, { a: 1 }], configuration.writeConcernMax(), function(err) { test.equal(null, err); var 
cursor = collection.find({}); cursor.batchSize(2); cursor.toArray(function(err) { test.equal(null, err); client.close(done); }); } ); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyExecuteCursorExplain', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_explain', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection.find({ a: 1 }).explain(function(err, explaination) { test.equal(null, err); test.ok(explaination != null); // Let's close the db client.close(done); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyExecuteCursorCount', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_count', function(err, collection) { test.equal(null, err); collection.find().count(function(err) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection.find().count(function(err, count) { test.equal(null, err); test.equal(10, count); test.ok(count.constructor === Number); collection.find({}, { limit: 5 }).count(function(err, count) { test.equal(null, err); test.equal(5, count); collection.find({}, { skip: 5 }).count(function(err, count) { test.equal(null, err); test.equal(5, count); db.collection('acollectionthatdoesn').count(function(err, count) { test.equal(null, err); test.equal(0, count); var cursor = collection.find(); cursor.count(function(err, count) { test.equal(null, err); test.equal(10, count); cursor.each(function(err, item) { test.equal(null, err); if (item == null) { cursor.count(function(err, count2) { test.equal(null, err); test.equal(10, count2); test.equal(count, count2); // Let's close the db client.close(done); }); } }); }); }); }); }); }); } insert(function() { finished(); }); }); }); }); } }); it('Should correctly execute cursor count with secondary readPreference', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: 'replicaset' } }, // The actual test we wish to run test: function(done) { const configuration = this.configuration; const client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect((err, client) => { expect(err).to.not.exist; const db = client.db(configuration.db); let internalClientCursor; if 
(configuration.usingUnifiedTopology()) { internalClientCursor = sinon.spy(client.topology, 'cursor'); } else { internalClientCursor = sinon.spy(client.topology.s.coreTopology, 'cursor'); } const expectedReadPreference = new ReadPreference(ReadPreference.SECONDARY); const cursor = db.collection('countTEST').find({ qty: { $gt: 4 } }); cursor.count(true, { readPreference: ReadPreference.SECONDARY }, err => { expect(err).to.be.null; const operation = internalClientCursor.getCall(0).args[0]; expect(operation.options) .to.have.nested.property('readPreference') .that.deep.equals(expectedReadPreference); client.close(done); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyExecuteCursorCountWithDottedCollectionName', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_count.ext', function(err, collection) { test.equal(null, err); collection.find().count(function(err) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection.find().count(function(err, count) { test.equal(null, err); test.equal(10, count); test.ok(count.constructor === Number); collection.find({}, { limit: 5 }).count(function(err, count) { test.equal(null, err); test.equal(5, count); collection.find({}, { skip: 5 }).count(function(err, count) { test.equal(null, err); test.equal(5, count); db.collection('acollectionthatdoesn').count(function(err, count) { test.equal(null, err); test.equal(0, count); var cursor = collection.find(); cursor.count(function(err, count) { test.equal(null, err); test.equal(10, count); cursor.each(function(err, item) { test.equal(null, err); if (item == null) { cursor.count(function(err, count2) { test.equal(null, err); test.equal(10, count2); test.equal(count, count2); // Let's close the db client.close(done); }); } }); }); }); }); }); }); } insert(function() { finished(); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyExecuteSortOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_sort', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function f() { var number_of_functions = 9; var finished = function() { number_of_functions = number_of_functions - 1; 
if (number_of_functions === 0) { client.close(done); } }; var cursor = collection.find().sort(['a', 1]); test.deepEqual(['a', 1], cursor.sortValue); finished(); cursor = collection.find().sort('a', 1); test.deepEqual([['a', 1]], cursor.sortValue); finished(); cursor = collection.find().sort('a', -1); test.deepEqual([['a', -1]], cursor.sortValue); finished(); cursor = collection.find().sort('a', 'asc'); test.deepEqual([['a', 'asc']], cursor.sortValue); finished(); cursor = collection.find().sort([ ['a', -1], ['b', 1] ]); var entries = cursor.sortValue.entries(); test.deepEqual(['a', -1], entries.next().value); test.deepEqual(['b', 1], entries.next().value); finished(); cursor = collection .find() .sort('a', 1) .sort('a', -1); test.deepEqual([['a', -1]], cursor.sortValue); finished(); cursor.next(function(err) { test.equal(null, err); try { cursor.sort(['a']); } catch (err) { test.equal('Cursor is closed', err.message); finished(); } }); collection .find() .sort('a', 25) .next(function(err) { test.equal( "Illegal sort clause, must be of the form [['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]", err.message ); finished(); }); collection .find() .sort(25) .next(function(err) { test.equal( "Illegal sort clause, must be of the form [['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]", err.message ); finished(); }); } insert(function() { f(); }); }); }); } }); /** * @ignore * @api private */ it('shouldThrowErrorOnEachWhenMissingCallback', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_each', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection.find(function(err, cursor) { test.equal(null, err); test.throws(function() { cursor.each(); }); client.close(done); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleLimitOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); db.createCollection('test_cursor_limit', function(err, collection) { function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection .find() .limit(5) .toArray(function(err, items) { test.equal(5, items.length); // Let's close the db test.equal(null, 
err); client.close(done); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleNegativeOneLimitOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_cursor_negative_one_limit', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection .find() .limit(-1) .toArray(function(err, items) { test.equal(null, err); test.equal(1, items.length); // Let's close the db client.close(done); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleAnyNegativeLimitOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_cursor_any_negative_limit', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection .find() .limit(-5) .toArray(function(err, items) { test.equal(null, err); test.equal(5, items.length); // Let's close the db client.close(done); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyReturnErrorsOnIllegalLimitValues', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_limit_exceptions', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); }); collection.find(function(err, cursor) { test.equal(null, err); try { cursor.limit('not-an-integer'); } catch (err) { test.equal('limit requires an integer', err.message); } try { cursor.limit('not-an-integer'); test.ok(false); } catch (err) { test.equal('limit requires an integer', err.message); } }); collection.find(function(err, cursor) { 
test.equal(null, err); cursor.close(function(err, cursor) { test.equal(null, err); try { cursor.limit(1); } catch (err) { test.equal('Cursor is closed', err.message); } collection.find(function(err, cursor) { test.equal(null, err); cursor.next(function(err) { test.equal(null, err); try { cursor.limit(1); } catch (err) { test.equal('Cursor is closed', err.message); } try { cursor.limit(1); test.ok(false); } catch (err) { test.equal('Cursor is closed', err.message); } client.close(done); }); }); try { cursor.limit(1); test.ok(false); } catch (err) { test.equal('Cursor is closed', err.message); } }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlySkipRecordsOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_skip', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection.find(function(err, cursor) { test.equal(null, err); cursor.count(function(err, count) { test.equal(null, err); test.equal(10, count); }); }); collection.find(function(err, cursor) { test.equal(null, err); cursor.toArray(function(err, items) { test.equal(null, err); test.equal(10, items.length); collection .find() .skip(2) .toArray(function(err, items2) { test.equal(null, err); test.equal(8, items2.length); // Check that we have the same elements var numberEqual = 0; var sliced = items.slice(2, 10); for (var i = 0; i < sliced.length; i++) { if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; } test.equal(8, numberEqual); // Let's close the db client.close(done); }); }); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyReturnErrorsOnIllegalSkipValues', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_skip_exceptions', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); }); try { collection.find().skip('not-an-integer'); } catch (err) { test.equal('skip requires an integer', err.message); } var cursor = collection.find(); cursor.next(function(err) { test.equal(null, err); try { cursor.skip(1); } catch (err) { test.equal('Cursor is closed', err.message); } var cursor2 = collection.find(); cursor2.close(function(err) { test.equal(null, err); try { cursor2.skip(1); } catch (err) { test.equal('Cursor is closed', err.message); 
} client.close(done); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldReturnErrorsOnIllegalBatchSizes', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_batchSize_exceptions', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); }); var cursor = collection.find(); try { cursor.batchSize('not-an-integer'); test.ok(false); } catch (err) { test.equal('batchSize requires an integer', err.message); } cursor = collection.find(); cursor.next(function(err) { test.equal(null, err); cursor.next(function(err) { test.equal(null, err); try { cursor.batchSize(1); test.ok(false); } catch (err) { test.equal('Cursor is closed', err.message); } var cursor2 = collection.find(); cursor2.close(function(err) { test.equal(null, err); try { cursor2.batchSize(1); test.ok(false); } catch (err) { test.equal('Cursor is closed', err.message); } client.close(done); }); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleChangesInBatchSizes', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_not_multiple_batch_size', function(err, collection) { test.equal(null, err); var records = 6; var batchSize = 2; var docs = []; for (var i = 0; i < records; i++) { docs.push({ a: i }); } collection.insert(docs, configuration.writeConcernMax(), function() { test.equal(null, err); collection.find({}, { batchSize: batchSize }, function(err, cursor) { test.equal(null, err); //1st cursor.next(function(err, items) { test.equal(null, err); //cursor.items should contain 1 since nextObject already popped one test.equal(1, cursor.bufferedCount()); test.ok(items != null); //2nd cursor.next(function(err, items) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); test.ok(items != null); //test batch size modification on the fly batchSize = 3; cursor.batchSize(batchSize); //3rd cursor.next(function(err, items) { test.equal(null, err); test.equal(2, cursor.bufferedCount()); test.ok(items != null); //4th cursor.next(function(err, items) { test.equal(null, err); test.equal(1, cursor.bufferedCount()); test.ok(items != null); //5th cursor.next(function(err, items) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); test.ok(items != null); //6th cursor.next(function(err, items) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); test.ok(items != null); //No more cursor.next(function(err, items) { test.equal(null, err); test.ok(items == null); test.ok(cursor.isClosed()); client.close(done); }); }); }); }); }); }); }); 
}); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleBatchSize', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_multiple_batch_size', function(err, collection) { test.equal(null, err); //test with the last batch that is a multiple of batchSize var records = 4; var batchSize = 2; var docs = []; for (var i = 0; i < records; i++) { docs.push({ a: i }); } collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection.find({}, { batchSize: batchSize }, function(err, cursor) { test.equal(null, err); //1st cursor.next(function(err, items) { test.equal(null, err); test.equal(1, cursor.bufferedCount()); test.ok(items != null); //2nd cursor.next(function(err, items) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); test.ok(items != null); //3rd cursor.next(function(err, items) { test.equal(null, err); test.equal(1, cursor.bufferedCount()); test.ok(items != null); //4th cursor.next(function(err, items) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); test.ok(items != null); //No more cursor.next(function(err, items) { test.equal(null, err); test.ok(items == null); test.ok(cursor.isClosed()); client.close(done); }); }); }); }); }); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldHandleWhenLimitBiggerThanBatchSize', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_limit_greater_than_batch_size', function(err, collection) { test.equal(null, err); var limit = 4; var records = 10; var batchSize = 3; var docs = []; for (var i = 0; i < records; i++) { docs.push({ a: i }); } collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var cursor = collection.find({}, { batchSize: batchSize, limit: limit }); //1st cursor.next(function(err) { test.equal(null, err); test.equal(2, cursor.bufferedCount()); //2nd cursor.next(function(err) { test.equal(null, err); test.equal(1, cursor.bufferedCount()); //3rd cursor.next(function(err) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); //4th cursor.next(function(err) { test.equal(null, err); //No more cursor.next(function(err, items) { test.equal(null, err); test.ok(items == null); test.ok(cursor.isClosed()); client.close(done); }); }); }); }); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldHandleLimitLessThanBatchSize', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 
'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_limit_less_than_batch_size', function(err, collection) { test.equal(null, err); var limit = 2; var records = 10; var batchSize = 4; var docs = []; for (var i = 0; i < records; i++) { docs.push({ a: i }); } collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var cursor = collection.find({}, { batchSize: batchSize, limit: limit }); //1st cursor.next(function(err) { test.equal(null, err); test.equal(1, cursor.bufferedCount()); //2nd cursor.next(function(err) { test.equal(null, err); test.equal(0, cursor.bufferedCount()); //No more cursor.next(function(err, items) { test.equal(null, err); test.ok(items == null); test.ok(cursor.isClosed()); client.close(done); }); }); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldHandleSkipLimitChaining', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var collection = db.collection('shouldHandleSkipLimitChaining'); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection.find().toArray(function(err, items) { test.equal(null, err); test.equal(10, items.length); collection .find() .limit(5) .skip(3) .toArray(function(err, items2) { test.equal(null, err); test.equal(5, items2.length); // Check that we have the same elements var numberEqual = 0; var sliced = items.slice(3, 8); for (var i = 0; i < sliced.length; i++) { if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; } test.equal(5, numberEqual); // Let's close the db client.close(done); }); }); } insert(function() { finished(); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyHandleLimitSkipChainingInline', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_limit_skip_chaining_inline', function(err, collection) { test.equal(null, err); function insert(callback) { var total = 10; for (var i = 0; i < 10; i++) { collection.insert({ x: i }, configuration.writeConcernMax(), function(e) { test.equal(null, e); total = total - 1; if (total === 0) callback(); }); } } function finished() { collection.find().toArray(function(err, items) { test.equal(null, err); 
test.equal(10, items.length); collection .find() .limit(5) .skip(3) .toArray(function(err, items2) { test.equal(null, err); test.equal(5, items2.length); // Check that we have the same elements var numberEqual = 0; var sliced = items.slice(3, 8); for (var i = 0; i < sliced.length; i++) { if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; } test.equal(5, numberEqual); // Let's close the db client.close(done); }); }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCloseCursorNoQuerySent', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_close_no_query_sent', function(err, collection) { test.equal(null, err); collection.find().close(function(err, cursor) { test.equal(null, err); test.equal(true, cursor.isClosed()); // Let's close the db client.close(done); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyRefillViaGetMoreCommand', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var COUNT = 1000; var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_refill_via_get_more', function(err, collection) { test.equal(null, err); function insert(callback) { var docs = []; for (var i = 0; i < COUNT; i++) { docs.push({ a: i }); } collection.insertMany(docs, configuration.writeConcernMax(), callback); } function finished() { collection.count(function(err, count) { test.equal(null, err); test.equal(COUNT, count); }); var total = 0; collection.find({}, {}).each(function(err, item) { test.equal(null, err); if (item != null) { total = total + item.a; } else { test.equal(499500, total); collection.count(function(err, count) { test.equal(null, err); test.equal(COUNT, count); }); collection.count(function(err, count) { test.equal(null, err); test.equal(COUNT, count); var total2 = 0; collection.find().each(function(err, item) { test.equal(null, err); if (item != null) { total2 = total2 + item.a; } else { test.equal(499500, total2); collection.count(function(err, count) { test.equal(null, err); test.equal(COUNT, count); test.equal(total, total2); // Let's close the db client.close(done); }); } }); }); } }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyRefillViaGetMoreAlternativeCollection', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = 
configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_refill_via_get_more_alt_coll', function(err, collection) { test.equal(null, err); var COUNT = 1000; function insert(callback) { var docs = []; for (var i = 0; i < COUNT; i++) { docs.push({ a: i }); } collection.insertMany(docs, configuration.writeConcernMax(), callback); } function finished() { collection.count(function(err, count) { test.equal(null, err); test.equal(1000, count); }); var total = 0; collection.find().each(function(err, item) { test.equal(null, err); if (item != null) { total = total + item.a; } else { test.equal(499500, total); collection.count(function(err, count) { test.equal(null, err); test.equal(1000, count); }); collection.count(function(err, count) { test.equal(null, err); test.equal(1000, count); var total2 = 0; collection.find().each(function(err, item) { test.equal(null, err); if (item != null) { total2 = total2 + item.a; } else { test.equal(499500, total2); collection.count(function(err, count) { test.equal(null, err); test.equal(1000, count); test.equal(total, total2); // Let's close the db client.close(done); }); } }); }); } }); } insert(function() { finished(); }); }); }); } }); /** * @ignore * @api private */ it('shouldCloseCursorAfterQueryHasBeenSent', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_close_after_query_sent', function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); var cursor = collection.find({ a: 1 }); cursor.next(function(err) { test.equal(null, err); cursor.close(function(err, cursor) { test.equal(null, err); test.equal(true, cursor.isClosed()); // Let's close the db client.close(done); }); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyExecuteCursorCountWithFields', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_count_with_fields', function(err, collection) { test.equal(null, err); collection.save({ x: 1, a: 2 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection .find({}) .project({ a: 1 }) .toArray(function(err, items) { test.equal(null, err); test.equal(1, items.length); test.equal(2, items[0].a); test.equal(undefined, items[0].x); client.close(done); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyCountWithFieldsUsingExclude', { // Add a tag that our runner can trigger on // in this case we are 
setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('test_count_with_fields_using_exclude', function(err, collection) { test.equal(null, err); collection.save({ x: 1, a: 2 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection.find({}, { fields: { x: 0 } }).toArray(function(err, items) { test.equal(null, err); test.equal(1, items.length); test.equal(2, items[0].a); test.equal(undefined, items[0].x); client.close(done); }); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly execute count on cursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('Should_correctly_execute_count_on_cursor_1', function( err, collection ) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var total = 0; // Create a cursor for the content var cursor = collection.find({}); cursor.count(function(err) { test.equal(null, err); // Ensure each returns all documents cursor.each(function(err, item) { test.equal(null, err); if (item != null) { total++; } else { cursor.count(function(err, c) { test.equal(null, err); test.equal(1000, c); test.equal(1000, total); client.close(done); }); } }); }); }); }); }); } }); /** * @ignore * @api private */ it('should be able to stream documents', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { docs[i] = { a: i + 1 }; } var count = 0; var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('Should_be_able_to_stream_documents', function(err, collection) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var paused = 0, closed = 0, resumed = 0, i = 0; var stream = collection.find().stream(); stream.on('data', function(doc) { test.equal(true, !!doc); test.equal(true, !!doc.a); count = count + 1; if (paused > 0 && 0 === resumed) { err = new Error('data emitted during pause'); return testDone(); } if (++i === 3) { stream.pause(); paused++; setTimeout(function() { stream.resume(); 
resumed++; }, 20); } }); stream.once('error', function(er) { err = er; testDone(); }); stream.once('end', function() { closed++; testDone(); }); function testDone() { test.equal(null, err); test.equal(i, docs.length); test.equal(1, closed); test.equal(1, paused); test.equal(1, resumed); test.strictEqual(stream.isClosed(), true); client.close(done); } }); }); }); } }); /** * @ignore * @api private */ it('immediately destroying a stream prevents the query from executing', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var i = 0, docs = [{ b: 2 }, { b: 3 }], doneCalled = 0; var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection( 'immediately_destroying_a_stream_prevents_the_query_from_executing', function(err, collection) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var stream = collection.find().stream(); stream.on('data', function() { i++; }); stream.once('close', testDone('close')); stream.once('error', testDone('error')); stream.destroy(); function testDone() { return function(err) { ++doneCalled; if (doneCalled === 1) { test.equal(undefined, err); test.strictEqual(0, i); test.strictEqual(true, stream.isClosed()); client.close(done); } }; } }); } ); }); } }); /** * @ignore * @api private */ it('destroying a stream stops it', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db.createCollection('destroying_a_stream_stops_it', function(err, collection) { test.equal(null, err); var docs = []; for (var ii = 0; ii < 10; ++ii) docs.push({ b: ii + 1 }); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var finished = 0, i = 0; var stream = collection.find().stream(); test.strictEqual(false, stream.isClosed()); stream.on('data', function() { if (++i === 5) { stream.destroy(); } }); stream.once('close', testDone); stream.once('error', testDone); function testDone(err) { ++finished; setTimeout(function() { test.strictEqual(undefined, err); test.strictEqual(5, i); test.strictEqual(1, finished); test.strictEqual(true, stream.isClosed()); client.close(done); }, 150); } }); }); }); } }); /** * @ignore * @api private */ // NOTE: skipped for use of topology manager it.skip('cursor stream errors', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); 
client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db.createCollection('cursor_stream_errors', function(err, collection) { test.equal(null, err); var docs = []; for (var ii = 0; ii < 10; ++ii) docs.push({ b: ii + 1 }); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var finished = 0, i = 0; var stream = collection.find({}, { batchSize: 5 }).stream(); stream.on('data', function() { if (++i === 4) { // Force restart configuration.manager.stop(9); } }); stream.once('close', testDone('close')); stream.once('error', testDone('error')); function testDone() { return function() { ++finished; if (finished === 2) { setTimeout(function() { test.equal(5, i); test.equal(true, stream.isClosed()); client.close(); configuration.manager.start().then(function() { done(); }); }, 150); } }; } }); }); }); } }); /** * @ignore * @api private */ it('cursor stream errors connection force closed', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { mongodb: '<=3.5.0', // NOTE: remove this when SERVER-30576 is resolved topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], unifiedTopology: false } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db.createCollection('cursor_stream_errors', function(err, collection) { test.equal(null, err); var docs = []; for (var ii = 0; ii < 10; ++ii) docs.push({ b: ii + 1 }); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var finished = 0, i = 0; var stream = collection.find({}, { batchSize: 5 }).stream(); stream.on('data', function() { if (++i === 5) { client.topology .connections()[0] .write(Buffer.from('312312321321askdjljsaNCKnablibh')); } }); stream.once('close', testDone('close')); stream.once('error', testDone('error')); function testDone() { return function() { ++finished; if (finished === 2) { setTimeout(function() { test.equal(5, i); test.equal(2, finished); test.equal(true, stream.isClosed()); client.close(true, done); }, 150); } }; } }); }); }); } }); /** * @ignore * @api private */ it('cursor stream pipe', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('cursor_stream_pipe', function(err, collection) { test.equal(null, err); var docs = []; 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').forEach(function(name) { docs.push({ name: name }); }); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); var filename = '/tmp/_nodemongodbnative_stream_out.txt', out = fs.createWriteStream(filename); // hack so we don't need to create a stream filter just to // stringify the objects (otherwise the created file would 
// just contain a bunch of [object Object]) // var toString = Object.prototype.toString; // Object.prototype.toString = function () { // return JSON.stringify(this); // } var stream = collection.find().stream({ transform: function(doc) { return JSON.stringify(doc); } }); stream.pipe(out); // Wait for output stream to close out.on('close', testDone); function testDone(err) { // Object.prototype.toString = toString; test.strictEqual(undefined, err); var contents = fs.readFileSync(filename, 'utf8'); test.ok(/Aaden/.test(contents)); test.ok(/Aaron/.test(contents)); test.ok(/Adrian/.test(contents)); test.ok(/Aditya/.test(contents)); test.ok(/Bob/.test(contents)); test.ok(/Joe/.test(contents)); fs.unlinkSync(filename); client.close(done); } }); }); }); } }); /** * @ignore */ it('shouldCloseDeadTailableCursors', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }, sessions: { skipLeakTests: true } }, // The actual test we wish to run test: function(done) { // http://www.mongodb.org/display/DOCS/Tailable+Cursors var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var options = { capped: true, size: 10000000 }; db.createCollection('test_if_dead_tailable_cursors_close', options, function( err, collection ) { test.equal(null, err); var closeCount = 0; var errorOccurred = false; var count = 100; // Just hammer the server for (var i = 0; i < 100; i++) { collection.insert({ id: i }, { w: 'majority', wtimeout: 5000 }, function(err) { test.equal(null, err); count = count - 1; if (count === 0) { var stream = collection.find({}, { tailable: true, awaitData: true }).stream(); // let index = 0; stream.resume(); stream.on('error', function(err) { expect(err).to.exist; errorOccurred = true; }); var validator = () => { closeCount++; if (closeCount === 2) { expect(errorOccurred).to.equal(true); done(); } }; stream.on('end', validator); stream.on('close', validator); // Just hammer the server for (var i = 0; i < 100; i++) { const id = i; process.nextTick(function() { collection.insert({ id }, function(err) { test.equal(null, err); if (id === 99) { setTimeout(() => client.close()); } }); }); } } }); } }); }); } }); /** * @ignore */ it('shouldAwaitData', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { // http://www.mongodb.org/display/DOCS/Tailable+Cursors var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var options = { capped: true, size: 8 }; db.createCollection('should_await_data', options, function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create cursor with awaitdata, and timeout after the period specified var cursor = collection.find({}, { tailable: true, awaitdata: true }); // Execute each cursor.each(function(err, result) { if (result) { cursor.kill(); } 
if (err != null) { // Even though cursor is exhausted, should not close session // // unless cursor is manually closed, due to awaitdata / tailable cursor.close(); client.close(done); } }); }); }); }); } }); /** * @ignore */ it('shouldAwaitDataWithDocumentsAvailable', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { // http://www.mongodb.org/display/DOCS/Tailable+Cursors var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var options = { capped: true, size: 8 }; db.createCollection('should_await_data_no_docs', options, function(err, collection) { test.equal(null, err); // Create cursor with awaitdata, and timeout after the period specified var cursor = collection.find({}, { tailable: true, awaitdata: true }); var rewind = cursor.rewind; var called = false; cursor.rewind = function() { called = true; }; cursor.each(function(err) { if (err != null) { test.ok(called); cursor.rewind = rewind; client.close(done); } }); }); }); } }); /** * @ignore */ it('shouldAwaitDataUsingCursorFlag', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { // http://www.mongodb.org/display/DOCS/Tailable+Cursors var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var options = { capped: true, size: 8 }; db.createCollection('should_await_data_cursor_flag', options, function(err, collection) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create cursor with awaitdata, and timeout after the period specified var cursor = collection.find({}, {}); cursor.addCursorFlag('tailable', true); cursor.addCursorFlag('awaitData', true); cursor.each(function(err) { if (err != null) { // Even though cursor is exhausted, should not close session // unless cursor is manually closed, due to awaitdata / tailable cursor.close(); client.close(done); } else { cursor.kill(); } }); }); }); }); } }); /** * @ignore */ /* it('shouldNotAwaitDataWhenFalse = { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { // NODE-98 var db = configuration.newClient(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false}); db.open(function(err, db) { var options = { capped: true, size: 8}; db.createCollection('should_not_await_data_when_false', options, function(err, collection) { collection.insert({a:1}, configuration.writeConcernMax(), function(err, result) { // should not timeout collection.find({}, {tailable:true, awaitdata:false}).each(function(err, result) { test.ok(err != null); }); client.close(done); }); }); }); } } */ /** * 
@ignore */ it('Should correctly retry tailable cursor connection', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { // http://www.mongodb.org/display/DOCS/Tailable+Cursors var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var options = { capped: true, size: 8 }; db.createCollection('should_await_data_retry_tailable_cursor', options, function( err, collection ) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create cursor with awaitdata, and timeout after the period specified var cursor = collection.find({}, { tailable: true, awaitdata: true }); cursor.each(function(err) { if (err != null) { // kill cursor b/c cursor is tailable / awaitable cursor.close(); client.close(done); } else { cursor.kill(); } }); }); }); }); } }); /** * @ignore */ it('shouldCorrectExecuteExplainHonoringLimit', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; docs[0] = { _keywords: [ 'compact', 'ii2gd', 'led', '24-48v', 'presse-etoupe', 'bexbgl1d24483', 'flash', '48v', 'eexd', 'feu', 'presse', 'compris', 'rouge', 'etoupe', 'iic', 'ii2gdeexdiict5', 'red', 'aet' ] }; docs[1] = { _keywords: [ 'reducteur', '06212', 'd20/16', 'manch', 'd20', 'manchon', 'ard', 'sable', 'irl', 'red' ] }; docs[2] = { _keywords: [ 'reducteur', '06214', 'manch', 'd25/20', 'd25', 'manchon', 'ard', 'sable', 'irl', 'red' ] }; docs[3] = { _keywords: [ 'bar', 'rac', 'boite', '6790178', '50-240/4-35', '240', 'branch', 'coulee', 'ddc', 'red', 'ip2x' ] }; docs[4] = { _keywords: [ 'bar', 'ip2x', 'boite', '6790158', 'ddi', '240', 'branch', 'injectee', '50-240/4-35?', 'red' ] }; docs[5] = { _keywords: [ 'bar', 'ip2x', 'boite', '6790179', 'coulee', '240', 'branch', 'sdc', '50-240/4-35?', 'red', 'rac' ] }; docs[6] = { _keywords: [ 'bar', 'ip2x', 'boite', '6790159', '240', 'branch', 'injectee', '50-240/4-35?', 'sdi', 'red' ] }; docs[7] = { _keywords: [ '6000', 'r-6000', 'resin', 'high', '739680', 'red', 'performance', 'brd', 'with', 'ribbon', 'flanges' ] }; docs[8] = { _keywords: ['804320', 'for', 'paint', 'roads', 'brd', 'red'] }; docs[9] = { _keywords: ['38mm', 'padlock', 'safety', '813594', 'brd', 'red'] }; docs[10] = { _keywords: ['114551', 'r6900', 'for', 'red', 'bmp71', 'brd', 'ribbon'] }; docs[11] = { _keywords: ['catena', 'diameter', '621482', 'rings', 'brd', 'legend', 'red', '2mm'] }; docs[12] = { _keywords: ['catena', 'diameter', '621491', 'rings', '5mm', 'brd', 'legend', 'red'] }; docs[13] = { _keywords: ['catena', 'diameter', '621499', 'rings', '3mm', 'brd', 'legend', 'red'] }; docs[14] = { _keywords: ['catena', 'diameter', '621508', 'rings', '5mm', 'brd', 'legend', 'red'] }; docs[15] = { _keywords: [ 'insert', 'for', 'cable', '3mm', 'carrier', '621540', 'blank', 'brd', 'ademark', 'red' ] }; docs[16] = { _keywords: [ 'insert', 'for', 'cable', '621544', '3mm', 'carrier', 'brd', 'ademark', 'legend', 'red' ] }; 
docs[17] = { _keywords: ['catena', 'diameter', '6mm', '621518', 'rings', 'brd', 'legend', 'red'] }; docs[18] = { _keywords: ['catena', 'diameter', '621455', '8mm', 'rings', 'brd', 'legend', 'red'] }; docs[19] = { _keywords: ['catena', 'diameter', '621464', 'rings', '5mm', 'brd', 'legend', 'red'] }; var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); // Insert all the docs var collection = db.collection('shouldCorrectExecuteExplainHonoringLimit'); collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection.ensureIndex({ _keywords: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection .find({ _keywords: 'red' }, {}, { explain: true }) .limit(10) .toArray(function(err, result) { test.equal(null, err); test.ok(result != null); collection .find({ _keywords: 'red' }, {}) .limit(10) .explain(function(err, result) { test.equal(null, err); test.ok(result != null); client.close(done); }); }); }); }); }); } }); /** * @ignore */ it('shouldNotExplainWhenFalse', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var doc = { name: 'camera', _keywords: ['compact', 'ii2gd', 'led', 'red', 'aet'] }; var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var collection = db.collection('shouldNotExplainWhenFalse'); collection.insert(doc, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection .find({ _keywords: 'red' }, {}, { explain: false }) .limit(10) .toArray(function(err, result) { test.equal(null, err); test.equal('camera', result[0].name); client.close(done); }); }); }); } }); /** * @ignore */ it('shouldFailToSetReadPreferenceOnCursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); try { db.collection('shouldFailToSetReadPreferenceOnCursor') .find() .setReadPreference('notsecondary'); test.ok(false); } catch (err) {} // eslint-disable-line db.collection('shouldFailToSetReadPreferenceOnCursor') .find() .setReadPreference('secondary'); client.close(done); }); } }); /** * @ignore * @api private */ it('shouldNotFailDueToStackOverflowEach', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { 
test.equal(null, err); var db = client.db(configuration.db); db.createCollection('shouldNotFailDueToStackOverflowEach', function(err, collection) { test.equal(null, err); var docs = []; var total = 0; for (var i = 0; i < 30000; i++) docs.push({ a: i }); var allDocs = []; var left = 0; while (docs.length > 0) { allDocs.push(docs.splice(0, 1000)); } // Get all batches we must insert left = allDocs.length; var totalI = 0; // Execute inserts for (i = 0; i < left; i++) { collection.insert(allDocs.shift(), configuration.writeConcernMax(), function(err, d) { test.equal(null, err); left = left - 1; totalI = totalI + d.length; if (left === 0) { collection.find({}).each(function(err, item) { test.equal(null, err); if (item == null) { test.equal(30000, total); client.close(done); } else { total++; } }); } }); } }); }); } }); /** * @ignore * @api private */ it('shouldNotFailDueToStackOverflowToArray', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('shouldNotFailDueToStackOverflowToArray', function(err, collection) { test.equal(null, err); var docs = []; for (var i = 0; i < 30000; i++) docs.push({ a: i }); var allDocs = []; var left = 0; while (docs.length > 0) { allDocs.push(docs.splice(0, 1000)); } // Get all batches we must insert left = allDocs.length; var totalI = 0; var timeout = 0; // Execute inserts for (i = 0; i < left; i++) { setTimeout(function() { collection.insert(allDocs.shift(), configuration.writeConcernMax(), function(err, d) { test.equal(null, err); left = left - 1; totalI = totalI + d.length; if (left === 0) { collection.find({}).toArray(function(err, items) { test.equal(null, err); test.equal(30000, items.length); client.close(done); }); } }); }, timeout); timeout = timeout + 100; } }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlySkipAndLimit', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var collection = db.collection('shouldCorrectlySkipAndLimit'); var docs = []; for (var i = 0; i < 100; i++) docs.push({ a: i, OrderNumber: i }); collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection .find({}, { OrderNumber: 1 }) .skip(10) .limit(10) .toArray(function(err, items) { test.equal(null, err); test.equal(10, items[0].OrderNumber); collection .find({}, { OrderNumber: 1 }) .skip(10) .limit(10) .count(true, function(err, count) { test.equal(null, err); test.equal(10, count); client.close(done); }); }); }); }); } }); /** * @ignore * @api private */ it('shouldFailToTailANormalCollection', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be 
higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var collection = db.collection('shouldFailToTailANormalCollection'); var docs = []; for (var i = 0; i < 100; i++) docs.push({ a: i, OrderNumber: i }); collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); const cursor = collection.find({}, { tailable: true }); cursor.each(function(err) { test.ok(err instanceof Error); test.ok(typeof err.code === 'number'); // Close cursor b/c we did not exhaust cursor cursor.close(); client.close(done); }); }); }); } }); /** * @ignore */ it('shouldCorrectlyUseFindAndCursorCount', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); // DOC_LINE var client = new MongoClient(new Server('localhost', 27017)); // DOC_START // Establish connection to db client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); // Create a lot of documents to insert var docs = []; for (var i = 0; i < 100; i++) { docs.push({ a: i }); } // Create a collection db.createCollection('test_close_function_on_cursor_2', function(err, collection) { test.equal(null, err); // Insert documents into collection collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); collection.find({}, function(err, cursor) { test.equal(null, err); cursor.count(function(err, count) { test.equal(null, err); test.equal(100, count); client.close(done); }); }); }); }); }); // DOC_END } }); /** * @ignore */ it('should correctly apply hint to count command for cursor', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], mongodb: '>2.5.5' } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); // DOC_LINE var client = new MongoClient(new Server('localhost', 27017)); // DOC_START // Establish connection to db client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var col = db.collection('count_hint'); col.insert([{ i: 1 }, { i: 2 }], { w: 1 }, function(err) { test.equal(null, err); col.ensureIndex({ i: 1 }, function(err) { test.equal(null, err); col.find({ i: 1 }, { hint: '_id_' }).count(function(err, count) { test.equal(null, err); test.equal(1, count); col.find({}, { hint: '_id_' }).count(function(err, count) { test.equal(null, err); test.equal(2, count); col.find({ i: 1 }, { hint: 'BAD HINT' }).count(function(err) { test.ok(err != null); col.ensureIndex({ x: 1 }, { sparse: true }, function(err) { test.equal(null, err); col.find({ i: 1 }, { hint: 'x_1' }).count(function(err, count) { 
test.equal(null, err);
test.equal(0, count);

col.find({}, { hint: 'i_1' }).count(function(err, count) {
  test.equal(null, err);
  test.equal(2, count);
  client.close(done);
});
});
});
});
});
});
});
});
});
// DOC_END
}
});

/**
 * @ignore
 */
it('Terminate each after first document by returning false', {
  // Add a tag that our runner can trigger on
  // in this case we are setting that node needs to be higher than 0.10.X to run
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      test.equal(null, err);
      var db = client.db(configuration.db);
      // Create a lot of documents to insert
      var docs = [];
      for (var i = 0; i < 100; i++) {
        docs.push({ a: i });
      }

      // Create a collection
      db.createCollection('terminate_each_returning_false', function(err, collection) {
        test.equal(null, err);

        // Insert documents into collection
        collection.insert(docs, configuration.writeConcernMax(), function(err) {
          test.equal(null, err);
          var finished = false;

          collection.find({}).each(function(err, doc) {
            test.equal(null, err);

            if (doc) {
              test.equal(finished, false);
              finished = true;
              client.close(done);
              return false;
            }
          });
        });
      });
    });
  }
});

/**
 * @ignore
 */
it('Should correctly handle maxTimeMS as part of findOne options', {
  // Add a tag that our runner can trigger on
  // in this case we are setting that node needs to be higher than 0.10.X to run
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      test.equal(null, err);

      var donkey = { color: 'brown' };

      db.collection('donkies').insertOne(donkey, function(err, result) {
        test.equal(null, err);

        var query = { _id: result.insertedId };
        var options = { maxTimeMS: 1000 };

        db.collection('donkies').findOne(query, options, function(err, doc) {
          test.equal(null, err);
          test.equal('brown', doc.color);
          client.close(done);
        });
      });
    });
  }
});

/**
 * @ignore
 */
it('Should correctly handle batchSize of 2', {
  // Add a tag that our runner can trigger on
  // in this case we are setting that node needs to be higher than 0.10.X to run
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      test.equal(null, err);

      db.collection('should_correctly_handle_batchSize_2').insert(
        [{ x: 1 }, { x: 2 }, { x: 3 }],
        function(err) {
          test.equal(null, err);

          db.collection('should_correctly_handle_batchSize_2').find(
            {},
            { batchSize: 2 },
            function(error, cursor) {
              // Assert on this callback's own error argument rather than the outer insert callback's err
              test.equal(null, error);

              cursor.next(function(err) {
                test.equal(null, err);

                cursor.next(function(err) {
                  test.equal(null, err);

                  cursor.next(function(err) {
                    test.equal(null, err);
                    client.close(done);
                  });
                });
              });
            }
          );
        }
      );
    });
  }
});

/**
 * @ignore
 */
it('Should report database name and collection name', {
  metadata: { requires: { topology: ['single'] } },

  // The actual test we wish to run
  test:
function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db.collection('myCollection').find({}, function(err, cursor) { test.equal(null, err); test.equal('myCollection', cursor.namespace.collection); test.equal('integration_tests', cursor.namespace.db); client.close(done); }); }); } }); /** * @ignore * @api private */ it('Should correctly execute count on cursor with maxTimeMS', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('Should_correctly_execute_count_on_cursor_2', function( err, collection ) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection.find({}); cursor.limit(100); cursor.skip(10); cursor.count(true, { maxTimeMS: 1000 }, function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection.find({}); cursor.limit(100); cursor.skip(10); cursor.maxTimeMS(100); cursor.count(function(err) { test.equal(null, err); client.close(done); }); }); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly execute count on cursor with maxTimeMS set using legacy method', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('Should_correctly_execute_count_on_cursor_3', function( err, collection ) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection.find({}, { maxTimeMS: 100 }); cursor.toArray(function(err) { test.equal(null, err); client.close(done); }); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly apply map to toArray', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var 
configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('map_toArray'); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection .find({}) .map(function() { return { a: 1 }; }) .batchSize(5) .limit(10); cursor.toArray(function(err, docs) { test.equal(null, err); test.equal(10, docs.length); // Ensure all docs where mapped docs.forEach(function(x) { test.equal(1, x.a); }); client.close(done); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly apply map to next', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('map_next'); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection .find({}) .map(function() { return { a: 1 }; }) .batchSize(5) .limit(10); cursor.next(function(err, doc) { test.equal(null, err); test.equal(1, doc.a); // Close cursor b/c we did not exhaust cursor cursor.close(); client.close(done); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly apply map to each', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('map_each'); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection .find({}) .map(function() { return { a: 1 }; }) .batchSize(5) .limit(10); cursor.each(function(err, doc) { test.equal(null, err); if (doc) { test.equal(1, doc.a); } else { client.close(done); } }); }); }); } }); /** * @ignore * @api private */ it('Should correctly apply map to forEach', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: 
new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('map_forEach'); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection .find({}) .map(function() { return { a: 2 }; }) .map(function(x) { return { a: x.a * x.a }; }) .batchSize(5) .limit(10); cursor.forEach( function(doc) { test.equal(4, doc.a); }, function(err) { test.equal(null, err); client.close(done); } ); }); }); } }); /** * @ignore * @api private */ it('Should correctly apply multiple uses of map and apply forEach', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1000; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('map_mapmapforEach'); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection .find({}) .map(function() { return { a: 1 }; }) .batchSize(5) .limit(10); cursor.forEach( function(doc) { test.equal(1, doc.a); }, function(err) { test.equal(null, err); client.close(done); } ); }); }); } }); /** * @ignore * @api private */ it('Should correctly apply skip and limit to large set of documents', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('cursor_limit_skip_correctly'); // Insert x number of docs var ordered = collection.initializeUnorderedBulkOp(); for (var i = 0; i < 6000; i++) { ordered.insert({ a: i }); } ordered.execute({ w: 1 }, function(err) { test.equal(null, err); // Let's attempt to skip and limit collection .find({}) .limit(2016) .skip(2016) .toArray(function(err, docs) { test.equal(null, err); test.equal(2016, docs.length); client.close(done); }); }); }); } }); /** * @ignore */ it('should tail cursor using maxAwaitTimeMS for 3.2 or higher', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single'], mongodb: '>3.1.9' } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var 
options = { capped: true, size: 8 }; db.createCollection('should_await_data_max_awaittime_ms', options, function( err, collection ) { test.equal(null, err); collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) { test.equal(null, err); var s = new Date(); // Create cursor with awaitdata, and timeout after the period specified var cursor = collection .find({}) .addCursorFlag('tailable', true) .addCursorFlag('awaitData', true) .maxAwaitTimeMS(500); cursor.each(function(err, result) { test.equal(null, err); if (result) { setTimeout(function() { cursor.kill(); }, 300); } else { test.ok(new Date().getTime() - s.getTime() >= 500); // TODO: forced because the cursor is still open/active client.close(true, done); } }); }); }); }); } }); /** * @ignore * @api private */ it('Should not emit any events after close event emitted due to cursor killed', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); var collection = db.collection('cursor_limit_skip_correctly'); // Insert x number of docs var ordered = collection.initializeUnorderedBulkOp(); for (var i = 0; i < 100; i++) { ordered.insert({ a: i }); } ordered.execute({ w: 1 }, function(err) { test.equal(null, err); // Let's attempt to skip and limit var cursor = collection.find({}).batchSize(10); cursor.on('data', function() { cursor.destroy(); }); cursor.on('close', function() { client.close(done); }); }); }); } }); /** * @ignore * @api private */ it('shouldCorrectlyExecuteEnsureIndexWithNoCallback', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 1; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('shouldCorrectlyExecuteEnsureIndexWithNoCallback', function( err, collection ) { test.equal(null, err); // ensure index of createdAt index collection.ensureIndex({ createdAt: 1 }, function(err) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Find with sort collection .find() .sort(['createdAt', 'asc']) .toArray(function(err, items) { test.equal(null, err); test.equal(1, items.length); client.close(done); }); }); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly execute count on cursor with limit and skip', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; for (var i = 0; i < 50; i++) 
{ var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('Should_correctly_execute_count_on_cursor_1_', function( err, collection ) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection.find({}); cursor .limit(100) .skip(0) .count(function(err, c) { test.equal(null, err); test.equal(50, c); var cursor = collection.find({}); cursor .limit(100) .skip(0) .toArray(function(err) { test.equal(null, err); test.equal(50, c); client.close(done); }); }); }); }); }); } }); /** * @ignore * @api private */ it('Should correctly handle negative batchSize and set the limit', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var docs = []; var configuration = this.configuration; var Long = configuration.require.Long; for (var i = 0; i < 50; i++) { var d = new Date().getTime() + i * 1000; docs[i] = { a: i, createdAt: new Date(d) }; } var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); db.createCollection('negative_batch_size_and_limit_set', function(err, collection) { test.equal(null, err); // insert all docs collection.insert(docs, configuration.writeConcernMax(), function(err) { test.equal(null, err); // Create a cursor for the content var cursor = collection.find({}); cursor.batchSize(-10).next(function(err) { test.equal(null, err); test.ok(cursor.cursorState.cursorId.equals(Long.ZERO)); client.close(done); }); }); }); }); } }); it('Correctly decorate the cursor count command with skip, limit, hint, readConcern', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var started = []; var listener = require('../..').instrument(function(err) { test.equal(null, err); }); listener.on('started', function(event) { if (event.commandName === 'count') started.push(event); }); var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db.collection('cursor_count_test', { readConcern: { level: 'local' } }) .find({ project: '123' }) .limit(5) .skip(5) .hint({ project: 1 }) .count(true, function(err) { test.equal(null, err); test.equal(1, started.length); if (started[0].command.readConcern) test.deepEqual({ level: 'local' }, started[0].command.readConcern); test.deepEqual({ project: 1 }, started[0].command.hint); test.equal(5, started[0].command.skip); test.equal(5, started[0].command.limit); listener.uninstrument(); client.close(done); }); }); } }); it('Correctly decorate the collection cursor count command with skip, limit, hint, 
readConcern', { // Add a tag that our runner can trigger on // in this case we are setting that node needs to be higher than 0.10.X to run metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, // The actual test we wish to run test: function(done) { var started = []; var listener = require('../..').instrument(function(err) { test.equal(null, err); }); listener.on('started', function(event) { if (event.commandName === 'count') started.push(event); }); var configuration = this.configuration; var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); client.connect(function(err, client) { var db = client.db(configuration.db); test.equal(null, err); db.collection('cursor_count_test1', { readConcern: { level: 'local' } }).count( { project: '123' }, { readConcern: { level: 'local' }, limit: 5, skip: 5, hint: { project: 1 } }, function(err) { test.equal(null, err); test.equal(1, started.length); if (started[0].command.readConcern) test.deepEqual({ level: 'local' }, started[0].command.readConcern); test.deepEqual({ project: 1 }, started[0].command.hint); test.equal(5, started[0].command.skip); test.equal(5, started[0].command.limit); listener.uninstrument(); client.close(done); } ); }); } }); it('Should properly kill a cursor', { metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], mongodb: '>=3.2.0' } }, // The actual test we wish to run test: function() { // Load up the documents const docs = []; for (let i = 0; i < 1000; i += 1) { docs.push({ a: i }); } const configuration = this.configuration; const client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 }); let cleanup = () => {}; let caughtError = undefined; return ( client // Connect .connect() .then(function(client) { cleanup = () => client.close(); const db = client.db(configuration.db); const collection = db.collection('cursorkilltest1'); // Insert 1000 documents return collection.insert(docs).then(() => { // Generate cursor for find operation const cursor = collection.find({}); // Iterate cursor past first element return cursor .next() .then(() => cursor.next()) .then(() => { // Confirm that cursorId is non-zero const longId = cursor.cursorState.cursorId; expect(longId) .to.be.an('object') .and.to.haveOwnProperty('_bsontype', 'Long'); const id = longId.toNumber(); expect(id).to.not.equal(0); // Kill cursor return new Promise((resolve, reject) => cursor.kill((err, r) => (err ? reject(err) : resolve(r))) ).then(response => { // sharded clusters will return a long, single return integers if ( response && response.cursorsKilled && Array.isArray(response.cursorsKilled) ) { response.cursorsKilled = response.cursorsKilled.map(id => typeof id === 'number' ? Long.fromNumber(id) : id ); } expect(response.ok).to.equal(1); expect(response.cursorsKilled[0].equals(longId)).to.be.ok; cursor.close(); return client.close(); }); }); }); }) // Clean up. Make sure that even in case of error, we still always clean up connection .catch(e => (caughtError = e)) .then(cleanup) .then(() => { if (caughtError) { throw caughtError; } }) ); } }); // NOTE: This is skipped because I don't think its correct or adds value. The expected error // is not an error with hasNext (from server), but rather a local TypeError which should // be caught anyway. The only solution here would be to wrap the entire top level call // in a try/catch which is not going to happen. 
it.skip('Should propagate hasNext errors when using a callback', { metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } }, test: function(done) { var configuration = this.configuration; var client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); client.connect(function(err, client) { test.equal(null, err); var db = client.db(configuration.db); var findCommand = { find: 'integration_tests.has_next_error_callback', limit: 0, skip: 0, query: {}, slaveOk: false }; var cursor = db.s.topology.cursor(db.namespace, findCommand, { readPreference: 42 }); cursor.hasNext(function(err) { test.ok(err !== null); test.equal(err.message, 'readPreference must be a ReadPreference instance'); done(); }); }); } }); it( 'should return implicit session to pool when client-side cursor exhausts results on initial query', { metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], mongodb: '>=3.6.0' } }, test: function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); client.connect(function(err, client) { test.equal(null, err); const db = client.db(configuration.db); const collection = db.collection('cursor_session_tests'); collection.insertMany([{ a: 1, b: 2 }], function(err) { test.equal(null, err); const cursor = collection.find({}); cursor.next(function() { test.equal(client.topology.s.sessions.size, 0); client.close(done); }); }); }); } } ); it( 'should return implicit session to pool when client-side cursor exhausts results after a getMore', { metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'], mongodb: '>=3.6.0' } }, test: function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); client.connect(function(err, client) { test.equal(null, err); const db = client.db(configuration.db); const collection = db.collection('cursor_session_tests2'); const docs = [ { a: 1, b: 2 }, { a: 3, b: 4 }, { a: 5, b: 6 }, { a: 7, b: 8 }, { a: 9, b: 10 } ]; collection.insertMany(docs, function(err) { test.equal(null, err); const cursor = collection.find({}, { batchSize: 3 }); cursor.next(function() { test.equal(client.topology.s.sessions.size, 1); cursor.next(function() { test.equal(client.topology.s.sessions.size, 1); cursor.next(function() { test.equal(client.topology.s.sessions.size, 1); cursor.next(function() { test.equal(client.topology.s.sessions.size, 0); client.close(done); }); }); }); }); }); }); } } ); it('should return a promise when no callback supplied to forEach method', function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); client.connect(function(err, client) { expect(err).to.not.exist; const db = client.db(configuration.db); const collection = db.collection('cursor_session_tests2'); const cursor = collection.find(); const promise = cursor.forEach(); expect(promise).to.exist.and.to.be.an.instanceof(cursor.s.promiseLibrary); promise.catch(() => {}); cursor.close(() => client.close(() => done())); }); }); it('should return false when exhausted and hasNext called more than once', function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); client.connect(function(err, client) { const db = 
client.db(configuration.db); db.createCollection('cursor_hasNext_test').then(function() { const cursor = db.collection('cursor_hasNext_test').find(); cursor .hasNext() .then(function(val1) { expect(val1).to.equal(false); return cursor.hasNext(); }) .then(function(val2) { expect(val2).to.equal(false); cursor.close(() => client.close(() => done())); }) .catch(err => { cursor.close(() => client.close(() => done(err))); }); }); }); }); function testTransformStream(config, done) { const client = config.client; const configuration = config.configuration; const collectionName = config.collectionName; const transformFunc = config.transformFunc; const expectedSet = config.expectedSet; client.connect(function(err, client) { const db = client.db(configuration.db); let collection, cursor; const docs = [ { _id: 0, a: { b: 1, c: 0 } }, { _id: 1, a: { b: 1, c: 0 } }, { _id: 2, a: { b: 1, c: 0 } } ]; const resultSet = new Set(); const transformParam = transformFunc != null ? { transform: transformFunc } : null; const close = e => cursor.close(() => client.close(() => done(e))); Promise.resolve() .then(() => db.createCollection(collectionName)) .then(() => (collection = db.collection(collectionName))) .then(() => collection.insertMany(docs)) .then(() => collection.find()) .then(_cursor => (cursor = _cursor)) .then(() => cursor.transformStream(transformParam)) .then(stream => { stream.on('data', function(doc) { resultSet.add(doc); }); stream.once('end', function() { expect(resultSet).to.deep.equal(expectedSet); close(); }); stream.once('error', function(e) { close(e); }); }) .catch(e => close(e)); }); } it('transformStream should apply the supplied transformation function to each document in the stream', function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); const expectedDocs = [ { _id: 0, b: 1, c: 0 }, { _id: 1, b: 1, c: 0 }, { _id: 2, b: 1, c: 0 } ]; const config = { client: client, configuration: configuration, collectionName: 'transformStream-test-transform', transformFunc: doc => ({ _id: doc._id, b: doc.a.b, c: doc.a.c }), expectedSet: new Set(expectedDocs) }; testTransformStream(config, done); }); it('transformStream should return a stream of unmodified docs if no transform function applied', function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); const expectedDocs = [ { _id: 0, a: { b: 1, c: 0 } }, { _id: 1, a: { b: 1, c: 0 } }, { _id: 2, a: { b: 1, c: 0 } } ]; const config = { client: client, configuration: configuration, collectionName: 'transformStream-test-notransform', transformFunc: null, expectedSet: new Set(expectedDocs) }; testTransformStream(config, done); }); it.skip('should apply parent read preference to count command', function(done) { // NOTE: this test is skipped because mongo orchestration does not test sharded clusters // with secondaries. 
This behavior should be unit tested const configuration = this.configuration; const client = configuration.newClient( { w: 1, readPreference: ReadPreference.SECONDARY }, { poolSize: 1, auto_reconnect: false, connectWithNoPrimary: true } ); client.connect(function(err, client) { expect(err).to.not.exist; const db = client.db(configuration.db); let collection, cursor, spy; const close = e => cursor.close(() => client.close(() => done(e))); Promise.resolve() .then(() => new Promise(resolve => setTimeout(() => resolve(), 500))) .then(() => db.createCollection('test_count_readPreference')) .then(() => (collection = db.collection('test_count_readPreference'))) .then(() => collection.find()) .then(_cursor => (cursor = _cursor)) .then(() => (spy = sinon.spy(cursor.topology, 'command'))) .then(() => cursor.count()) .then(() => expect(spy.firstCall.args[2]) .to.have.nested.property('readPreference.mode') .that.equals('secondary') ) .then(() => close()) .catch(e => close(e)); }); }); it('should not consume first document on hasNext when streaming', function(done) { const configuration = this.configuration; const client = configuration.newClient({ w: 1 }, { poolSize: 1, auto_reconnect: false }); client.connect(err => { expect(err).to.not.exist; this.defer(() => client.close()); const collection = client.db().collection('documents'); collection.drop(() => { const docs = [{ a: 1 }, { a: 2 }, { a: 3 }]; collection.insertMany(docs, err => { expect(err).to.not.exist; const cursor = collection.find({}, { sort: { a: 1 } }); cursor.hasNext((err, hasNext) => { expect(err).to.not.exist; expect(hasNext).to.be.true; const collected = []; const stream = new Writable({ objectMode: true, write: (chunk, encoding, next) => { collected.push(chunk); next(undefined, chunk); } }); cursor.on('close', () => { expect(collected).to.have.length(3); expect(collected).to.eql(docs); done(); }); cursor.pipe(stream); }); }); }); }); }); it('should correctly iterate all documents with a limit set', function(done) { const configuration = this.configuration; const client = configuration.newClient(); client.connect(err => { expect(err).to.not.exist; this.defer(() => client.close()); const collection = client.db().collection('documents'); collection.drop(() => { const docs = [{ a: 1 }, { a: 2 }, { a: 3 }]; collection.insertMany(docs, err => { expect(err).to.not.exist; let cursor = collection.find({}).limit(5); let bag = []; function iterate() { if (bag.length === docs.length) { expect(bag).to.eql(docs); return done(); } cursor.hasNext((err, hasNext) => { expect(err).to.not.exist; expect(hasNext).to.be.true; cursor.next((err, doc) => { expect(err).to.not.exist; bag.push(doc); iterate(); }); }); } iterate(); }); }); }); }); describe('transforms', function() { it('should correctly apply map transform to cursor as readable stream', function(done) { const configuration = this.configuration; const client = configuration.newClient(); client.connect(err => { expect(err).to.not.exist; this.defer(() => client.close()); const docs = 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').map(x => ({ name: x })); const coll = client.db(configuration.db).collection('cursor_stream_mapping'); coll.insertMany(docs, err => { expect(err).to.not.exist; const bag = []; const stream = coll .find() .project({ _id: 0, name: 1 }) .map(doc => ({ mapped: doc })) .on('data', doc => bag.push(doc)); stream.on('error', done).on('end', () => { expect(bag.map(x => x.mapped)).to.eql(docs.map(x => ({ name: x.name }))); done(); }); }); }); }); it('should correctly apply map 
transform when converting cursor to array', function(done) { const configuration = this.configuration; const client = configuration.newClient(); client.connect(err => { expect(err).to.not.exist; this.defer(() => client.close()); const docs = 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').map(x => ({ name: x })); const coll = client.db(configuration.db).collection('cursor_toArray_mapping'); coll.insertMany(docs, err => { expect(err).to.not.exist; coll .find() .project({ _id: 0, name: 1 }) .map(doc => ({ mapped: doc })) .toArray((err, mappedDocs) => { expect(err).to.not.exist; expect(mappedDocs.map(x => x.mapped)).to.eql(docs.map(x => ({ name: x.name }))); done(); }); }); }); }); }); });
1
17,362
It also seems we should not skip this test.
mongodb-node-mongodb-native
js
@@ -31,14 +31,17 @@ import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; class BaseFileScanTask implements FileScanTask { private final DataFile file; + private final DeleteFile[] deletes; private final String schemaString; private final String specString; private final ResidualEvaluator residuals; private transient PartitionSpec spec = null; - BaseFileScanTask(DataFile file, String schemaString, String specString, ResidualEvaluator residuals) { + BaseFileScanTask(DataFile file, DeleteFile[] deletes, String schemaString, String specString, + ResidualEvaluator residuals) { this.file = file; + this.deletes = deletes != null ? deletes : new DeleteFile[0]; this.schemaString = schemaString; this.specString = specString; this.residuals = residuals;
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.NoSuchElementException; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.ResidualEvaluator; import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting; import org.apache.iceberg.relocated.com.google.common.base.MoreObjects; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; class BaseFileScanTask implements FileScanTask { private final DataFile file; private final String schemaString; private final String specString; private final ResidualEvaluator residuals; private transient PartitionSpec spec = null; BaseFileScanTask(DataFile file, String schemaString, String specString, ResidualEvaluator residuals) { this.file = file; this.schemaString = schemaString; this.specString = specString; this.residuals = residuals; } @Override public DataFile file() { return file; } @Override public PartitionSpec spec() { if (spec == null) { this.spec = PartitionSpecParser.fromJson(SchemaParser.fromJson(schemaString), specString); } return spec; } @Override public long start() { return 0; } @Override public long length() { return file.fileSizeInBytes(); } @Override public Expression residual() { return residuals.residualFor(file.partition()); } @Override public Iterable<FileScanTask> split(long targetSplitSize) { if (file.format().isSplittable()) { if (file.splitOffsets() != null) { return () -> new OffsetsAwareTargetSplitSizeScanTaskIterator(file.splitOffsets(), this, targetSplitSize); } else { return () -> new FixedSizeSplitScanTaskIterator(targetSplitSize, this); } } return ImmutableList.of(this); } @Override public String toString() { return MoreObjects.toStringHelper(this) .add("file", file.path()) .add("partition_data", file.partition()) .add("residual", residual()) .toString(); } /** * This iterator returns {@link FileScanTask} using guidance provided by split offsets. 
*/ @VisibleForTesting static final class OffsetsAwareTargetSplitSizeScanTaskIterator implements Iterator<FileScanTask> { private final List<Long> offsets; private final List<Long> splitSizes; private final FileScanTask parentScanTask; private final long targetSplitSize; private int sizeIdx = 0; OffsetsAwareTargetSplitSizeScanTaskIterator( List<Long> offsetList, FileScanTask parentScanTask, long targetSplitSize) { this.offsets = ImmutableList.copyOf(offsetList); this.parentScanTask = parentScanTask; this.targetSplitSize = targetSplitSize; this.splitSizes = new ArrayList<>(offsets.size()); int lastIndex = offsets.size() - 1; for (int index = 0; index < lastIndex; index++) { splitSizes.add(offsets.get(index + 1) - offsets.get(index)); } splitSizes.add(parentScanTask.length() - offsets.get(lastIndex)); } @Override public boolean hasNext() { return sizeIdx < splitSizes.size(); } @Override public FileScanTask next() { if (!hasNext()) { throw new NoSuchElementException(); } int offsetIdx = sizeIdx; long currentSize = splitSizes.get(sizeIdx); sizeIdx += 1; // always consume at least one file split while (sizeIdx < splitSizes.size() && currentSize + splitSizes.get(sizeIdx) <= targetSplitSize) { currentSize += splitSizes.get(sizeIdx); sizeIdx += 1; } FileScanTask combinedTask = new SplitScanTask(offsets.get(offsetIdx), currentSize, parentScanTask); return combinedTask; } } @VisibleForTesting static final class FixedSizeSplitScanTaskIterator implements Iterator<FileScanTask> { private long offset; private long remainingLen; private long splitSize; private final FileScanTask fileScanTask; FixedSizeSplitScanTaskIterator(long splitSize, FileScanTask fileScanTask) { this.offset = 0; this.remainingLen = fileScanTask.length(); this.splitSize = splitSize; this.fileScanTask = fileScanTask; } @Override public boolean hasNext() { return remainingLen > 0; } @Override public FileScanTask next() { long len = Math.min(splitSize, remainingLen); final FileScanTask splitTask = new SplitScanTask(offset, len, fileScanTask); offset += len; remainingLen -= len; return splitTask; } } private static final class SplitScanTask implements FileScanTask { private final long len; private final long offset; private final FileScanTask fileScanTask; SplitScanTask(long offset, long len, FileScanTask fileScanTask) { this.offset = offset; this.len = len; this.fileScanTask = fileScanTask; } @Override public DataFile file() { return fileScanTask.file(); } @Override public PartitionSpec spec() { return fileScanTask.spec(); } @Override public long start() { return offset; } @Override public long length() { return len; } @Override public Expression residual() { return fileScanTask.residual(); } @Override public Iterable<FileScanTask> split(long splitSize) { throw new UnsupportedOperationException("Cannot split a task which is already split"); } } }
1
22,713
This is mostly for my understanding: is `DeleteFile[] deletes` a mandatory builder param now for file scan tasks? If not, from a v1 / v2 compatibility standpoint would it make sense to add an overloaded constructor?
apache-iceberg
java
@@ -123,6 +123,16 @@ public class ExecutionFlowDao { } } + public List<Pair<ExecutionReference, ExecutableFlow>> fetchAgedQueuedFlows(final Duration minAge) + throws ExecutorManagerException { + try { + return this.dbOperator.query(FetchAgedQueuedExecutableFlows.FETCH_FLOWS_QUEUED_FOR_LONG_TIME, + new FetchQueuedExecutableFlows(), System.currentTimeMillis() - minAge.toMillis()); + } catch (final SQLException e) { + throw new ExecutorManagerException("Error fetching active flows", e); + } + } + /** * fetch flow execution history with specified {@code projectId}, {@code flowId} and flow start * time >= {@code startTime}
1
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.db.DatabaseOperator; import azkaban.db.EncodingType; import azkaban.db.SQLTransaction; import azkaban.utils.GZIPUtils; import azkaban.utils.JSONUtils; import azkaban.utils.Pair; import java.io.IOException; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.time.Duration; import java.util.ArrayList; import java.util.Collections; import java.util.List; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.dbutils.ResultSetHandler; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.exception.ExceptionUtils; import org.apache.log4j.Logger; @Singleton public class ExecutionFlowDao { private static final Logger logger = Logger.getLogger(ExecutionFlowDao.class); private final DatabaseOperator dbOperator; private final MysqlNamedLock mysqlNamedLock; @Inject public ExecutionFlowDao(final DatabaseOperator dbOperator, final MysqlNamedLock mysqlNamedLock) { this.dbOperator = dbOperator; this.mysqlNamedLock = mysqlNamedLock; } public void uploadExecutableFlow(final ExecutableFlow flow) throws ExecutorManagerException { final String useExecutorParam = flow.getExecutionOptions().getFlowParameters().get(ExecutionOptions.USE_EXECUTOR); final String executorId = StringUtils.isNotEmpty(useExecutorParam) ? useExecutorParam : null; final String flowPriorityParam = flow.getExecutionOptions().getFlowParameters().get(ExecutionOptions.FLOW_PRIORITY); final int flowPriority = StringUtils.isNotEmpty(flowPriorityParam) ? Integer.parseInt(flowPriorityParam) : ExecutionOptions.DEFAULT_FLOW_PRIORITY; final String INSERT_EXECUTABLE_FLOW = "INSERT INTO execution_flows " + "(project_id, flow_id, version, status, submit_time, submit_user, update_time, " + "use_executor, flow_priority) values (?,?,?,?,?,?,?,?,?)"; final long submitTime = flow.getSubmitTime(); /** * Why we need a transaction to get last insert ID? * Because "SELECT LAST_INSERT_ID()" needs to have the same connection * as inserting the new entry. 
* See https://dev.mysql.com/doc/refman/5.7/en/information-functions.html#function_last-insert-id */ final SQLTransaction<Long> insertAndGetLastID = transOperator -> { transOperator.update(INSERT_EXECUTABLE_FLOW, flow.getProjectId(), flow.getFlowId(), flow.getVersion(), flow.getStatus().getNumVal(), submitTime, flow.getSubmitUser(), submitTime, executorId, flowPriority); transOperator.getConnection().commit(); return transOperator.getLastInsertId(); }; try { final long id = this.dbOperator.transaction(insertAndGetLastID); logger.info("Flow given " + flow.getFlowId() + " given id " + id); flow.setExecutionId((int) id); updateExecutableFlow(flow); } catch (final SQLException e) { throw new ExecutorManagerException("Error creating execution.", e); } } List<ExecutableFlow> fetchFlowHistory(final int skip, final int num) throws ExecutorManagerException { try { return this.dbOperator.query(FetchExecutableFlows.FETCH_ALL_EXECUTABLE_FLOW_HISTORY, new FetchExecutableFlows(), skip, num); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching flow History", e); } } List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final int skip, final int num) throws ExecutorManagerException { try { return this.dbOperator.query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW_HISTORY, new FetchExecutableFlows(), projectId, flowId, skip, num); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching flow history", e); } } public List<Pair<ExecutionReference, ExecutableFlow>> fetchQueuedFlows() throws ExecutorManagerException { try { return this.dbOperator.query(FetchQueuedExecutableFlows.FETCH_QUEUED_EXECUTABLE_FLOW, new FetchQueuedExecutableFlows()); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching active flows", e); } } /** * fetch flow execution history with specified {@code projectId}, {@code flowId} and flow start * time >= {@code startTime} * * @return the list of flows meeting the specified criteria */ public List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final long startTime) throws ExecutorManagerException { try { return this.dbOperator.query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW_BY_START_TIME, new FetchExecutableFlows(), projectId, flowId, startTime); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching historic flows", e); } } List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final int skip, final int num, final Status status) throws ExecutorManagerException { try { return this.dbOperator.query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW_BY_STATUS, new FetchExecutableFlows(), projectId, flowId, status.getNumVal(), skip, num); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching active flows", e); } } List<ExecutableFlow> fetchRecentlyFinishedFlows(final Duration maxAge) throws ExecutorManagerException { try { return this.dbOperator.query(FetchRecentlyFinishedFlows.FETCH_RECENTLY_FINISHED_FLOW, new FetchRecentlyFinishedFlows(), System.currentTimeMillis() - maxAge.toMillis(), Status.SUCCEEDED.getNumVal(), Status.KILLED.getNumVal(), Status.FAILED.getNumVal()); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching recently finished flows", e); } } List<ExecutableFlow> fetchFlowHistory(final String projectNameContains, final String flowNameContains, final String userNameContains, final int status, final long startTime, final long endTime, 
final int skip, final int num) throws ExecutorManagerException { String query = FetchExecutableFlows.FETCH_BASE_EXECUTABLE_FLOW_QUERY; final List<Object> params = new ArrayList<>(); boolean first = true; if (projectNameContains != null && !projectNameContains.isEmpty()) { query += " JOIN projects p ON ef.project_id = p.id WHERE name LIKE ?"; params.add('%' + projectNameContains + '%'); first = false; } // todo kunkun-tang: we don't need the below complicated logics. We should just use a simple way. if (flowNameContains != null && !flowNameContains.isEmpty()) { if (first) { query += " WHERE "; first = false; } else { query += " AND "; } query += " flow_id LIKE ?"; params.add('%' + flowNameContains + '%'); } if (userNameContains != null && !userNameContains.isEmpty()) { if (first) { query += " WHERE "; first = false; } else { query += " AND "; } query += " submit_user LIKE ?"; params.add('%' + userNameContains + '%'); } if (status != 0) { if (first) { query += " WHERE "; first = false; } else { query += " AND "; } query += " status = ?"; params.add(status); } if (startTime > 0) { if (first) { query += " WHERE "; first = false; } else { query += " AND "; } query += " start_time > ?"; params.add(startTime); } if (endTime > 0) { if (first) { query += " WHERE "; } else { query += " AND "; } query += " end_time < ?"; params.add(endTime); } if (skip > -1 && num > 0) { query += " ORDER BY exec_id DESC LIMIT ?, ?"; params.add(skip); params.add(num); } try { return this.dbOperator.query(query, new FetchExecutableFlows(), params.toArray()); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching active flows", e); } } void updateExecutableFlow(final ExecutableFlow flow) throws ExecutorManagerException { updateExecutableFlow(flow, EncodingType.GZIP); } private void updateExecutableFlow(final ExecutableFlow flow, final EncodingType encType) throws ExecutorManagerException { final String UPDATE_EXECUTABLE_FLOW_DATA = "UPDATE execution_flows " + "SET status=?,update_time=?,start_time=?,end_time=?,enc_type=?,flow_data=? " + "WHERE exec_id=?"; byte[] data = null; try { // If this action fails, the execution must be failed. final String json = JSONUtils.toJSON(flow.toObject()); final byte[] stringData = json.getBytes("UTF-8"); data = stringData; // Todo kunkun-tang: use a common method to transform stringData to data. if (encType == EncodingType.GZIP) { data = GZIPUtils.gzipBytes(stringData); } } catch (final IOException e) { flow.setStatus(Status.FAILED); updateExecutableFlowStatusInDB(flow); throw new ExecutorManagerException("Error encoding the execution flow. Execution Id = " + flow.getExecutionId()); } catch (final RuntimeException re) { flow.setStatus(Status.FAILED); // Likely due to serialization error if ( data == null && re instanceof NullPointerException) { logger.warn("Failed to serialize executable flow for " + flow.getExecutionId()); logger.warn("NPE stacktrace" + ExceptionUtils.getStackTrace(re)); } updateExecutableFlowStatusInDB(flow); throw new ExecutorManagerException("Error encoding the execution flow due to " + "RuntimeException. 
Execution Id = " + flow.getExecutionId(), re); } try { this.dbOperator.update(UPDATE_EXECUTABLE_FLOW_DATA, flow.getStatus() .getNumVal(), flow.getUpdateTime(), flow.getStartTime(), flow .getEndTime(), encType.getNumVal(), data, flow.getExecutionId()); } catch (final SQLException e) { throw new ExecutorManagerException("Error updating flow.", e); } } private void updateExecutableFlowStatusInDB(final ExecutableFlow flow) throws ExecutorManagerException { final String UPDATE_FLOW_STATUS = "UPDATE execution_flows SET status = ?, update_time = ? " + "where exec_id = ?"; try { this.dbOperator.update(UPDATE_FLOW_STATUS, flow.getStatus().getNumVal(), System.currentTimeMillis(), flow.getExecutionId()); } catch (final SQLException e) { throw new ExecutorManagerException("Error updating flow.", e); } } public ExecutableFlow fetchExecutableFlow(final int execId) throws ExecutorManagerException { final FetchExecutableFlows flowHandler = new FetchExecutableFlows(); try { final List<ExecutableFlow> properties = this.dbOperator .query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW, flowHandler, execId); if (properties.isEmpty()) { return null; } else { return properties.get(0); } } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching flow id " + execId, e); } } /** * set executor id to null for the execution id */ public void unsetExecutorIdForExecution(final int executionId) throws ExecutorManagerException { final String UNSET_EXECUTOR = "UPDATE execution_flows SET executor_id = null, update_time = ? where exec_id = ?"; final SQLTransaction<Integer> unsetExecutor = transOperator -> transOperator.update(UNSET_EXECUTOR, System.currentTimeMillis(), executionId); try { this.dbOperator.transaction(unsetExecutor); } catch (final SQLException e) { throw new ExecutorManagerException("Error unsetting executor id for execution " + executionId, e); } } public int selectAndUpdateExecution(final int executorId, final boolean isActive) throws ExecutorManagerException { final String UPDATE_EXECUTION = "UPDATE execution_flows SET executor_id = ?, update_time = ? " + "where exec_id = ?"; final String selectExecutionForUpdate = isActive ? SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_ACTIVE : SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_INACTIVE; final SQLTransaction<Integer> selectAndUpdateExecution = transOperator -> { transOperator.getConnection().setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); final List<Integer> execIds = transOperator.query(selectExecutionForUpdate, new SelectFromExecutionFlows(), executorId); int execId = -1; if (!execIds.isEmpty()) { execId = execIds.get(0); transOperator.update(UPDATE_EXECUTION, executorId, System.currentTimeMillis(), execId); } transOperator.getConnection().commit(); return execId; }; try { return this.dbOperator.transaction(selectAndUpdateExecution); } catch (final SQLException e) { throw new ExecutorManagerException("Error selecting and updating execution with executor " + executorId, e); } } public int selectAndUpdateExecutionWithLocking(final int executorId, final boolean isActive) throws ExecutorManagerException { final String UPDATE_EXECUTION = "UPDATE execution_flows SET executor_id = ?, update_time = ? " + "where exec_id = ?"; final String selectExecutionForUpdate = isActive ? 
SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_ACTIVE : SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_INACTIVE; final SQLTransaction<Integer> selectAndUpdateExecution = transOperator -> { final String POLLING_LOCK_NAME = "execution_flows_polling"; final int GET_LOCK_TIMEOUT_IN_SECONDS = 5; int execId = -1; final boolean hasLocked = this.mysqlNamedLock.getLock(transOperator, POLLING_LOCK_NAME, GET_LOCK_TIMEOUT_IN_SECONDS); logger.info("ExecutionFlow polling lock value: " + hasLocked + " for executorId: " + executorId); if (hasLocked) { try { final List<Integer> execIds = transOperator.query(selectExecutionForUpdate, new SelectFromExecutionFlows(), executorId); if (CollectionUtils.isNotEmpty(execIds)) { execId = execIds.get(0); transOperator.update(UPDATE_EXECUTION, executorId, System.currentTimeMillis(), execId); } } finally { this.mysqlNamedLock.releaseLock(transOperator, POLLING_LOCK_NAME); logger.info("Released polling lock for executorId: " + executorId); } } else { logger.info("Could not acquire polling lock for executorId: " + executorId); } return execId; }; try { return this.dbOperator.transaction(selectAndUpdateExecution); } catch (final SQLException e) { throw new ExecutorManagerException("Error selecting and updating execution with executor " + executorId, e); } } public static class SelectFromExecutionFlows implements ResultSetHandler<List<Integer>> { private static final String SELECT_EXECUTION_FOR_UPDATE_FORMAT = "SELECT exec_id from execution_flows WHERE exec_id = (SELECT exec_id from execution_flows" + " WHERE status = " + Status.PREPARING.getNumVal() + " and executor_id is NULL and flow_data is NOT NULL and %s" + " ORDER BY flow_priority DESC, update_time ASC, exec_id ASC LIMIT 1) and executor_id is NULL FOR UPDATE"; public static final String SELECT_EXECUTION_FOR_UPDATE_ACTIVE = String.format(SELECT_EXECUTION_FOR_UPDATE_FORMAT, "(use_executor is NULL or use_executor = ?)"); public static final String SELECT_EXECUTION_FOR_UPDATE_INACTIVE = String.format(SELECT_EXECUTION_FOR_UPDATE_FORMAT, "use_executor = ?"); @Override public List<Integer> handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.emptyList(); } final List<Integer> execIds = new ArrayList<>(); do { final int execId = rs.getInt(1); execIds.add(execId); } while (rs.next()); return execIds; } } public static class FetchExecutableFlows implements ResultSetHandler<List<ExecutableFlow>> { static String FETCH_EXECUTABLE_FLOW_BY_START_TIME = "SELECT ef.exec_id, ef.enc_type, ef.flow_data, ef.status FROM execution_flows ef WHERE " + "project_id=? AND flow_id=? AND start_time >= ? ORDER BY start_time DESC"; static String FETCH_BASE_EXECUTABLE_FLOW_QUERY = "SELECT ef.exec_id, ef.enc_type, ef.flow_data, ef.status FROM execution_flows ef"; static String FETCH_EXECUTABLE_FLOW = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows " + "WHERE exec_id=?"; static String FETCH_ALL_EXECUTABLE_FLOW_HISTORY = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows " + "ORDER BY exec_id DESC LIMIT ?, ?"; static String FETCH_EXECUTABLE_FLOW_HISTORY = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows " + "WHERE project_id=? AND flow_id=? " + "ORDER BY exec_id DESC LIMIT ?, ?"; static String FETCH_EXECUTABLE_FLOW_BY_STATUS = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows " + "WHERE project_id=? AND flow_id=? AND status=? 
" + "ORDER BY exec_id DESC LIMIT ?, ?"; @Override public List<ExecutableFlow> handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.emptyList(); } final List<ExecutableFlow> execFlows = new ArrayList<>(); do { final int id = rs.getInt(1); final int encodingType = rs.getInt(2); final byte[] data = rs.getBytes(3); if (data != null) { final EncodingType encType = EncodingType.fromInteger(encodingType); final Status status = Status.fromInteger(rs.getInt(4)); try { final ExecutableFlow exFlow = ExecutableFlow.createExecutableFlow( GZIPUtils.transformBytesToObject(data, encType), status); execFlows.add(exFlow); } catch (final IOException e) { throw new SQLException("Error retrieving flow data " + id, e); } } } while (rs.next()); return execFlows; } } /** * JDBC ResultSetHandler to fetch queued executions */ private static class FetchQueuedExecutableFlows implements ResultSetHandler<List<Pair<ExecutionReference, ExecutableFlow>>> { // Select queued unassigned flows private static final String FETCH_QUEUED_EXECUTABLE_FLOW = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows" + " WHERE executor_id is NULL AND status = " + Status.PREPARING.getNumVal(); @Override public List<Pair<ExecutionReference, ExecutableFlow>> handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.emptyList(); } final List<Pair<ExecutionReference, ExecutableFlow>> execFlows = new ArrayList<>(); do { final int id = rs.getInt(1); final int encodingType = rs.getInt(2); final byte[] data = rs.getBytes(3); if (data == null) { ExecutionFlowDao.logger.error("Found a flow with empty data blob exec_id: " + id); } else { final EncodingType encType = EncodingType.fromInteger(encodingType); final Status status = Status.fromInteger(rs.getInt(4)); try { final ExecutableFlow exFlow = ExecutableFlow.createExecutableFlow( GZIPUtils.transformBytesToObject(data, encType), status); final ExecutionReference ref = new ExecutionReference(id); execFlows.add(new Pair<>(ref, exFlow)); } catch (final IOException e) { throw new SQLException("Error retrieving flow data " + id, e); } } } while (rs.next()); return execFlows; } } private static class FetchRecentlyFinishedFlows implements ResultSetHandler<List<ExecutableFlow>> { // Execution_flows table is already indexed by end_time private static final String FETCH_RECENTLY_FINISHED_FLOW = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows " + "WHERE end_time > ? AND status IN (?, ?, ?)"; @Override public List<ExecutableFlow> handle( final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.emptyList(); } final List<ExecutableFlow> execFlows = new ArrayList<>(); do { final int id = rs.getInt(1); final int encodingType = rs.getInt(2); final byte[] data = rs.getBytes(3); if (data != null) { final EncodingType encType = EncodingType.fromInteger(encodingType); final Status status = Status.fromInteger(rs.getInt(4)); try { final ExecutableFlow exFlow = ExecutableFlow.createExecutableFlow( GZIPUtils.transformBytesToObject(data, encType), status); execFlows.add(exFlow); } catch (final IOException e) { throw new SQLException("Error retrieving flow data " + id, e); } } } while (rs.next()); return execFlows; } } }
1
19,604
Can the error message reflect the purpose of the query more closely? Something like "Error fetching executions queued for a long time"
azkaban-azkaban
java
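The review comment above objects only to the error string, which is copied from `fetchQueuedFlows` and reads "Error fetching active flows". As a sketch, here is the method from the patch with the reviewer's suggested wording swapped in; everything else is unchanged, and whether this exact text was adopted is not shown here.

```java
public List<Pair<ExecutionReference, ExecutableFlow>> fetchAgedQueuedFlows(final Duration minAge)
    throws ExecutorManagerException {
  try {
    return this.dbOperator.query(FetchAgedQueuedExecutableFlows.FETCH_FLOWS_QUEUED_FOR_LONG_TIME,
        new FetchQueuedExecutableFlows(), System.currentTimeMillis() - minAge.toMillis());
  } catch (final SQLException e) {
    // Message now names the purpose of this query instead of the generic "active flows" text.
    throw new ExecutorManagerException("Error fetching executions queued for a long time", e);
  }
}
```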
@@ -60,7 +60,7 @@ func (c *CreateInstances) validate(w *Workflow) error { } // Startup script checking. - if !sourceExists(ci.StartupScript) { + if ci.StartupScript != "" && !w.sourceExists(ci.StartupScript) { return fmt.Errorf("cannot create instance: file not found: %s", ci.StartupScript) }
1
// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package workflow import ( "errors" "fmt" "path" "path/filepath" "sync" ) // CreateInstances is a Daisy CreateInstances workflow step. type CreateInstances []CreateInstance // CreateInstance describes a GCE instance. type CreateInstance struct { // Name of the instance. Name string // Disks to attach to the instance, must match a disk created in a previous step. // First one gets set as boot disk. At least one disk must be listed. AttachedDisks []string MachineType string // StartupScript is the local path to a startup script to use in this step. // This will be automatically mapped to the appropriate metadata key. StartupScript string // Additional metadata to set for the instance. Metadata map[string]string // OAuth2 scopes to give the instance. If non are specified // https://www.googleapis.com/auth/devstorage.read_only will be added. Scopes []string // Should this resource be cleaned up after the workflow? NoCleanup bool // Should we use the user-provided reference name as the actual resource name? ExactName bool } func (c *CreateInstances) validate(w *Workflow) error { for _, ci := range *c { // Disk checking. if len(ci.AttachedDisks) == 0 { return errors.New("cannot create instance: no disks provided") } for _, d := range ci.AttachedDisks { if !diskValid(w, d) { return fmt.Errorf("cannot create instance: disk not found: %s", d) } } // Startup script checking. if !sourceExists(ci.StartupScript) { return fmt.Errorf("cannot create instance: file not found: %s", ci.StartupScript) } // Try adding instance name. if err := validatedInstances.add(w, ci.Name); err != nil { return fmt.Errorf("error adding instance: %s", err) } } return nil } func (c *CreateInstances) run(w *Workflow) error { var wg sync.WaitGroup e := make(chan error) for _, ci := range *c { wg.Add(1) go func(ci CreateInstance) { defer wg.Done() name := ci.Name if !ci.ExactName { name = w.genName(ci.Name) } inst, err := w.ComputeClient.NewInstance(name, w.Project, w.Zone, ci.MachineType, ci.Scopes) if err != nil { e <- err return } for i, sourceDisk := range ci.AttachedDisks { var disk *resource var err error if isLink(sourceDisk) { // Real link. inst.AddPD("", sourceDisk, false, i == 0) } else if disk, err = w.getDisk(sourceDisk); err == nil { // Reference. inst.AddPD(disk.name, disk.link, false, i == 0) } else { e <- err return } } if ci.StartupScript != "" { var startup string switch filepath.Ext(ci.StartupScript) { case ".ps1", ".bat", ".cmd": startup = "windows-startup-script-url" default: startup = "startup-script-url" } inst.AddMetadata(map[string]string{startup: "gs://" + path.Join(w.bucket, w.sourcesPath, ci.StartupScript)}) } inst.AddMetadata(ci.Metadata) // Add standard Daisy metadata. 
md := map[string]string{ "daisy-sources-path": "gs://" + path.Join(w.bucket, w.sourcesPath), "daisy-logs-path": "gs://" + path.Join(w.bucket, w.logsPath), "daisy-outs-path": "gs://" + path.Join(w.bucket, w.outsPath), } inst.AddMetadata(md) inst.AddNetworkInterface("global/networks/default") w.logger.Printf("CreateInstances: creating instance %q.", name) i, err := inst.Insert() if err != nil { e <- err return } w.instanceRefs.add(ci.Name, &resource{ci.Name, name, i.SelfLink, ci.NoCleanup}) }(ci) } go func() { wg.Wait() e <- nil }() select { case err := <-e: return err case <-w.Cancel: // Wait so instances being created now can be deleted. wg.Wait() return nil } }
1
6,354
Are you making it so the startup script HAS to be in sources?
GoogleCloudPlatform-compute-image-tools
go
@@ -128,7 +128,9 @@ void l2_weight_regularization::start_evaluation() { if (vals.Participating() && vals.GetLocalDevice() == El::Device::GPU && vals.RedundantRank() == i % vals.RedundantSize()) { - if (vals.LDim() == vals.LocalHeight()) { + if (vals.LocalWidth() < 1 || vals.LocalHeight() < 1) { + } else if (vals.LocalWidth() == 1 + || vals.LDim() == vals.LocalHeight()) { cublas::dot(handle, vals.LocalHeight() * vals.LocalWidth(), vals.LockedBuffer(), 1,
1
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/objective_functions/weight_regularization/l2.hpp" #include "lbann/models/model.hpp" #ifdef LBANN_HAS_GPU #include "lbann/utils/cublas.hpp" #endif // LBANN_HAS_GPU namespace { /** Compute the entry-wise sum of squares of a local matrix. */ EvalType sum_of_squares(const Mat& mat) { const El::Int height = mat.Height(); const El::Int width = mat.Width(); const El::Int ldim = mat.LDim(); const auto& __restrict__ buf = mat.LockedBuffer(); EvalType sqsum = EvalType(0); if (ldim == height) { // Parallelize single loop if data is contiguous const El::Int size = height*width; #pragma omp parallel for reduction(+:sqsum) for (El::Int i = 0; i < size; ++i) { const EvalType val = buf[i]; sqsum += val * val; } } else { // Parallelize double loop if data is not contiguous #pragma omp parallel for reduction(+:sqsum) collapse(2) for (El::Int j = 0; j < width; ++j) { for (El::Int i = 0; i < height; ++i) { const EvalType val = buf[i + j*ldim]; sqsum += val * val; } } } return sqsum; } } // namespace namespace lbann { l2_weight_regularization::l2_weight_regularization(EvalType scale_factor) : objective_function_term(scale_factor), m_sqsum(0), m_allreduce_started(false) {} void l2_weight_regularization::setup(model& m) { objective_function_term::setup(m); // Check that term has no layer pointers if (!m_layers.empty()) { LBANN_ERROR("attempted to setup L2 weight regularization with layer pointers"); } // Add all weights in model if no weights pointers are provided if (m_weights.empty()) { for (weights* w : m.get_weights()) { if (w->get_optimizer() != nullptr) { m_weights.push_back(w); } } } } void l2_weight_regularization::start_evaluation() { if (m_scale_factor == EvalType(0)) { return; } const int num_weights = m_weights.size(); // Each weights' local contribution to L2 regularization term CPUMat sqsums; El::Zeros(sqsums, num_weights, 1); #ifdef LBANN_HAS_GPU // Check whether any weights are on GPU bool using_gpus = false; for (const auto& w : m_weights) { if (w->get_values().GetLocalDevice() == El::Device::GPU) { using_gpus = true; break; } } // Compute L2 regularization term for weights on GPU // Note: cuBLAS is set to device pointer mode to pipeline GPU // kernels. Local contributions are only computed on one process in // each matrix's redundant communicator. 
if (using_gpus) { auto&& handle = El::GPUManager::cuBLASHandle(); CHECK_CUBLAS(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE)); // Initialize workspace GPUMat sqsums_d; #ifdef HYDROGEN_HAVE_CUB sqsums_d.SetMemoryMode(1); // CUB memory pool #endif El::Zeros(sqsums_d, num_weights, 1); // Compute local contributions for (int i = 0; i < num_weights; ++i) { const auto& vals = m_weights[i]->get_values(); if (vals.Participating() && vals.GetLocalDevice() == El::Device::GPU && vals.RedundantRank() == i % vals.RedundantSize()) { if (vals.LDim() == vals.LocalHeight()) { cublas::dot(handle, vals.LocalHeight() * vals.LocalWidth(), vals.LockedBuffer(), 1, vals.LockedBuffer(), 1, sqsums_d.Buffer(i, 0)); } else { /// @todo Support non-contiguous data LBANN_ERROR("we currently assume weights matrices are contiguous"); } } } CHECK_CUBLAS(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST)); El::Copy(sqsums_d, sqsums); } #endif // LBANN_HAS_GPU // Compute local contributions on CPU // Note: Only compute local contribution on one process in each // redundant communicator. m_sqsum = EvalType(0); for (int i = 0; i < num_weights; ++i) { const auto& vals = m_weights[i]->get_values(); if (vals.Participating() && vals.GetLocalDevice() == El::Device::CPU && vals.RedundantRank() == i % vals.RedundantSize()) { sqsums(i, 0) = sum_of_squares(vals.LockedMatrix()); } m_sqsum += sqsums(i, 0); } // Start aggregating local contributions get_comm().nb_allreduce(&m_sqsum, 1, get_comm().get_model_comm(), m_allreduce_req); m_allreduce_started = true; } EvalType l2_weight_regularization::finish_evaluation() { if (m_scale_factor == EvalType(0)) { return EvalType(0); } if (m_allreduce_started) { get_comm().wait(m_allreduce_req); } m_allreduce_started = false; return m_scale_factor * m_sqsum / 2; } void l2_weight_regularization::compute_weight_regularization() { if (m_scale_factor == EvalType(0)) { return; } // Compute gradient of L2 regularization term for weights for (auto&& w : m_weights) { w->get_optimizer()->add_to_gradient(w->get_values(), m_scale_factor); } } } // namespace lbann
1
13,148
Might be more clear to just have one if statement?
LLNL-lbann
cpp
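For illustration, a minimal sketch (in Python rather than the project's C++, with hypothetical names) of folding the three branches from the patch above into a single predicate, along the lines the reviewer suggests: the cuBLAS dot should only run for a non-empty local matrix whose data is contiguous, and a single column is trivially contiguous.

```python
def should_run_dot(local_height: int, local_width: int, ldim: int) -> bool:
    """One combined condition instead of an empty branch plus an else-if:
    skip empty local matrices, then require contiguous storage."""
    non_empty = local_height >= 1 and local_width >= 1
    contiguous = local_width == 1 or ldim == local_height
    return non_empty and contiguous

# A single padded column is still contiguous; a padded multi-column matrix is not.
assert should_run_dot(local_height=4, local_width=1, ldim=8) is True
assert should_run_dot(local_height=4, local_width=0, ldim=8) is False
assert should_run_dot(local_height=4, local_width=3, ldim=8) is False
```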
@@ -87,7 +87,7 @@ class SparkReader: try: response_handler(response) except Exception as e: - current_app.logger.error('Error in the response handler: %s, data: %s %' + current_app.logger.error('Error in the response handler: %s, data: %s' % (str(e), json.dumps(response, indent=4)), exc_info=True) return
1
import json import logging import time from flask import current_app import pika import sqlalchemy import ujson from listenbrainz import utils from listenbrainz.db import stats as db_stats from listenbrainz.db import user as db_user from listenbrainz.db.exceptions import DatabaseException from listenbrainz.spark.handlers import (handle_candidate_sets, handle_dataframes, handle_dump_imported, handle_model, handle_recommendations, handle_user_daily_activity, handle_user_entity, handle_user_listening_activity, handle_sitewide_entity, notify_artist_relation_import, notify_mapping_import, handle_missing_musicbrainz_data, notify_cf_recording_recommendations_generation, handle_similar_users) from listenbrainz.webserver import create_app response_handler_map = { 'user_entity': handle_user_entity, 'user_listening_activity': handle_user_listening_activity, 'user_daily_activity': handle_user_daily_activity, 'sitewide_entity': handle_sitewide_entity, 'import_full_dump': handle_dump_imported, 'import_incremental_dump': handle_dump_imported, 'cf_recommendations_recording_dataframes': handle_dataframes, 'cf_recommendations_recording_model': handle_model, 'cf_recommendations_recording_candidate_sets': handle_candidate_sets, 'cf_recommendations_recording_recommendations': handle_recommendations, 'import_mapping': notify_mapping_import, 'import_artist_relation': notify_artist_relation_import, 'missing_musicbrainz_data': handle_missing_musicbrainz_data, 'cf_recommendations_recording_mail': notify_cf_recording_recommendations_generation, 'similar_users': handle_similar_users, } RABBITMQ_HEARTBEAT_TIME = 60 * 60 # 1 hour, in seconds class SparkReader: def __init__(self): self.app = create_app() # creating a flask app for config values and logging to Sentry def get_response_handler(self, response_type): return response_handler_map[response_type] def init_rabbitmq_connection(self): """ Initializes the connection to RabbitMQ. Note: this is a blocking function which keeps retrying if it fails to connect to RabbitMQ """ self.connection = utils.connect_to_rabbitmq( username=current_app.config['RABBITMQ_USERNAME'], password=current_app.config['RABBITMQ_PASSWORD'], host=current_app.config['RABBITMQ_HOST'], port=current_app.config['RABBITMQ_PORT'], virtual_host=current_app.config['RABBITMQ_VHOST'], error_logger=current_app.logger.error, heartbeat=RABBITMQ_HEARTBEAT_TIME, ) def process_response(self, response): try: response_type = response['type'] except KeyError: current_app.logger.error('Bad response sent to spark_reader: %s' % json.dumps(response, indent=4), exc_info=True) return try: response_handler = self.get_response_handler(response_type) except Exception: current_app.logger.error('Unknown response type: %s, doing nothing.' % response_type, exc_info=True) return try: response_handler(response) except Exception as e: current_app.logger.error('Error in the response handler: %s, data: %s %' (str(e), json.dumps(response, indent=4)), exc_info=True) return def callback(self, ch, method, properties, body): """ Handle the data received from the queue and insert into the database accordingly. 
""" current_app.logger.debug("Received a message, processing...") response = ujson.loads(body) self.process_response(response) ch.basic_ack(delivery_tag=method.delivery_tag) current_app.logger.debug("Done!") def start(self): """ initiates RabbitMQ connection and starts consuming from the queue """ with self.app.app_context(): current_app.logger.info('Spark consumer has started!') while True: self.init_rabbitmq_connection() self.incoming_ch = utils.create_channel_to_consume( connection=self.connection, exchange=current_app.config['SPARK_RESULT_EXCHANGE'], queue=current_app.config['SPARK_RESULT_QUEUE'], callback_function=self.callback, auto_ack=False, ) current_app.logger.info('Spark consumer attempt to start consuming!') try: self.incoming_ch.start_consuming() except pika.exceptions.ConnectionClosed: current_app.logger.warning('Spark consumer pika connection closed!') self.connection = None continue self.connection.close() if __name__ == '__main__': sr = SparkReader() sr.start()
1
19,652
remember that logger methods will do string interpolation automatically anyway, so you should be able to do `logger.error('message %s', var, exc_info=True)`
metabrainz-listenbrainz-server
py
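A minimal runnable sketch of the lazy interpolation the reviewer describes in the comment above, using the standard `logging` module as a stand-in for `current_app.logger` (which is an ordinary `logging.Logger`): the message and its arguments are passed separately, and the logger performs the `%` formatting itself.

```python
import json
import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger("spark_reader_example")  # hypothetical stand-in logger

response = {"type": "user_entity"}  # hypothetical payload

try:
    raise ValueError("boom")  # stand-in for a failing response handler
except Exception as e:
    # No manual '%' operator: the logger interpolates the args only if the
    # record is actually emitted, and exc_info=True attaches the traceback.
    logger.error('Error in the response handler: %s, data: %s',
                 str(e), json.dumps(response, indent=4), exc_info=True)
```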
@@ -350,6 +350,7 @@ private: void render_sync_handler_failed_default_exception_branch(t_function *tfunc); void render_sync_handler_send_exception_response(t_function *tfunc, const string &err_var); void render_service_call_structs(t_service* tservice); + void render_args_struct(t_function* tfunc); void render_result_value_struct(t_function* tfunc); string handler_successful_return_struct(t_function* tfunc);
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include <string> #include <fstream> #include <iostream> #include "thrift/platform.h" #include "thrift/generate/t_generator.h" using std::map; using std::ofstream; using std::ostringstream; using std::string; using std::vector; using std::set; static const string endl("\n"); // avoid ostream << std::endl flushes static const string SERVICE_RESULT_VARIABLE("result_value"); static const string RESULT_STRUCT_SUFFIX("Result"); static const string RUST_RESERVED_WORDS[] = { "abstract", "alignof", "as", "become", "box", "break", "const", "continue", "crate", "do", "else", "enum", "extern", "false", "final", "fn", "for", "if", "impl", "in", "let", "loop", "macro", "match", "mod", "move", "mut", "offsetof", "override", "priv", "proc", "pub", "pure", "ref", "return", "Self", "self", "sizeof", "static", "struct", "super", "trait", "true", "type", "typeof", "unsafe", "unsized", "use", "virtual", "where", "while", "yield" }; const set<string> RUST_RESERVED_WORDS_SET( RUST_RESERVED_WORDS, RUST_RESERVED_WORDS + sizeof(RUST_RESERVED_WORDS)/sizeof(RUST_RESERVED_WORDS[0]) ); static const string SYNC_CLIENT_GENERIC_BOUND_VARS("<IP, OP>"); static const string SYNC_CLIENT_GENERIC_BOUNDS("where IP: TInputProtocol, OP: TOutputProtocol"); // FIXME: extract common TMessageIdentifier function // FIXME: have to_rust_type deal with Option class t_rs_generator : public t_generator { public: t_rs_generator( t_program* program, const std::map<std::string, std::string>&, const std::string& ) : t_generator(program) { gen_dir_ = get_out_dir(); } /** * Init and close methods */ void init_generator(); void close_generator(); /** * Program-level generation functions */ void generate_typedef(t_typedef* ttypedef); void generate_enum(t_enum* tenum); void generate_const(t_const* tconst); void generate_struct(t_struct* tstruct); void generate_xception(t_struct* txception); void generate_service(t_service* tservice); private: // struct type // T_REGULAR: user-defined struct in the IDL // T_ARGS: struct used to hold all service-call parameters // T_RESULT: struct used to hold all service-call returns and exceptions // T_EXCEPTION: user-defined exception in the IDL enum e_struct_type { T_REGULAR, T_ARGS, T_RESULT, T_EXCEPTION }; // Directory to which generated code is written. string gen_dir_; // File to which generated code is written. ofstream_with_content_based_conditional_update f_gen_; // Write the common compiler attributes and module includes to the top of the auto-generated file. void render_attributes_and_includes(); // Create the closure of Rust modules referenced by this service. void compute_service_referenced_modules(t_service *tservice, set<string> &referenced_modules); // Write the rust representation of an enum. 
void render_enum_definition(t_enum* tenum, const string& enum_name); // Write the impl blocks associated with the traits necessary to convert an enum to/from an i32. void render_enum_conversion(t_enum* tenum, const string& enum_name); // Write the impl block associated with the rust representation of an enum. This includes methods // to write the enum to a protocol, read it from a protocol, etc. void render_enum_impl(const string& enum_name); // Write a simple rust const value (ie. `pub const FOO: foo...`). void render_const_value(const string& name, t_type* ttype, t_const_value* tvalue); // Write a constant list, set, map or struct. These constants require allocation and cannot be defined // using a 'pub const'. As a result, I create a holder struct with a single `const_value` method that // returns the initialized instance. void render_const_value_holder(const string& name, t_type* ttype, t_const_value* tvalue); // Write the actual const value - the right side of a const definition. void render_const_value(t_type* ttype, t_const_value* tvalue); // Write a const struct (returned from `const_value` method). void render_const_struct(t_type* ttype, t_const_value* tvalue); // Write a const list (returned from `const_value` method). void render_const_list(t_type* ttype, t_const_value* tvalue); // Write a const set (returned from `const_value` method). void render_const_set(t_type* ttype, t_const_value* tvalue); // Write a const map (returned from `const_value` method). void render_const_map(t_type* ttype, t_const_value* tvalue); // Write the code to insert constant values into a rust vec or set. The // `insert_function` is the rust function that we'll use to insert the elements. void render_container_const_value( const string& insert_function, t_type* ttype, t_const_value* tvalue ); // Write the rust representation of a thrift struct to the generated file. Set `struct_type` to `T_ARGS` // if rendering the struct used to pack arguments for a service call. When `struct_type` is `T_ARGS` the // struct and its members have module visibility, and all fields are required. When `struct_type` is // anything else the struct and its members have public visibility and fields have the visibility set // in their definition. void render_struct(const string& struct_name, t_struct* tstruct, t_rs_generator::e_struct_type struct_type); // Write the comment block preceding a type definition (and implementation). void render_type_comment(const string& struct_name); // Write the rust representation of a thrift struct. Supports argument structs, result structs, // user-defined structs and exception structs. The exact struct type to be generated is controlled // by the `struct_type` parameter, which (among other things) modifies the visibility of the // written struct and members, controls which trait implementations are generated. void render_struct_definition( const string& struct_name, t_struct* tstruct, t_rs_generator::e_struct_type struct_type ); // Writes the impl block associated with the rust representation of a struct. At minimum this // contains the methods to read from a protocol and write to a protocol. Additional methods may // be generated depending on `struct_type`. void render_struct_impl( const string& struct_name, t_struct* tstruct, t_rs_generator::e_struct_type struct_type ); // Generate a `fn new(...)` for a struct with name `struct_name` and type `t_struct`. The auto-generated // code may include generic type parameters to make the constructor more ergonomic. 
`struct_type` controls // the visibility of the generated constructor. void render_struct_constructor( const string& struct_name, t_struct* tstruct, t_rs_generator::e_struct_type struct_type ); // Write the `ok_or` method added to all Thrift service call result structs. You can use this method // to convert a struct into a `Result` and use it in a `try!` or combinator chain. void render_result_struct_to_result_method(t_struct* tstruct); // Write the implementations for the `Error` and `Debug` traits. These traits are necessary for a // user-defined exception to be properly handled as Rust errors. void render_exception_struct_error_trait_impls(const string& struct_name, t_struct* tstruct); // Write the implementations for the `Default`. This trait allows you to specify only the fields you want // and use `..Default::default()` to fill in the rest. void render_struct_default_trait_impl(const string& struct_name, t_struct* tstruct); // Write the function that serializes a struct to its wire representation. If `struct_type` is `T_ARGS` // then all fields are considered "required", if not, the default optionality is used. void render_struct_sync_write(t_struct *tstruct, t_rs_generator::e_struct_type struct_type); // Helper function that serializes a single struct field to its wire representation. Unpacks the // variable (since it may be optional) and serializes according to the optionality rules required by `req`. // Variables in auto-generated code are passed by reference. Since this function may be called in // contexts where the variable is *already* a reference you can set `field_var_is_ref` to `true` to avoid // generating an extra, unnecessary `&` that the compiler will have to automatically dereference. void render_struct_field_sync_write( const string &field_var, bool field_var_is_ref, t_field *tfield, t_field::e_req req); // Write the rust function that serializes a single type (i.e. a i32 etc.) to its wire representation. // Variables in auto-generated code are passed by reference. Since this function may be called in // contexts where the variable is *already* a reference you can set `type_var_is_ref` to `true` to avoid // generating an extra, unnecessary `&` that the compiler will have to automatically dereference. void render_type_sync_write(const string &type_var, bool type_var_is_ref, t_type *ttype); // Write a list to the output protocol. `list_variable` is the variable containing the list // that will be written to the output protocol. // Variables in auto-generated code are passed by reference. Since this function may be called in // contexts where the variable is *already* a reference you can set `list_var_is_ref` to `true` to avoid // generating an extra, unnecessary `&` that the compiler will have to automatically dereference. void render_list_sync_write(const string &list_var, bool list_var_is_ref, t_list *tlist); // Write a set to the output protocol. `set_variable` is the variable containing the set that will // be written to the output protocol. // Variables in auto-generated code are passed by reference. Since this function may be called in // contexts where the variable is *already* a reference you can set `set_var_is_ref` to `true` to avoid // generating an extra, unnecessary `&` that the compiler will have to automatically dereference. void render_set_sync_write(const string &set_var, bool set_var_is_ref, t_set *tset); // Write a map to the output protocol. `map_variable` is the variable containing the map that will // be written to the output protocol. 
// Variables in auto-generated code are passed by reference. Since this function may be called in // contexts where the variable is *already* a reference you can set `map_var_is_ref` to `true` to avoid // generating an extra, unnecessary `&` that the compiler will have to automatically dereference. void render_map_sync_write(const string &map_var, bool map_var_is_ref, t_map *tset); // Return `true` if we need to dereference ths type when writing an element from a container. // Iterations on rust containers are performed as follows: `for v in &values { ... }` // where `v` has type `&RUST_TYPE` All defined functions take primitives by value, so, if the // rendered code is calling such a function it has to dereference `v`. bool needs_deref_on_container_write(t_type* ttype); // Return the variable (including all dereferences) required to write values from a rust container // to the output protocol. For example, if you were iterating through a container and using the temp // variable `v` to represent each element, then `ttype` is the type stored in the container and // `base_var` is "v". The return value is the actual string you will have to use to properly reference // the temp variable for writing to the output protocol. string string_container_write_variable(t_type* ttype, const string& base_var); // Write the code to read bytes from the wire into the given `t_struct`. `struct_name` is the // actual Rust name of the `t_struct`. If `struct_type` is `T_ARGS` then all struct fields are // necessary. Otherwise, the field's default optionality is used. void render_struct_sync_read(const string &struct_name, t_struct *tstruct, t_rs_generator::e_struct_type struct_type); // Write the rust function that deserializes a single type (i.e. i32 etc.) from its wire representation. // Set `is_boxed` to `true` if the resulting value should be wrapped in a `Box::new(...)`. void render_type_sync_read(const string &type_var, t_type *ttype, bool is_boxed = false); // Read the wire representation of a list and convert it to its corresponding rust implementation. // The deserialized list is stored in `list_variable`. void render_list_sync_read(t_list *tlist, const string &list_variable); // Read the wire representation of a set and convert it to its corresponding rust implementation. // The deserialized set is stored in `set_variable`. void render_set_sync_read(t_set *tset, const string &set_variable); // Read the wire representation of a map and convert it to its corresponding rust implementation. // The deserialized map is stored in `map_variable`. void render_map_sync_read(t_map *tmap, const string &map_variable); // Return a temporary variable used to store values when deserializing nested containers. string struct_field_read_temp_variable(t_field* tfield); // Top-level function that calls the various render functions necessary to write the rust representation // of a thrift union (i.e. an enum). void render_union(t_struct* tstruct); // Write the enum corresponding to the Thrift union. void render_union_definition(const string& union_name, t_struct* tstruct); // Write the enum impl (with read/write functions) for the Thrift union. void render_union_impl(const string& union_name, t_struct* tstruct); // Write the `ENUM::write_to_out_protocol` function. void render_union_sync_write(const string &union_name, t_struct *tstruct); // Write the `ENUM::read_from_in_protocol` function. 
void render_union_sync_read(const string &union_name, t_struct *tstruct); // Top-level function that calls the various render functions necessary to write the rust representation // of a Thrift client. void render_sync_client(t_service* tservice); // Write the trait with the service-call methods for `tservice`. void render_sync_client_trait(t_service *tservice); // Write the trait to be implemented by the client impl if end users can use it to make service calls. void render_sync_client_marker_trait(t_service *tservice); // Write the code to create the Thrift service sync client struct and its matching 'impl' block. void render_sync_client_definition_and_impl(const string& client_impl_name); // Write the code to create the `SyncClient::new` functions as well as any other functions // callers would like to use on the Thrift service sync client. void render_sync_client_lifecycle_functions(const string& client_struct); // Write the code to create the impl block for the `TThriftClient` trait. Since generated // Rust Thrift clients perform all their operations using methods defined in this trait, we // have to implement it for the client structs. void render_sync_client_tthriftclient_impl(const string &client_impl_name); // Write the marker traits for any service(s) being extended, including the one for the current // service itself (i.e. `tservice`) void render_sync_client_marker_trait_impls(t_service *tservice, const string &impl_struct_name); // Generate a list of all the traits this Thrift client struct extends. string sync_client_marker_traits_for_extension(t_service *tservice); // Top-level function that writes the code to make the Thrift service calls. void render_sync_client_process_impl(t_service* tservice); // Write the actual function that calls out to the remote service and processes its response. void render_sync_send_recv_wrapper(t_function* tfunc); // Write the `send` functionality for a Thrift service call represented by a `t_service->t_function`. void render_sync_send(t_function* tfunc); // Write the `recv` functionality for a Thrift service call represented by a `t_service->t_function`. // This method is only rendered if the function is *not* oneway. void render_sync_recv(t_function* tfunc); void render_sync_processor(t_service *tservice); void render_sync_handler_trait(t_service *tservice); void render_sync_processor_definition_and_impl(t_service *tservice); void render_sync_process_delegation_functions(t_service *tservice); void render_sync_process_function(t_function *tfunc, const string &handler_type); void render_process_match_statements(t_service* tservice); void render_sync_handler_succeeded(t_function *tfunc); void render_sync_handler_failed(t_function *tfunc); void render_sync_handler_failed_user_exception_branch(t_function *tfunc); void render_sync_handler_failed_application_exception_branch(t_function *tfunc, const string &app_err_var); void render_sync_handler_failed_default_exception_branch(t_function *tfunc); void render_sync_handler_send_exception_response(t_function *tfunc, const string &err_var); void render_service_call_structs(t_service* tservice); void render_result_value_struct(t_function* tfunc); string handler_successful_return_struct(t_function* tfunc); // Writes the result of `render_rift_error_struct` wrapped in an `Err(thrift::Error(...))`. void render_rift_error( const string& error_kind, const string& error_struct, const string& sub_error_kind, const string& error_message ); // Write a thrift::Error variant struct. 
Error structs take the form: // ``` // pub struct error_struct { // kind: sub_error_kind, // message: error_message, // } // ``` // A concrete example is: // ``` // pub struct ApplicationError { // kind: ApplicationErrorKind::Unknown, // message: "This is some error message", // } // ``` void render_rift_error_struct( const string& error_struct, const string& sub_error_kind, const string& error_message ); // Return a string containing all the unpacked service call args given a service call function // `t_function`. Prepends the args with either `&mut self` or `&self` and includes the arg types // in the returned string, for example: // `fn foo(&mut self, field_0: String)`. string rust_sync_service_call_declaration(t_function* tfunc, bool self_is_mutable); // Return a string containing all the unpacked service call args given a service call function // `t_function`. Only includes the arg names, each of which is prefixed with the optional prefix // `field_prefix`, for example: `self.field_0`. string rust_sync_service_call_invocation(t_function* tfunc, const string& field_prefix = ""); // Return a string containing all fields in the struct `tstruct` for use in a function declaration. // Each field is followed by its type, for example: `field_0: String`. string struct_to_declaration(t_struct* tstruct, t_rs_generator::e_struct_type struct_type); // Return a string containing all fields in the struct `tstruct` for use in a function call, // for example: `field_0: String`. string struct_to_invocation(t_struct* tstruct, const string& field_prefix = ""); // Write the documentation for a struct, service-call or other documentation-annotated element. void render_rustdoc(t_doc* tdoc); // Return `true` if the true type of `ttype` is a thrift double, `false` otherwise. bool is_double(t_type* ttype); // Return a string representing the rust type given a `t_type`. string to_rust_type(t_type* ttype, bool ordered_float = true); // Return a string representing the rift `protocol::TType` given a `t_type`. string to_rust_field_type_enum(t_type* ttype); // Return the default value to be used when initializing a struct field which has `OPT_IN_REQ_OUT` // optionality. string opt_in_req_out_value(t_type* ttype); // Return `true` if we can write a const of the form `pub const FOO: ...`. bool can_generate_simple_const(t_type* ttype); // Return `true` if we cannot write a standard Rust constant (because the type needs some allocation). bool can_generate_const_holder(t_type* ttype); // Return `true` if this type is a void, and should be represented by the rust `()` type. bool is_void(t_type* ttype); t_field::e_req actual_field_req(t_field* tfield, t_rs_generator::e_struct_type struct_type); // Return `true` if this `t_field::e_req` is either `t_field::T_OPTIONAL` or `t_field::T_OPT_IN_REQ_OUT` // and needs to be wrapped by an `Option<TYPE_NAME>`, `false` otherwise. bool is_optional(t_field::e_req req); // Return `true` if the service call has arguments, `false` otherwise. bool has_args(t_function* tfunc); // Return `true` if a service call has non-`()` arguments, `false` otherwise. bool has_non_void_args(t_function* tfunc); // Return `pub ` (notice trailing whitespace!) if the struct should be public, `` (empty string) otherwise. string visibility_qualifier(t_rs_generator::e_struct_type struct_type); // Returns the namespace prefix for a given Thrift service. If the type is defined in the presently-computed // Thrift program, then an empty string is returned. 
string rust_namespace(t_service* tservice); // Returns the namespace prefix for a given Thrift type. If the type is defined in the presently-computed // Thrift program, then an empty string is returned. string rust_namespace(t_type* ttype); // Returns the camel-cased name for a Rust struct type. Handles the case where `tstruct->get_name()` is // a reserved word. string rust_struct_name(t_struct* tstruct); // Returns the snake-cased name for a Rust field or local variable. Handles the case where // `tfield->get_name()` is a reserved word. string rust_field_name(t_field* tstruct); // Returns the camel-cased name for a Rust union type. Handles the case where `tstruct->get_name()` is // a reserved word. string rust_union_field_name(t_field* tstruct); // Converts any variable name into a 'safe' variant that does not clash with any Rust reserved keywords. string rust_safe_name(const string& name); // Return `true` if the name is a reserved Rust keyword, `false` otherwise. bool is_reserved(const string& name); // Return the name of the function that users will invoke to make outgoing service calls. string service_call_client_function_name(t_function* tfunc); // Return the name of the function that users will have to implement to handle incoming service calls. string service_call_handler_function_name(t_function* tfunc); // Return the name of the struct used to pack the return value // and user-defined exceptions for the thrift service call. string service_call_result_struct_name(t_function* tfunc); string rust_sync_client_marker_trait_name(t_service* tservice); // Return the trait name for the sync service client given a `t_service`. string rust_sync_client_trait_name(t_service* tservice); // Return the name for the sync service client struct given a `t_service`. string rust_sync_client_impl_name(t_service* tservice); // Return the trait name that users will have to implement for the server half of a Thrift service. string rust_sync_handler_trait_name(t_service* tservice); // Return the struct name for the server half of a Thrift service. string rust_sync_processor_name(t_service* tservice); // Return the struct name for the struct that contains all the service-call implementations for // the server half of a Thrift service. string rust_sync_processor_impl_name(t_service *tservice); // Properly uppercase names for use in Rust. string rust_upper_case(const string& name); // Snake-case field, parameter and function names and make them Rust friendly. string rust_snake_case(const string& name); // Camel-case type/variant names and make them Rust friendly. string rust_camel_case(const string& name); // Replace all instances of `search_string` with `replace_string` in `target`. 
void string_replace(string& target, const string& search_string, const string& replace_string); }; void t_rs_generator::init_generator() { // make output directory for this thrift program MKDIR(gen_dir_.c_str()); // create the file into which we're going to write the generated code string f_gen_name = gen_dir_ + "/" + rust_snake_case(get_program()->get_name()) + ".rs"; f_gen_.open(f_gen_name.c_str()); // header comment f_gen_ << "// " << autogen_summary() << endl; f_gen_ << "// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING" << endl; f_gen_ << endl; render_attributes_and_includes(); } void t_rs_generator::render_attributes_and_includes() { // turn off some compiler/clippy warnings // code always includes BTreeMap/BTreeSet/OrderedFloat f_gen_ << "#![allow(unused_imports)]" << endl; // code might not include imports from crates f_gen_ << "#![allow(unused_extern_crates)]" << endl; // constructors take *all* struct parameters, which can trigger the "too many arguments" warning // some auto-gen'd types can be deeply nested. clippy recommends factoring them out which is hard to autogen f_gen_ << "#![cfg_attr(feature = \"cargo-clippy\", allow(too_many_arguments, type_complexity))]" << endl; // prevent rustfmt from running against this file // lines are too long, code is (thankfully!) not visual-indented, etc. f_gen_ << "#![cfg_attr(rustfmt, rustfmt_skip)]" << endl; f_gen_ << endl; // add standard includes f_gen_ << "extern crate ordered_float;" << endl; f_gen_ << "extern crate thrift;" << endl; f_gen_ << "extern crate try_from;" << endl; f_gen_ << endl; f_gen_ << "use ordered_float::OrderedFloat;" << endl; f_gen_ << "use std::cell::RefCell;" << endl; f_gen_ << "use std::collections::{BTreeMap, BTreeSet};" << endl; f_gen_ << "use std::convert::From;" << endl; f_gen_ << "use std::default::Default;" << endl; f_gen_ << "use std::error::Error;" << endl; f_gen_ << "use std::fmt;" << endl; f_gen_ << "use std::fmt::{Display, Formatter};" << endl; f_gen_ << "use std::rc::Rc;" << endl; f_gen_ << "use try_from::TryFrom;" << endl; f_gen_ << endl; f_gen_ << "use thrift::{ApplicationError, ApplicationErrorKind, ProtocolError, ProtocolErrorKind, TThriftClient};" << endl; f_gen_ << "use thrift::protocol::{TFieldIdentifier, TListIdentifier, TMapIdentifier, TMessageIdentifier, TMessageType, TInputProtocol, TOutputProtocol, TSetIdentifier, TStructIdentifier, TType};" << endl; f_gen_ << "use thrift::protocol::field_id;" << endl; f_gen_ << "use thrift::protocol::verify_expected_message_type;" << endl; f_gen_ << "use thrift::protocol::verify_expected_sequence_number;" << endl; f_gen_ << "use thrift::protocol::verify_expected_service_call;" << endl; f_gen_ << "use thrift::protocol::verify_required_field_exists;" << endl; f_gen_ << "use thrift::server::TProcessor;" << endl; f_gen_ << endl; // add all the program includes // NOTE: this is more involved than you would expect because of service extension // Basically, I have to find the closure of all the services and include their modules at the top-level set<string> referenced_modules; // first, start by adding explicit thrift includes const vector<t_program*> includes = get_program()->get_includes(); vector<t_program*>::const_iterator includes_iter; for(includes_iter = includes.begin(); includes_iter != includes.end(); ++includes_iter) { referenced_modules.insert((*includes_iter)->get_name()); } // next, recursively iterate through all the services and add the names of any programs they reference const vector<t_service*> services = 
get_program()->get_services(); vector<t_service*>::const_iterator service_iter; for (service_iter = services.begin(); service_iter != services.end(); ++service_iter) { compute_service_referenced_modules(*service_iter, referenced_modules); } // finally, write all the "pub use..." declarations if (!referenced_modules.empty()) { set<string>::iterator module_iter; for (module_iter = referenced_modules.begin(); module_iter != referenced_modules.end(); ++module_iter) { f_gen_ << "use " << rust_snake_case(*module_iter) << ";" << endl; } f_gen_ << endl; } } void t_rs_generator::compute_service_referenced_modules( t_service *tservice, set<string> &referenced_modules ) { t_service* extends = tservice->get_extends(); if (extends) { if (extends->get_program() != get_program()) { referenced_modules.insert(extends->get_program()->get_name()); } compute_service_referenced_modules(extends, referenced_modules); } } void t_rs_generator::close_generator() { f_gen_.close(); } //----------------------------------------------------------------------------- // // Consts // // NOTE: consider using macros to generate constants // //----------------------------------------------------------------------------- // This is worse than it should be because constants // aren't (sensibly) limited to scalar types void t_rs_generator::generate_const(t_const* tconst) { string name = tconst->get_name(); t_type* ttype = tconst->get_type(); t_const_value* tvalue = tconst->get_value(); if (can_generate_simple_const(ttype)) { render_const_value(name, ttype, tvalue); } else if (can_generate_const_holder(ttype)) { render_const_value_holder(name, ttype, tvalue); } else { throw "cannot generate const for " + name; } } void t_rs_generator::render_const_value(const string& name, t_type* ttype, t_const_value* tvalue) { if (!can_generate_simple_const(ttype)) { throw "cannot generate simple rust constant for " + ttype->get_name(); } f_gen_ << "pub const " << rust_upper_case(name) << ": " << to_rust_type(ttype) << " = "; render_const_value(ttype, tvalue); f_gen_ << ";" << endl; f_gen_ << endl; } void t_rs_generator::render_const_value_holder(const string& name, t_type* ttype, t_const_value* tvalue) { if (!can_generate_const_holder(ttype)) { throw "cannot generate constant holder for " + ttype->get_name(); } string holder_name("Const" + rust_camel_case(name)); f_gen_ << indent() << "pub struct " << holder_name << ";" << endl; f_gen_ << indent() << "impl " << holder_name << " {" << endl; indent_up(); f_gen_ << indent() << "pub fn const_value() -> " << to_rust_type(ttype) << " {" << endl; indent_up(); render_const_value(ttype, tvalue); indent_down(); f_gen_ << indent() << "}" << endl; indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << endl; } void t_rs_generator::render_const_value(t_type* ttype, t_const_value* tvalue) { if (ttype->is_base_type()) { t_base_type* tbase_type = (t_base_type*)ttype; switch (tbase_type->get_base()) { case t_base_type::TYPE_STRING: if (tbase_type->is_binary()) { f_gen_ << "\"" << tvalue->get_string() << "\""<< ".to_owned().into_bytes()"; } else { f_gen_ << "\"" << tvalue->get_string() << "\""<< ".to_owned()"; } break; case t_base_type::TYPE_BOOL: f_gen_ << (tvalue->get_integer() ? 
"true" : "false"); break; case t_base_type::TYPE_I8: case t_base_type::TYPE_I16: case t_base_type::TYPE_I32: case t_base_type::TYPE_I64: f_gen_ << tvalue->get_integer(); break; case t_base_type::TYPE_DOUBLE: f_gen_ << "OrderedFloat::from(" << tvalue->get_double() << " as f64)"; break; default: throw "cannot generate const value for " + t_base_type::t_base_name(tbase_type->get_base()); } } else if (ttype->is_typedef()) { render_const_value(get_true_type(ttype), tvalue); } else if (ttype->is_enum()) { f_gen_ << indent() << "{" << endl; indent_up(); f_gen_ << indent() << to_rust_type(ttype) << "::try_from(" << tvalue->get_integer() << ").expect(\"expecting valid const value\")" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } else if (ttype->is_struct() || ttype->is_xception()) { render_const_struct(ttype, tvalue); } else if (ttype->is_container()) { f_gen_ << indent() << "{" << endl; indent_up(); if (ttype->is_list()) { render_const_list(ttype, tvalue); } else if (ttype->is_set()) { render_const_set(ttype, tvalue); } else if (ttype->is_map()) { render_const_map(ttype, tvalue); } else { throw "cannot generate const container value for " + ttype->get_name(); } indent_down(); f_gen_ << indent() << "}" << endl; } else { throw "cannot generate const value for " + ttype->get_name(); } } void t_rs_generator::render_const_struct(t_type* ttype, t_const_value*) { if (((t_struct*)ttype)->is_union()) { f_gen_ << indent() << "{" << endl; indent_up(); f_gen_ << indent() << "unimplemented!()" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } else { f_gen_ << indent() << "{" << endl; indent_up(); f_gen_ << indent() << "unimplemented!()" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } } void t_rs_generator::render_const_list(t_type* ttype, t_const_value* tvalue) { t_type* elem_type = ((t_list*)ttype)->get_elem_type(); f_gen_ << indent() << "let mut l: Vec<" << to_rust_type(elem_type) << "> = Vec::new();" << endl; const vector<t_const_value*>& elems = tvalue->get_list(); vector<t_const_value*>::const_iterator elem_iter; for(elem_iter = elems.begin(); elem_iter != elems.end(); ++elem_iter) { t_const_value* elem_value = (*elem_iter); render_container_const_value("l.push", elem_type, elem_value); } f_gen_ << indent() << "l" << endl; } void t_rs_generator::render_const_set(t_type* ttype, t_const_value* tvalue) { t_type* elem_type = ((t_set*)ttype)->get_elem_type(); f_gen_ << indent() << "let mut s: BTreeSet<" << to_rust_type(elem_type) << "> = BTreeSet::new();" << endl; const vector<t_const_value*>& elems = tvalue->get_list(); vector<t_const_value*>::const_iterator elem_iter; for(elem_iter = elems.begin(); elem_iter != elems.end(); ++elem_iter) { t_const_value* elem_value = (*elem_iter); render_container_const_value("s.insert", elem_type, elem_value); } f_gen_ << indent() << "s" << endl; } void t_rs_generator::render_const_map(t_type* ttype, t_const_value* tvalue) { t_type* key_type = ((t_map*)ttype)->get_key_type(); t_type* val_type = ((t_map*)ttype)->get_val_type(); f_gen_ << indent() << "let mut m: BTreeMap<" << to_rust_type(key_type) << ", " << to_rust_type(val_type) << "> = BTreeMap::new();" << endl; const map<t_const_value*, t_const_value*, t_const_value::value_compare>& elems = tvalue->get_map(); map<t_const_value*, t_const_value*, t_const_value::value_compare>::const_iterator elem_iter; for (elem_iter = elems.begin(); elem_iter != elems.end(); ++elem_iter) { t_const_value* key_value = elem_iter->first; t_const_value* val_value = elem_iter->second; if 
(get_true_type(key_type)->is_base_type()) { f_gen_ << indent() << "let k = "; render_const_value(key_type, key_value); f_gen_ << ";" << endl; } else { f_gen_ << indent() << "let k = {" << endl; indent_up(); render_const_value(key_type, key_value); indent_down(); f_gen_ << indent() << "};" << endl; } if (get_true_type(val_type)->is_base_type()) { f_gen_ << indent() << "let v = "; render_const_value(val_type, val_value); f_gen_ << ";" << endl; } else { f_gen_ << indent() << "let v = {" << endl; indent_up(); render_const_value(val_type, val_value); indent_down(); f_gen_ << indent() << "};" << endl; } f_gen_ << indent() << "m.insert(k, v);" << endl; } f_gen_ << indent() << "m" << endl; } void t_rs_generator::render_container_const_value( const string& insert_function, t_type* ttype, t_const_value* tvalue ) { if (get_true_type(ttype)->is_base_type()) { f_gen_ << indent() << insert_function << "("; render_const_value(ttype, tvalue); f_gen_ << ");" << endl; } else { f_gen_ << indent() << insert_function << "(" << endl; indent_up(); render_const_value(ttype, tvalue); indent_down(); f_gen_ << indent() << ");" << endl; } } //----------------------------------------------------------------------------- // // Typedefs // //----------------------------------------------------------------------------- void t_rs_generator::generate_typedef(t_typedef* ttypedef) { std::string actual_type = to_rust_type(ttypedef->get_type()); f_gen_ << "pub type " << rust_safe_name(ttypedef->get_symbolic()) << " = " << actual_type << ";" << endl; f_gen_ << endl; } //----------------------------------------------------------------------------- // // Enums // //----------------------------------------------------------------------------- void t_rs_generator::generate_enum(t_enum* tenum) { string enum_name(rust_camel_case(tenum->get_name())); render_enum_definition(tenum, enum_name); render_enum_impl(enum_name); render_enum_conversion(tenum, enum_name); } void t_rs_generator::render_enum_definition(t_enum* tenum, const string& enum_name) { render_rustdoc((t_doc*) tenum); f_gen_ << "#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]" << endl; f_gen_ << "pub enum " << enum_name << " {" << endl; indent_up(); vector<t_enum_value*> constants = tenum->get_constants(); vector<t_enum_value*>::iterator constants_iter; for (constants_iter = constants.begin(); constants_iter != constants.end(); ++constants_iter) { t_enum_value* val = (*constants_iter); render_rustdoc((t_doc*) val); f_gen_ << indent() << uppercase(val->get_name()) << " = " << val->get_value() << "," << endl; } indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; } void t_rs_generator::render_enum_impl(const string& enum_name) { f_gen_ << "impl " << enum_name << " {" << endl; indent_up(); f_gen_ << indent() << "pub fn write_to_out_protocol(&self, o_prot: &mut TOutputProtocol) -> thrift::Result<()> {" << endl; indent_up(); f_gen_ << indent() << "o_prot.write_i32(*self as i32)" << endl; indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << indent() << "pub fn read_from_in_protocol(i_prot: &mut TInputProtocol) -> thrift::Result<" << enum_name << "> {" << endl; indent_up(); f_gen_ << indent() << "let enum_value = i_prot.read_i32()?;" << endl; f_gen_ << indent() << enum_name << "::try_from(enum_value)"; indent_down(); f_gen_ << indent() << "}" << endl; indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; } void t_rs_generator::render_enum_conversion(t_enum* tenum, const string& enum_name) { f_gen_ << "impl TryFrom<i32> for " << enum_name << " {" << 
endl; indent_up(); f_gen_ << indent() << "type Err = thrift::Error;"; f_gen_ << indent() << "fn try_from(i: i32) -> Result<Self, Self::Err> {" << endl; indent_up(); f_gen_ << indent() << "match i {" << endl; indent_up(); vector<t_enum_value*> constants = tenum->get_constants(); vector<t_enum_value*>::iterator constants_iter; for (constants_iter = constants.begin(); constants_iter != constants.end(); ++constants_iter) { t_enum_value* val = (*constants_iter); f_gen_ << indent() << val->get_value() << " => Ok(" << enum_name << "::" << uppercase(val->get_name()) << ")," << endl; } f_gen_ << indent() << "_ => {" << endl; indent_up(); render_rift_error( "Protocol", "ProtocolError", "ProtocolErrorKind::InvalidData", "format!(\"cannot convert enum constant {} to " + enum_name + "\", i)" ); indent_down(); f_gen_ << indent() << "}," << endl; indent_down(); f_gen_ << indent() << "}" << endl; indent_down(); f_gen_ << indent() << "}" << endl; indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; } //----------------------------------------------------------------------------- // // Structs, Unions and Exceptions // //----------------------------------------------------------------------------- void t_rs_generator::generate_xception(t_struct* txception) { render_struct(rust_struct_name(txception), txception, t_rs_generator::T_EXCEPTION); } void t_rs_generator::generate_struct(t_struct* tstruct) { if (tstruct->is_union()) { render_union(tstruct); } else if (tstruct->is_struct()) { render_struct(rust_struct_name(tstruct), tstruct, t_rs_generator::T_REGULAR); } else { throw "cannot generate struct for exception"; } } void t_rs_generator::render_struct( const string& struct_name, t_struct* tstruct, t_rs_generator::e_struct_type struct_type ) { render_type_comment(struct_name); render_struct_definition(struct_name, tstruct, struct_type); render_struct_impl(struct_name, tstruct, struct_type); if (struct_type == t_rs_generator::T_REGULAR || struct_type == t_rs_generator::T_EXCEPTION) { render_struct_default_trait_impl(struct_name, tstruct); } if (struct_type == t_rs_generator::T_EXCEPTION) { render_exception_struct_error_trait_impls(struct_name, tstruct); } } void t_rs_generator::render_struct_definition( const string& struct_name, t_struct* tstruct, t_rs_generator::e_struct_type struct_type ) { render_rustdoc((t_doc*) tstruct); f_gen_ << "#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]" << endl; f_gen_ << visibility_qualifier(struct_type) << "struct " << struct_name << " {" << endl; // render the members vector<t_field*> members = tstruct->get_sorted_members(); if (!members.empty()) { indent_up(); vector<t_field*>::iterator members_iter; for(members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* member = (*members_iter); t_field::e_req member_req = actual_field_req(member, struct_type); string rust_type = to_rust_type(member->get_type()); rust_type = is_optional(member_req) ? 
"Option<" + rust_type + ">" : rust_type; render_rustdoc((t_doc*) member); f_gen_ << indent() << visibility_qualifier(struct_type) << rust_field_name(member) << ": " << rust_type << "," << endl; } indent_down(); } f_gen_ << "}" << endl; f_gen_ << endl; } void t_rs_generator::render_exception_struct_error_trait_impls(const string& struct_name, t_struct* tstruct) { // error::Error trait f_gen_ << "impl Error for " << struct_name << " {" << endl; indent_up(); f_gen_ << indent() << "fn description(&self) -> &str {" << endl; indent_up(); f_gen_ << indent() << "\"" << "remote service threw " << tstruct->get_name() << "\"" << endl; // use *original* name indent_down(); f_gen_ << indent() << "}" << endl; indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; // convert::From trait f_gen_ << "impl From<" << struct_name << "> for thrift::Error {" << endl; indent_up(); f_gen_ << indent() << "fn from(e: " << struct_name << ") -> Self {" << endl; indent_up(); f_gen_ << indent() << "thrift::Error::User(Box::new(e))" << endl; indent_down(); f_gen_ << indent() << "}" << endl; indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; // fmt::Display trait f_gen_ << "impl Display for " << struct_name << " {" << endl; indent_up(); f_gen_ << indent() << "fn fmt(&self, f: &mut Formatter) -> fmt::Result {" << endl; indent_up(); f_gen_ << indent() << "self.description().fmt(f)" << endl; indent_down(); f_gen_ << indent() << "}" << endl; indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; } void t_rs_generator::render_struct_default_trait_impl(const string& struct_name, t_struct* tstruct) { bool has_required_field = false; const vector<t_field*>& members = tstruct->get_sorted_members(); vector<t_field*>::const_iterator members_iter; for (members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* member = *members_iter; if (!is_optional(member->get_req())) { has_required_field = true; break; } } if (has_required_field) { return; } f_gen_ << "impl Default for " << struct_name << " {" << endl; indent_up(); f_gen_ << indent() << "fn default() -> Self {" << endl; indent_up(); if (members.empty()) { f_gen_ << indent() << struct_name << "{}" << endl; } else { f_gen_ << indent() << struct_name << "{" << endl; indent_up(); for (members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field *member = (*members_iter); string member_name(rust_field_name(member)); f_gen_ << indent() << member_name << ": " << opt_in_req_out_value(member->get_type()) << "," << endl; } indent_down(); f_gen_ << indent() << "}" << endl; } indent_down(); f_gen_ << indent() << "}" << endl; indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; } void t_rs_generator::render_struct_impl( const string& struct_name, t_struct* tstruct, t_rs_generator::e_struct_type struct_type ) { f_gen_ << "impl " << struct_name << " {" << endl; indent_up(); if (struct_type == t_rs_generator::T_REGULAR || struct_type == t_rs_generator::T_EXCEPTION) { render_struct_constructor(struct_name, tstruct, struct_type); } render_struct_sync_read(struct_name, tstruct, struct_type); render_struct_sync_write(tstruct, struct_type); if (struct_type == t_rs_generator::T_RESULT) { render_result_struct_to_result_method(tstruct); } indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; } void t_rs_generator::render_struct_constructor( const string& struct_name, t_struct* tstruct, t_rs_generator::e_struct_type struct_type ) { const vector<t_field*>& members = tstruct->get_sorted_members(); vector<t_field*>::const_iterator members_iter; // 
build the convenience type parameters that allows us to pass unwrapped values to a constructor and // have them automatically converted into Option<value> bool first_arg = true; ostringstream generic_type_parameters; ostringstream generic_type_qualifiers; for(members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* member = (*members_iter); t_field::e_req member_req = actual_field_req(member, struct_type); if (is_optional(member_req)) { if (first_arg) { first_arg = false; } else { generic_type_parameters << ", "; generic_type_qualifiers << ", "; } generic_type_parameters << "F" << member->get_key(); generic_type_qualifiers << "F" << member->get_key() << ": Into<Option<" << to_rust_type(member->get_type()) << ">>"; } } string type_parameter_string = generic_type_parameters.str(); if (type_parameter_string.length() != 0) { type_parameter_string = "<" + type_parameter_string + ">"; } string type_qualifier_string = generic_type_qualifiers.str(); if (type_qualifier_string.length() != 0) { type_qualifier_string = "where " + type_qualifier_string + " "; } // now build the actual constructor arg list // when we're building this list we have to use the type parameters in place of the actual type names // if necessary ostringstream args; first_arg = true; for(members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* member = (*members_iter); t_field::e_req member_req = actual_field_req(member, struct_type); string member_name(rust_field_name(member)); if (first_arg) { first_arg = false; } else { args << ", "; } if (is_optional(member_req)) { args << member_name << ": " << "F" << member->get_key(); } else { args << member_name << ": " << to_rust_type(member->get_type()); } } string arg_string = args.str(); string visibility(visibility_qualifier(struct_type)); f_gen_ << indent() << visibility << "fn new" << type_parameter_string << "(" << arg_string << ") -> " << struct_name << " " << type_qualifier_string << "{" << endl; indent_up(); if (members.size() == 0) { f_gen_ << indent() << struct_name << " {}" << endl; } else { f_gen_ << indent() << struct_name << " {" << endl; indent_up(); for(members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* member = (*members_iter); t_field::e_req member_req = actual_field_req(member, struct_type); string member_name(rust_field_name(member)); if (is_optional(member_req)) { f_gen_ << indent() << member_name << ": " << member_name << ".into()," << endl; } else { f_gen_ << indent() << member_name << ": " << member_name << "," << endl; } } indent_down(); f_gen_ << indent() << "}" << endl; } indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_result_struct_to_result_method(t_struct* tstruct) { // we don't use the rust struct name in this method, just the service call name string service_call_name = tstruct->get_name(); // check that we actually have a result size_t index = service_call_name.find(RESULT_STRUCT_SUFFIX, 0); if (index == std::string::npos) { throw "result struct " + service_call_name + " missing result suffix"; } else { service_call_name.replace(index, 6, ""); } const vector<t_field*>& members = tstruct->get_sorted_members(); vector<t_field*>::const_iterator members_iter; // find out what the call's expected return type was string rust_return_type = "()"; for(members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* member = (*members_iter); if (member->get_name() == SERVICE_RESULT_VARIABLE) { // don't 
have to check safe name here rust_return_type = to_rust_type(member->get_type()); break; } } // NOTE: ideally I would generate the branches and render them separately // I tried this however, and the resulting code was harder to understand // maintaining a rendered branch count (while a little ugly) got me the // rendering I wanted with code that was reasonably understandable f_gen_ << indent() << "fn ok_or(self) -> thrift::Result<" << rust_return_type << "> {" << endl; indent_up(); int rendered_branch_count = 0; // render the exception branches for(members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* tfield = (*members_iter); if (tfield->get_name() != SERVICE_RESULT_VARIABLE) { // don't have to check safe name here string field_name("self." + rust_field_name(tfield)); string branch_statement = rendered_branch_count == 0 ? "if" : "} else if"; f_gen_ << indent() << branch_statement << " " << field_name << ".is_some() {" << endl; indent_up(); f_gen_ << indent() << "Err(thrift::Error::User(Box::new(" << field_name << ".unwrap())))" << endl; indent_down(); rendered_branch_count++; } } // render the return value branches if (rust_return_type == "()") { if (rendered_branch_count == 0) { // we have the unit return and this service call has no user-defined // exceptions. this means that we've a trivial return (happens with oneways) f_gen_ << indent() << "Ok(())" << endl; } else { // we have the unit return, but there are user-defined exceptions // if we've gotten this far then we have the default return (i.e. call successful) f_gen_ << indent() << "} else {" << endl; indent_up(); f_gen_ << indent() << "Ok(())" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } } else { string branch_statement = rendered_branch_count == 0 ? "if" : "} else if"; f_gen_ << indent() << branch_statement << " self." << SERVICE_RESULT_VARIABLE << ".is_some() {" << endl; indent_up(); f_gen_ << indent() << "Ok(self." 
<< SERVICE_RESULT_VARIABLE << ".unwrap())" << endl; indent_down(); f_gen_ << indent() << "} else {" << endl; indent_up(); // if we haven't found a valid return value *or* a user exception // then we're in trouble; return a default error render_rift_error( "Application", "ApplicationError", "ApplicationErrorKind::MissingResult", "\"no result received for " + service_call_name + "\"" ); indent_down(); f_gen_ << indent() << "}" << endl; } indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_union(t_struct* tstruct) { string union_name(rust_struct_name(tstruct)); render_type_comment(union_name); render_union_definition(union_name, tstruct); render_union_impl(union_name, tstruct); } void t_rs_generator::render_union_definition(const string& union_name, t_struct* tstruct) { const vector<t_field*>& members = tstruct->get_sorted_members(); if (members.empty()) { throw "cannot generate rust enum with 0 members"; // may be valid thrift, but it's invalid rust } f_gen_ << "#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]" << endl; f_gen_ << "pub enum " << union_name << " {" << endl; indent_up(); vector<t_field*>::const_iterator member_iter; for(member_iter = members.begin(); member_iter != members.end(); ++member_iter) { t_field* tfield = (*member_iter); f_gen_ << indent() << rust_union_field_name(tfield) << "(" << to_rust_type(tfield->get_type()) << ")," << endl; } indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; } void t_rs_generator::render_union_impl(const string& union_name, t_struct* tstruct) { f_gen_ << "impl " << union_name << " {" << endl; indent_up(); render_union_sync_read(union_name, tstruct); render_union_sync_write(union_name, tstruct); indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; } //----------------------------------------------------------------------------- // // Sync Struct Write // //----------------------------------------------------------------------------- void t_rs_generator::render_struct_sync_write( t_struct *tstruct, t_rs_generator::e_struct_type struct_type ) { f_gen_ << indent() << visibility_qualifier(struct_type) << "fn write_to_out_protocol(&self, o_prot: &mut TOutputProtocol) -> thrift::Result<()> {" << endl; indent_up(); // write struct header to output protocol // note: use the *original* struct name here f_gen_ << indent() << "let struct_ident = TStructIdentifier::new(\"" + tstruct->get_name() + "\");" << endl; f_gen_ << indent() << "o_prot.write_struct_begin(&struct_ident)?;" << endl; // write struct members to output protocol vector<t_field*> members = tstruct->get_sorted_members(); if (!members.empty()) { vector<t_field*>::iterator members_iter; for(members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* member = (*members_iter); t_field::e_req member_req = actual_field_req(member, struct_type); string member_var("self." 
+ rust_field_name(member)); render_struct_field_sync_write(member_var, false, member, member_req); } } // write struct footer to output protocol f_gen_ << indent() << "o_prot.write_field_stop()?;" << endl; f_gen_ << indent() << "o_prot.write_struct_end()" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_union_sync_write(const string &union_name, t_struct *tstruct) { f_gen_ << indent() << "pub fn write_to_out_protocol(&self, o_prot: &mut TOutputProtocol) -> thrift::Result<()> {" << endl; indent_up(); // write struct header to output protocol // note: use the *original* struct name here f_gen_ << indent() << "let struct_ident = TStructIdentifier::new(\"" + tstruct->get_name() + "\");" << endl; f_gen_ << indent() << "o_prot.write_struct_begin(&struct_ident)?;" << endl; // write the enum field to the output protocol vector<t_field*> members = tstruct->get_sorted_members(); if (!members.empty()) { f_gen_ << indent() << "match *self {" << endl; indent_up(); vector<t_field*>::iterator members_iter; for(members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* member = (*members_iter); t_field::e_req member_req = t_field::T_REQUIRED; t_type* ttype = member->get_type(); string match_var((ttype->is_base_type() && !ttype->is_string()) ? "f" : "ref f"); f_gen_ << indent() << union_name << "::" << rust_union_field_name(member) << "(" << match_var << ") => {" << endl; indent_up(); render_struct_field_sync_write("f", true, member, member_req); indent_down(); f_gen_ << indent() << "}," << endl; } indent_down(); f_gen_ << indent() << "}" << endl; } // write struct footer to output protocol f_gen_ << indent() << "o_prot.write_field_stop()?;" << endl; f_gen_ << indent() << "o_prot.write_struct_end()" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_struct_field_sync_write( const string &field_var, bool field_var_is_ref, t_field *tfield, t_field::e_req req ) { t_type* field_type = tfield->get_type(); t_type* actual_type = get_true_type(field_type); ostringstream field_stream; field_stream << "TFieldIdentifier::new(" << "\"" << tfield->get_name() << "\"" << ", " // note: use *original* name << to_rust_field_type_enum(field_type) << ", " << tfield->get_key() << ")"; string field_ident_string = field_stream.str(); if (is_optional(req)) { string let_var((actual_type->is_base_type() && !actual_type->is_string()) ? 
"fld_var" : "ref fld_var"); f_gen_ << indent() << "if let Some(" << let_var << ") = " << field_var << " {" << endl; indent_up(); f_gen_ << indent() << "o_prot.write_field_begin(&" << field_ident_string << ")?;" << endl; render_type_sync_write("fld_var", true, field_type); f_gen_ << indent() << "o_prot.write_field_end()?;" << endl; f_gen_ << indent() << "()" << endl; // FIXME: remove this extraneous '()' indent_down(); f_gen_ << indent() << "} else {" << endl; // FIXME: remove else branch indent_up(); /* FIXME: rethink how I deal with OPT_IN_REQ_OUT if (req == t_field::T_OPT_IN_REQ_OUT) { f_gen_ << indent() << "let field_ident = " << field_ident_string << ";" << endl; f_gen_ << indent() << "o_prot.write_field_begin(&field_ident)?;" << endl; f_gen_ << indent() << "o_prot.write_field_end()?;" << endl; }*/ f_gen_ << indent() << "()" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } else { f_gen_ << indent() << "o_prot.write_field_begin(&" << field_ident_string << ")?;" << endl; render_type_sync_write(field_var, field_var_is_ref, tfield->get_type()); f_gen_ << indent() << "o_prot.write_field_end()?;" << endl; } } void t_rs_generator::render_type_sync_write(const string &type_var, bool type_var_is_ref, t_type *ttype) { if (ttype->is_base_type()) { t_base_type* tbase_type = (t_base_type*)ttype; switch (tbase_type->get_base()) { case t_base_type::TYPE_VOID: throw "cannot write field of type TYPE_VOID to output protocol"; case t_base_type::TYPE_STRING: { string ref(type_var_is_ref ? "" : "&"); if (tbase_type->is_binary()) { f_gen_ << indent() << "o_prot.write_bytes(" + ref + type_var + ")?;" << endl; } else { f_gen_ << indent() << "o_prot.write_string(" + ref + type_var + ")?;" << endl; } return; } case t_base_type::TYPE_BOOL: f_gen_ << indent() << "o_prot.write_bool(" + type_var + ")?;" << endl; return; case t_base_type::TYPE_I8: f_gen_ << indent() << "o_prot.write_i8(" + type_var + ")?;" << endl; return; case t_base_type::TYPE_I16: f_gen_ << indent() << "o_prot.write_i16(" + type_var + ")?;" << endl; return; case t_base_type::TYPE_I32: f_gen_ << indent() << "o_prot.write_i32(" + type_var + ")?;" << endl; return; case t_base_type::TYPE_I64: f_gen_ << indent() << "o_prot.write_i64(" + type_var + ")?;" << endl; return; case t_base_type::TYPE_DOUBLE: f_gen_ << indent() << "o_prot.write_double(" + type_var + ".into())?;" << endl; return; } } else if (ttype->is_typedef()) { t_typedef* ttypedef = (t_typedef*) ttype; render_type_sync_write(type_var, type_var_is_ref, ttypedef->get_type()); return; } else if (ttype->is_enum() || ttype->is_struct() || ttype->is_xception()) { f_gen_ << indent() << type_var + ".write_to_out_protocol(o_prot)?;" << endl; return; } else if (ttype->is_map()) { render_map_sync_write(type_var, type_var_is_ref, (t_map *) ttype); return; } else if (ttype->is_set()) { render_set_sync_write(type_var, type_var_is_ref, (t_set *) ttype); return; } else if (ttype->is_list()) { render_list_sync_write(type_var, type_var_is_ref, (t_list *) ttype); return; } throw "cannot write unsupported type " + ttype->get_name(); } void t_rs_generator::render_list_sync_write(const string &list_var, bool list_var_is_ref, t_list *tlist) { t_type* elem_type = tlist->get_elem_type(); f_gen_ << indent() << "o_prot.write_list_begin(" << "&TListIdentifier::new(" << to_rust_field_type_enum(elem_type) << ", " << list_var << ".len() as i32" << ")" << ")?;" << endl; string ref(list_var_is_ref ? 
"" : "&"); f_gen_ << indent() << "for e in " << ref << list_var << " {" << endl; indent_up(); render_type_sync_write(string_container_write_variable(elem_type, "e"), true, elem_type); f_gen_ << indent() << "o_prot.write_list_end()?;" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_set_sync_write(const string &set_var, bool set_var_is_ref, t_set *tset) { t_type* elem_type = tset->get_elem_type(); f_gen_ << indent() << "o_prot.write_set_begin(" << "&TSetIdentifier::new(" << to_rust_field_type_enum(elem_type) << ", " << set_var << ".len() as i32" << ")" << ")?;" << endl; string ref(set_var_is_ref ? "" : "&"); f_gen_ << indent() << "for e in " << ref << set_var << " {" << endl; indent_up(); render_type_sync_write(string_container_write_variable(elem_type, "e"), true, elem_type); f_gen_ << indent() << "o_prot.write_set_end()?;" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_map_sync_write(const string &map_var, bool map_var_is_ref, t_map *tmap) { t_type* key_type = tmap->get_key_type(); t_type* val_type = tmap->get_val_type(); f_gen_ << indent() << "o_prot.write_map_begin(" << "&TMapIdentifier::new(" << to_rust_field_type_enum(key_type) << ", " << to_rust_field_type_enum(val_type) << ", " << map_var << ".len() as i32)" << ")?;" << endl; string ref(map_var_is_ref ? "" : "&"); f_gen_ << indent() << "for (k, v) in " << ref << map_var << " {" << endl; indent_up(); render_type_sync_write(string_container_write_variable(key_type, "k"), true, key_type); render_type_sync_write(string_container_write_variable(val_type, "v"), true, val_type); f_gen_ << indent() << "o_prot.write_map_end()?;" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } string t_rs_generator::string_container_write_variable(t_type* ttype, const string& base_var) { bool type_needs_deref = needs_deref_on_container_write(ttype); bool type_is_double = is_double(ttype); string write_variable; if (type_is_double && type_needs_deref) { write_variable = "(*" + base_var + ")"; } else if (type_needs_deref) { write_variable = "*" + base_var; } else { write_variable = base_var; } return write_variable; } bool t_rs_generator::needs_deref_on_container_write(t_type* ttype) { ttype = get_true_type(ttype); return ttype->is_base_type() && !ttype->is_string(); } //----------------------------------------------------------------------------- // // Sync Struct Read // //----------------------------------------------------------------------------- void t_rs_generator::render_struct_sync_read( const string &struct_name, t_struct *tstruct, t_rs_generator::e_struct_type struct_type ) { f_gen_ << indent() << visibility_qualifier(struct_type) << "fn read_from_in_protocol(i_prot: &mut TInputProtocol) -> thrift::Result<" << struct_name << "> {" << endl; indent_up(); f_gen_ << indent() << "i_prot.read_struct_begin()?;" << endl; // create temporary variables: one for each field in the struct const vector<t_field*> members = tstruct->get_sorted_members(); vector<t_field*>::const_iterator members_iter; for (members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* member = (*members_iter); t_field::e_req member_req = actual_field_req(member, struct_type); f_gen_ << indent() << "let mut " << struct_field_read_temp_variable(member) << ": Option<" << to_rust_type(member->get_type()) << "> = "; if (member_req == t_field::T_OPT_IN_REQ_OUT) { f_gen_ << opt_in_req_out_value(member->get_type()) << ";"; } else { f_gen_ << "None;"; } f_gen_ << endl; } 
// now loop through the fields we've received f_gen_ << indent() << "loop {" << endl; // start loop indent_up(); // break out if you've found the Stop field f_gen_ << indent() << "let field_ident = i_prot.read_field_begin()?;" << endl; f_gen_ << indent() << "if field_ident.field_type == TType::Stop {" << endl; indent_up(); f_gen_ << indent() << "break;" << endl; indent_down(); f_gen_ << indent() << "}" << endl; // now read all the fields found f_gen_ << indent() << "let field_id = field_id(&field_ident)?;" << endl; f_gen_ << indent() << "match field_id {" << endl; // start match indent_up(); for (members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* tfield = (*members_iter); f_gen_ << indent() << tfield->get_key() << " => {" << endl; indent_up(); render_type_sync_read("val", tfield->get_type()); f_gen_ << indent() << struct_field_read_temp_variable(tfield) << " = Some(val);" << endl; indent_down(); f_gen_ << indent() << "}," << endl; } // default case (skip fields) f_gen_ << indent() << "_ => {" << endl; indent_up(); f_gen_ << indent() << "i_prot.skip(field_ident.field_type)?;" << endl; indent_down(); f_gen_ << indent() << "}," << endl; indent_down(); f_gen_ << indent() << "};" << endl; // finish match f_gen_ << indent() << "i_prot.read_field_end()?;" << endl; indent_down(); f_gen_ << indent() << "}" << endl; // finish loop f_gen_ << indent() << "i_prot.read_struct_end()?;" << endl; // read message footer from the wire // verify that all required fields exist for (members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* tfield = (*members_iter); t_field::e_req req = actual_field_req(tfield, struct_type); if (!is_optional(req)) { f_gen_ << indent() << "verify_required_field_exists(" << "\"" << struct_name << "." 
<< rust_field_name(tfield) << "\"" << ", " << "&" << struct_field_read_temp_variable(tfield) << ")?;" << endl; } } // construct the struct if (members.size() == 0) { f_gen_ << indent() << "let ret = " << struct_name << " {};" << endl; } else { f_gen_ << indent() << "let ret = " << struct_name << " {" << endl; indent_up(); for (members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* tfield = (*members_iter); t_field::e_req req = actual_field_req(tfield, struct_type); string field_name(rust_field_name(tfield)); string field_key = struct_field_read_temp_variable(tfield); if (is_optional(req)) { f_gen_ << indent() << field_name << ": " << field_key << "," << endl; } else { f_gen_ << indent() << field_name << ": " << field_key << ".expect(\"auto-generated code should have checked for presence of required fields\")" << "," << endl; } } indent_down(); f_gen_ << indent() << "};" << endl; } // return the constructed value f_gen_ << indent() << "Ok(ret)" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_union_sync_read(const string &union_name, t_struct *tstruct) { f_gen_ << indent() << "pub fn read_from_in_protocol(i_prot: &mut TInputProtocol) -> thrift::Result<" << union_name << "> {" << endl; indent_up(); // create temporary variables to hold the // completed union as well as a count of fields read f_gen_ << indent() << "let mut ret: Option<" << union_name << "> = None;" << endl; f_gen_ << indent() << "let mut received_field_count = 0;" << endl; // read the struct preamble f_gen_ << indent() << "i_prot.read_struct_begin()?;" << endl; // now loop through the fields we've received f_gen_ << indent() << "loop {" << endl; // start loop indent_up(); // break out if you've found the Stop field f_gen_ << indent() << "let field_ident = i_prot.read_field_begin()?;" << endl; f_gen_ << indent() << "if field_ident.field_type == TType::Stop {" << endl; indent_up(); f_gen_ << indent() << "break;" << endl; indent_down(); f_gen_ << indent() << "}" << endl; // now read all the fields found f_gen_ << indent() << "let field_id = field_id(&field_ident)?;" << endl; f_gen_ << indent() << "match field_id {" << endl; // start match indent_up(); const vector<t_field*> members = tstruct->get_sorted_members(); vector<t_field*>::const_iterator members_iter; for (members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* member = (*members_iter); f_gen_ << indent() << member->get_key() << " => {" << endl; indent_up(); render_type_sync_read("val", member->get_type()); f_gen_ << indent() << "if ret.is_none() {" << endl; indent_up(); f_gen_ << indent() << "ret = Some(" << union_name << "::" << rust_union_field_name(member) << "(val));" << endl; indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << indent() << "received_field_count += 1;" << endl; indent_down(); f_gen_ << indent() << "}," << endl; } // default case (skip fields) f_gen_ << indent() << "_ => {" << endl; indent_up(); f_gen_ << indent() << "i_prot.skip(field_ident.field_type)?;" << endl; f_gen_ << indent() << "received_field_count += 1;" << endl; indent_down(); f_gen_ << indent() << "}," << endl; indent_down(); f_gen_ << indent() << "};" << endl; // finish match f_gen_ << indent() << "i_prot.read_field_end()?;" << endl; indent_down(); f_gen_ << indent() << "}" << endl; // finish loop f_gen_ << indent() << "i_prot.read_struct_end()?;" << endl; // finish reading message from wire // return the value or an error f_gen_ << indent() << "if received_field_count == 
0 {" << endl; indent_up(); render_rift_error( "Protocol", "ProtocolError", "ProtocolErrorKind::InvalidData", "\"received empty union from remote " + union_name + "\"" ); indent_down(); f_gen_ << indent() << "} else if received_field_count > 1 {" << endl; indent_up(); render_rift_error( "Protocol", "ProtocolError", "ProtocolErrorKind::InvalidData", "\"received multiple fields for union from remote " + union_name + "\"" ); indent_down(); f_gen_ << indent() << "} else {" << endl; indent_up(); f_gen_ << indent() << "Ok(ret.expect(\"return value should have been constructed\"))" << endl; indent_down(); f_gen_ << indent() << "}" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } // Construct the rust representation of all supported types from the wire. void t_rs_generator::render_type_sync_read(const string &type_var, t_type *ttype, bool is_boxed) { if (ttype->is_base_type()) { t_base_type* tbase_type = (t_base_type*)ttype; switch (tbase_type->get_base()) { case t_base_type::TYPE_VOID: throw "cannot read field of type TYPE_VOID from input protocol"; case t_base_type::TYPE_STRING: if (tbase_type->is_binary()) { f_gen_ << indent() << "let " << type_var << " = i_prot.read_bytes()?;" << endl; } else { f_gen_ << indent() << "let " << type_var << " = i_prot.read_string()?;" << endl; } return; case t_base_type::TYPE_BOOL: f_gen_ << indent() << "let " << type_var << " = i_prot.read_bool()?;" << endl; return; case t_base_type::TYPE_I8: f_gen_ << indent() << "let " << type_var << " = i_prot.read_i8()?;" << endl; return; case t_base_type::TYPE_I16: f_gen_ << indent() << "let " << type_var << " = i_prot.read_i16()?;" << endl; return; case t_base_type::TYPE_I32: f_gen_ << indent() << "let " << type_var << " = i_prot.read_i32()?;" << endl; return; case t_base_type::TYPE_I64: f_gen_ << indent() << "let " << type_var << " = i_prot.read_i64()?;" << endl; return; case t_base_type::TYPE_DOUBLE: f_gen_ << indent() << "let " << type_var << " = OrderedFloat::from(i_prot.read_double()?);" << endl; return; } } else if (ttype->is_typedef()) { // FIXME: not a fan of separate `is_boxed` parameter // This is problematic because it's an optional parameter, and only comes // into play once. The core issue is that I lose an important piece of type // information (whether the type is a fwd ref) by unwrapping the typedef'd // type and making the recursive call using it. I can't modify or wrap the // generated string after the fact because it's written directly into the file, // so I have to pass this parameter along. Going with this approach because it // seems like the lowest-cost option to easily support recursive types. t_typedef* ttypedef = (t_typedef*)ttype; render_type_sync_read(type_var, ttypedef->get_type(), ttypedef->is_forward_typedef()); return; } else if (ttype->is_enum() || ttype->is_struct() || ttype->is_xception()) { string read_call(to_rust_type(ttype) + "::read_from_in_protocol(i_prot)?"); read_call = is_boxed ? "Box::new(" + read_call + ")" : read_call; f_gen_ << indent() << "let " << type_var << " = " << read_call << ";" << endl; return; } else if (ttype->is_map()) { render_map_sync_read((t_map *) ttype, type_var); return; } else if (ttype->is_set()) { render_set_sync_read((t_set *) ttype, type_var); return; } else if (ttype->is_list()) { render_list_sync_read((t_list *) ttype, type_var); return; } throw "cannot read unsupported type " + ttype->get_name(); } // Construct the rust representation of a list from the wire. 
void t_rs_generator::render_list_sync_read(t_list *tlist, const string &list_var) { t_type* elem_type = tlist->get_elem_type(); f_gen_ << indent() << "let list_ident = i_prot.read_list_begin()?;" << endl; f_gen_ << indent() << "let mut " << list_var << ": " << to_rust_type((t_type*) tlist) << " = Vec::with_capacity(list_ident.size as usize);" << endl; f_gen_ << indent() << "for _ in 0..list_ident.size {" << endl; indent_up(); string list_elem_var = tmp("list_elem_"); render_type_sync_read(list_elem_var, elem_type); f_gen_ << indent() << list_var << ".push(" << list_elem_var << ");" << endl; indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << indent() << "i_prot.read_list_end()?;" << endl; } // Construct the rust representation of a set from the wire. void t_rs_generator::render_set_sync_read(t_set *tset, const string &set_var) { t_type* elem_type = tset->get_elem_type(); f_gen_ << indent() << "let set_ident = i_prot.read_set_begin()?;" << endl; f_gen_ << indent() << "let mut " << set_var << ": " << to_rust_type((t_type*) tset) << " = BTreeSet::new();" << endl; f_gen_ << indent() << "for _ in 0..set_ident.size {" << endl; indent_up(); string set_elem_var = tmp("set_elem_"); render_type_sync_read(set_elem_var, elem_type); f_gen_ << indent() << set_var << ".insert(" << set_elem_var << ");" << endl; indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << indent() << "i_prot.read_set_end()?;" << endl; } // Construct the rust representation of a map from the wire. void t_rs_generator::render_map_sync_read(t_map *tmap, const string &map_var) { t_type* key_type = tmap->get_key_type(); t_type* val_type = tmap->get_val_type(); f_gen_ << indent() << "let map_ident = i_prot.read_map_begin()?;" << endl; f_gen_ << indent() << "let mut " << map_var << ": " << to_rust_type((t_type*) tmap) << " = BTreeMap::new();" << endl; f_gen_ << indent() << "for _ in 0..map_ident.size {" << endl; indent_up(); string key_elem_var = tmp("map_key_"); render_type_sync_read(key_elem_var, key_type); string val_elem_var = tmp("map_val_"); render_type_sync_read(val_elem_var, val_type); f_gen_ << indent() << map_var << ".insert(" << key_elem_var << ", " << val_elem_var << ");" << endl; indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << indent() << "i_prot.read_map_end()?;" << endl; } string t_rs_generator::struct_field_read_temp_variable(t_field* tfield) { std::ostringstream foss; foss << "f_" << tfield->get_key(); return foss.str(); } //----------------------------------------------------------------------------- // // Sync Client // //----------------------------------------------------------------------------- void t_rs_generator::generate_service(t_service* tservice) { render_sync_client(tservice); render_sync_processor(tservice); render_service_call_structs(tservice); } void t_rs_generator::render_service_call_structs(t_service* tservice) { const std::vector<t_function*> functions = tservice->get_functions(); std::vector<t_function*>::const_iterator func_iter; // thrift args for service calls are packed // into a struct that's transmitted over the wire, so // generate structs for those too // // thrift returns are *also* packed into a struct // that's passed over the wire, so, generate the struct // for that too. 
Note that this result struct *also* // contains the exceptions as well for(func_iter = functions.begin(); func_iter != functions.end(); ++func_iter) { t_function* tfunc = (*func_iter); render_struct(rust_struct_name(tfunc->get_arglist()), tfunc->get_arglist(), t_rs_generator::T_ARGS); if (!tfunc->is_oneway()) { render_result_value_struct(tfunc); } } } void t_rs_generator::render_sync_client(t_service* tservice) { string client_impl_name(rust_sync_client_impl_name(tservice)); render_type_comment(tservice->get_name() + " service client"); // note: use *original* name render_sync_client_trait(tservice); render_sync_client_marker_trait(tservice); render_sync_client_definition_and_impl(client_impl_name); render_sync_client_tthriftclient_impl(client_impl_name); render_sync_client_marker_trait_impls(tservice, client_impl_name); f_gen_ << endl; render_sync_client_process_impl(tservice); } void t_rs_generator::render_sync_client_trait(t_service *tservice) { string extension = ""; if (tservice->get_extends()) { t_service* extends = tservice->get_extends(); extension = " : " + rust_namespace(extends) + rust_sync_client_trait_name(extends); } render_rustdoc((t_doc*) tservice); f_gen_ << "pub trait " << rust_sync_client_trait_name(tservice) << extension << " {" << endl; indent_up(); const std::vector<t_function*> functions = tservice->get_functions(); std::vector<t_function*>::const_iterator func_iter; for(func_iter = functions.begin(); func_iter != functions.end(); ++func_iter) { t_function* tfunc = (*func_iter); string func_name = service_call_client_function_name(tfunc); string func_args = rust_sync_service_call_declaration(tfunc, true); string func_return = to_rust_type(tfunc->get_returntype()); render_rustdoc((t_doc*) tfunc); f_gen_ << indent() << "fn " << func_name << func_args << " -> thrift::Result<" << func_return << ">;" << endl; } indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << endl; } void t_rs_generator::render_sync_client_marker_trait(t_service *tservice) { f_gen_ << indent() << "pub trait " << rust_sync_client_marker_trait_name(tservice) << " {}" << endl; f_gen_ << endl; } void t_rs_generator::render_sync_client_marker_trait_impls(t_service *tservice, const string &impl_struct_name) { f_gen_ << indent() << "impl " << SYNC_CLIENT_GENERIC_BOUND_VARS << " " << rust_namespace(tservice) << rust_sync_client_marker_trait_name(tservice) << " for " << impl_struct_name << SYNC_CLIENT_GENERIC_BOUND_VARS << " " << SYNC_CLIENT_GENERIC_BOUNDS << " {}" << endl; t_service* extends = tservice->get_extends(); if (extends) { render_sync_client_marker_trait_impls(extends, impl_struct_name); } } void t_rs_generator::render_sync_client_definition_and_impl(const string& client_impl_name) { // render the definition for the client struct f_gen_ << "pub struct " << client_impl_name << SYNC_CLIENT_GENERIC_BOUND_VARS << " " << SYNC_CLIENT_GENERIC_BOUNDS << " {" << endl; indent_up(); f_gen_ << indent() << "_i_prot: IP," << endl; f_gen_ << indent() << "_o_prot: OP," << endl; f_gen_ << indent() << "_sequence_number: i32," << endl; indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; // render the struct implementation // this includes the new() function as well as the helper send/recv methods for each service call f_gen_ << "impl " << SYNC_CLIENT_GENERIC_BOUND_VARS << " " << client_impl_name << SYNC_CLIENT_GENERIC_BOUND_VARS << " " << SYNC_CLIENT_GENERIC_BOUNDS << " {" << endl; indent_up(); render_sync_client_lifecycle_functions(client_impl_name); indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; } 
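// Illustrative sketch of the generated client shell for a hypothetical service `Foo`,
// assuming SYNC_CLIENT_GENERIC_BOUND_VARS / SYNC_CLIENT_GENERIC_BOUNDS expand to
// `<IP, OP>` / `where IP: TInputProtocol, OP: TOutputProtocol` (an assumption here):
//
//     pub struct FooSyncClient<IP, OP> where IP: TInputProtocol, OP: TOutputProtocol {
//       _i_prot: IP,
//       _o_prot: OP,
//       _sequence_number: i32,
//     }
//
//     impl <IP, OP> FooSyncClient<IP, OP> where IP: TInputProtocol, OP: TOutputProtocol {
//       pub fn new(input_protocol: IP, output_protocol: OP) -> FooSyncClient<IP, OP> {
//         FooSyncClient { _i_prot: input_protocol, _o_prot: output_protocol, _sequence_number: 0 }
//       }
//     }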
void t_rs_generator::render_sync_client_lifecycle_functions(const string& client_struct) { f_gen_ << indent() << "pub fn new(input_protocol: IP, output_protocol: OP) -> " << client_struct << SYNC_CLIENT_GENERIC_BOUND_VARS << " {" << endl; indent_up(); f_gen_ << indent() << client_struct << " { _i_prot: input_protocol, _o_prot: output_protocol, _sequence_number: 0 }" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_sync_client_tthriftclient_impl(const string &client_impl_name) { f_gen_ << indent() << "impl " << SYNC_CLIENT_GENERIC_BOUND_VARS << " TThriftClient for " << client_impl_name << SYNC_CLIENT_GENERIC_BOUND_VARS << " " << SYNC_CLIENT_GENERIC_BOUNDS << " {" << endl; indent_up(); f_gen_ << indent() << "fn i_prot_mut(&mut self) -> &mut TInputProtocol { &mut self._i_prot }" << endl; f_gen_ << indent() << "fn o_prot_mut(&mut self) -> &mut TOutputProtocol { &mut self._o_prot }" << endl; f_gen_ << indent() << "fn sequence_number(&self) -> i32 { self._sequence_number }" << endl; f_gen_ << indent() << "fn increment_sequence_number(&mut self) -> i32 { self._sequence_number += 1; self._sequence_number }" << endl; indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << endl; } void t_rs_generator::render_sync_client_process_impl(t_service* tservice) { string marker_extension = "" + sync_client_marker_traits_for_extension(tservice); f_gen_ << "impl <C: TThriftClient + " << rust_sync_client_marker_trait_name(tservice) << marker_extension << "> " << rust_sync_client_trait_name(tservice) << " for C {" << endl; indent_up(); const std::vector<t_function*> functions = tservice->get_functions(); std::vector<t_function*>::const_iterator func_iter; for(func_iter = functions.begin(); func_iter != functions.end(); ++func_iter) { t_function* func = (*func_iter); render_sync_send_recv_wrapper(func); } indent_down(); f_gen_ << "}" << endl; f_gen_ << endl; } string t_rs_generator::sync_client_marker_traits_for_extension(t_service *tservice) { string marker_extension; t_service* extends = tservice->get_extends(); if (extends) { marker_extension = " + " + rust_namespace(extends) + rust_sync_client_marker_trait_name(extends); marker_extension = marker_extension + sync_client_marker_traits_for_extension(extends); } return marker_extension; } void t_rs_generator::render_sync_send_recv_wrapper(t_function* tfunc) { string func_name = service_call_client_function_name(tfunc); string func_decl_args = rust_sync_service_call_declaration(tfunc, true); string func_call_args = rust_sync_service_call_invocation(tfunc); string func_return = to_rust_type(tfunc->get_returntype()); f_gen_ << indent() << "fn " << func_name << func_decl_args << " -> thrift::Result<" << func_return << "> {" << endl; indent_up(); f_gen_ << indent() << "(" << endl; indent_up(); render_sync_send(tfunc); indent_down(); f_gen_ << indent() << ")?;" << endl; if (tfunc->is_oneway()) { f_gen_ << indent() << "Ok(())" << endl; } else { render_sync_recv(tfunc); } indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_sync_send(t_function* tfunc) { f_gen_ << indent() << "{" << endl; indent_up(); // increment the sequence number and generate the call header string message_type = tfunc->is_oneway() ? 
"TMessageType::OneWay" : "TMessageType::Call"; f_gen_ << indent() << "self.increment_sequence_number();" << endl; f_gen_ << indent() << "let message_ident = " << "TMessageIdentifier::new(\"" << tfunc->get_name() << "\", " // note: use *original* name << message_type << ", " << "self.sequence_number());" << endl; // pack the arguments into the containing struct that we'll write out over the wire // note that this struct is generated even if we have 0 args ostringstream struct_definition; vector<t_field*> members = tfunc->get_arglist()->get_sorted_members(); vector<t_field*>::iterator members_iter; for (members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* member = (*members_iter); string member_name(rust_field_name(member)); struct_definition << member_name << ": " << member_name << ", "; } string struct_fields = struct_definition.str(); if (struct_fields.size() > 0) { struct_fields = struct_fields.substr(0, struct_fields.size() - 2); // strip trailing comma } f_gen_ << indent() << "let call_args = " << rust_struct_name(tfunc->get_arglist()) << " { " << struct_fields << " };" << endl; // write everything over the wire f_gen_ << indent() << "self.o_prot_mut().write_message_begin(&message_ident)?;" << endl; f_gen_ << indent() << "call_args.write_to_out_protocol(self.o_prot_mut())?;" << endl; // written even if we have 0 args f_gen_ << indent() << "self.o_prot_mut().write_message_end()?;" << endl; f_gen_ << indent() << "self.o_prot_mut().flush()" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_sync_recv(t_function* tfunc) { f_gen_ << indent() << "{" << endl; indent_up(); f_gen_ << indent() << "let message_ident = self.i_prot_mut().read_message_begin()?;" << endl; f_gen_ << indent() << "verify_expected_sequence_number(self.sequence_number(), message_ident.sequence_number)?;" << endl; f_gen_ << indent() << "verify_expected_service_call(\"" << tfunc->get_name() <<"\", &message_ident.name)?;" << endl; // note: use *original* name // FIXME: replace with a "try" block f_gen_ << indent() << "if message_ident.message_type == TMessageType::Exception {" << endl; indent_up(); f_gen_ << indent() << "let remote_error = thrift::Error::read_application_error_from_in_protocol(self.i_prot_mut())?;" << endl; f_gen_ << indent() << "self.i_prot_mut().read_message_end()?;" << endl; f_gen_ << indent() << "return Err(thrift::Error::Application(remote_error))" << endl; indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << indent() << "verify_expected_message_type(TMessageType::Reply, message_ident.message_type)?;" << endl; f_gen_ << indent() << "let result = " << service_call_result_struct_name(tfunc) << "::read_from_in_protocol(self.i_prot_mut())?;" << endl; f_gen_ << indent() << "self.i_prot_mut().read_message_end()?;" << endl; f_gen_ << indent() << "result.ok_or()" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } string t_rs_generator::rust_sync_service_call_declaration(t_function* tfunc, bool self_is_mutable) { ostringstream func_args; if (self_is_mutable) { func_args << "(&mut self"; } else { func_args << "(&self"; } if (has_args(tfunc)) { func_args << ", "; // put comma after "self" func_args << struct_to_declaration(tfunc->get_arglist(), T_ARGS); } func_args << ")"; return func_args.str(); } string t_rs_generator::rust_sync_service_call_invocation(t_function* tfunc, const string& field_prefix) { ostringstream func_args; func_args << "("; if (has_args(tfunc)) { func_args << struct_to_invocation(tfunc->get_arglist(), 
field_prefix); } func_args << ")"; return func_args.str(); } string t_rs_generator::struct_to_declaration(t_struct* tstruct, t_rs_generator::e_struct_type struct_type) { ostringstream args; bool first_arg = true; std::vector<t_field*> fields = tstruct->get_sorted_members(); std::vector<t_field*>::iterator field_iter; for (field_iter = fields.begin(); field_iter != fields.end(); ++field_iter) { t_field* tfield = (*field_iter); t_field::e_req field_req = actual_field_req(tfield, struct_type); string rust_type = to_rust_type(tfield->get_type()); rust_type = is_optional(field_req) ? "Option<" + rust_type + ">" : rust_type; if (first_arg) { first_arg = false; } else { args << ", "; } args << rust_field_name(tfield) << ": " << rust_type; } return args.str(); } string t_rs_generator::struct_to_invocation(t_struct* tstruct, const string& field_prefix) { ostringstream args; bool first_arg = true; std::vector<t_field*> fields = tstruct->get_sorted_members(); std::vector<t_field*>::iterator field_iter; for (field_iter = fields.begin(); field_iter != fields.end(); ++field_iter) { t_field* tfield = (*field_iter); if (first_arg) { first_arg = false; } else { args << ", "; } args << field_prefix << rust_field_name(tfield); } return args.str(); } void t_rs_generator::render_result_value_struct(t_function* tfunc) { string result_struct_name = service_call_result_struct_name(tfunc); t_struct result(program_, result_struct_name); t_field return_value(tfunc->get_returntype(), SERVICE_RESULT_VARIABLE, 0); return_value.set_req(t_field::T_OPTIONAL); if (!tfunc->get_returntype()->is_void()) { result.append(&return_value); } t_struct* exceptions = tfunc->get_xceptions(); const vector<t_field*>& exception_types = exceptions->get_members(); vector<t_field*>::const_iterator exception_iter; for(exception_iter = exception_types.begin(); exception_iter != exception_types.end(); ++exception_iter) { t_field* exception_type = *exception_iter; exception_type->set_req(t_field::T_OPTIONAL); result.append(exception_type); } render_struct(result_struct_name, &result, t_rs_generator::T_RESULT); } //----------------------------------------------------------------------------- // // Sync Processor // //----------------------------------------------------------------------------- void t_rs_generator::render_sync_processor(t_service *tservice) { render_type_comment(tservice->get_name() + " service processor"); // note: use *original* name render_sync_handler_trait(tservice); render_sync_processor_definition_and_impl(tservice); } void t_rs_generator::render_sync_handler_trait(t_service *tservice) { string extension = ""; if (tservice->get_extends() != NULL) { t_service* extends = tservice->get_extends(); extension = " : " + rust_namespace(extends) + rust_sync_handler_trait_name(extends); } const std::vector<t_function*> functions = tservice->get_functions(); std::vector<t_function*>::const_iterator func_iter; render_rustdoc((t_doc*) tservice); f_gen_ << "pub trait " << rust_sync_handler_trait_name(tservice) << extension << " {" << endl; indent_up(); for(func_iter = functions.begin(); func_iter != functions.end(); ++func_iter) { t_function* tfunc = (*func_iter); string func_name = service_call_handler_function_name(tfunc); string func_args = rust_sync_service_call_declaration(tfunc, false); string func_return = to_rust_type(tfunc->get_returntype()); render_rustdoc((t_doc*) tfunc); f_gen_ << indent() << "fn " << func_name << func_args << " -> thrift::Result<" << func_return << ">;" << endl; } indent_down(); f_gen_ << indent() << "}" << 
endl; f_gen_ << endl; } void t_rs_generator::render_sync_processor_definition_and_impl(t_service *tservice) { string service_processor_name = rust_sync_processor_name(tservice); string handler_trait_name = rust_sync_handler_trait_name(tservice); // struct f_gen_ << indent() << "pub struct " << service_processor_name << "<H: " << handler_trait_name << "> {" << endl; indent_up(); f_gen_ << indent() << "handler: H," << endl; indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << endl; // delegating impl f_gen_ << indent() << "impl <H: " << handler_trait_name << "> " << service_processor_name << "<H> {" << endl; indent_up(); f_gen_ << indent() << "pub fn new(handler: H) -> " << service_processor_name << "<H> {" << endl; indent_up(); f_gen_ << indent() << service_processor_name << " {" << endl; indent_up(); f_gen_ << indent() << "handler," << endl; indent_down(); f_gen_ << indent() << "}" << endl; indent_down(); f_gen_ << indent() << "}" << endl; render_sync_process_delegation_functions(tservice); indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << endl; // actual impl string service_actual_processor_name = rust_sync_processor_impl_name(tservice); f_gen_ << indent() << "pub struct " << service_actual_processor_name << ";" << endl; f_gen_ << endl; f_gen_ << indent() << "impl " << service_actual_processor_name << " {" << endl; indent_up(); vector<t_function*> functions = tservice->get_functions(); vector<t_function*>::iterator func_iter; for(func_iter = functions.begin(); func_iter != functions.end(); ++func_iter) { t_function* tfunc = (*func_iter); render_sync_process_function(tfunc, handler_trait_name); } indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << endl; // processor impl f_gen_ << indent() << "impl <H: " << handler_trait_name << "> TProcessor for " << service_processor_name << "<H> {" << endl; indent_up(); f_gen_ << indent() << "fn process(&self, i_prot: &mut TInputProtocol, o_prot: &mut TOutputProtocol) -> thrift::Result<()> {" << endl; indent_up(); f_gen_ << indent() << "let message_ident = i_prot.read_message_begin()?;" << endl; f_gen_ << indent() << "let res = match &*message_ident.name {" << endl; // [sigh] explicit deref coercion indent_up(); render_process_match_statements(tservice); f_gen_ << indent() << "method => {" << endl; indent_up(); render_rift_error( "Application", "ApplicationError", "ApplicationErrorKind::UnknownMethod", "format!(\"unknown method {}\", method)" ); indent_down(); f_gen_ << indent() << "}," << endl; indent_down(); f_gen_ << indent() << "};" << endl; f_gen_ << indent() << "thrift::server::handle_process_result(&message_ident, res, o_prot)" << endl; indent_down(); f_gen_ << indent() << "}" << endl; indent_down(); f_gen_ << indent() << "}" << endl; f_gen_ << endl; } void t_rs_generator::render_sync_process_delegation_functions(t_service *tservice) { string actual_processor(rust_namespace(tservice) + rust_sync_processor_impl_name(tservice)); vector<t_function*> functions = tservice->get_functions(); vector<t_function*>::iterator func_iter; for(func_iter = functions.begin(); func_iter != functions.end(); ++func_iter) { t_function* tfunc = (*func_iter); string function_name("process_" + rust_snake_case(tfunc->get_name())); f_gen_ << indent() << "fn " << function_name << "(&self, " << "incoming_sequence_number: i32, " << "i_prot: &mut TInputProtocol, " << "o_prot: &mut TOutputProtocol) " << "-> thrift::Result<()> {" << endl; indent_up(); f_gen_ << indent() << actual_processor << "::" << function_name << "(" << "&self.handler, " << 
"incoming_sequence_number, " << "i_prot, " << "o_prot" << ")" << endl; indent_down(); f_gen_ << indent() << "}" << endl; } t_service* extends = tservice->get_extends(); if (extends) { render_sync_process_delegation_functions(extends); } } void t_rs_generator::render_process_match_statements(t_service* tservice) { vector<t_function*> functions = tservice->get_functions(); vector<t_function*>::iterator func_iter; for(func_iter = functions.begin(); func_iter != functions.end(); ++func_iter) { t_function* tfunc = (*func_iter); f_gen_ << indent() << "\"" << tfunc->get_name() << "\"" << " => {" << endl; // note: use *original* name indent_up(); f_gen_ << indent() << "self.process_" << rust_snake_case(tfunc->get_name()) << "(message_ident.sequence_number, i_prot, o_prot)" << endl; indent_down(); f_gen_ << indent() << "}," << endl; } t_service* extends = tservice->get_extends(); if (extends) { render_process_match_statements(extends); } } void t_rs_generator::render_sync_process_function(t_function *tfunc, const string &handler_type) { string sequence_number_param("incoming_sequence_number"); string output_protocol_param("o_prot"); if (tfunc->is_oneway()) { sequence_number_param = "_"; output_protocol_param = "_"; } f_gen_ << indent() << "pub fn process_" << rust_snake_case(tfunc->get_name()) << "<H: " << handler_type << ">" << "(handler: &H, " << sequence_number_param << ": i32, " << "i_prot: &mut TInputProtocol, " << output_protocol_param << ": &mut TOutputProtocol) " << "-> thrift::Result<()> {" << endl; indent_up(); // *always* read arguments from the input protocol f_gen_ << indent() << "let " << (has_non_void_args(tfunc) ? "args" : "_") << " = " << rust_struct_name(tfunc->get_arglist()) << "::read_from_in_protocol(i_prot)?;" << endl; f_gen_ << indent() << "match handler." << service_call_handler_function_name(tfunc) << rust_sync_service_call_invocation(tfunc, "args.") << " {" << endl; // start match indent_up(); // handler succeeded string handler_return_variable = tfunc->is_oneway() || tfunc->get_returntype()->is_void() ? 
"_" : "handler_return"; f_gen_ << indent() << "Ok(" << handler_return_variable << ") => {" << endl; indent_up(); render_sync_handler_succeeded(tfunc); indent_down(); f_gen_ << indent() << "}," << endl; // handler failed f_gen_ << indent() << "Err(e) => {" << endl; indent_up(); render_sync_handler_failed(tfunc); indent_down(); f_gen_ << indent() << "}," << endl; indent_down(); f_gen_ << indent() << "}" << endl; // end match indent_down(); f_gen_ << indent() << "}" << endl; // end function } void t_rs_generator::render_sync_handler_succeeded(t_function *tfunc) { if (tfunc->is_oneway()) { f_gen_ << indent() << "Ok(())" << endl; } else { f_gen_ << indent() << "let message_ident = TMessageIdentifier::new(" << "\"" << tfunc->get_name() << "\", " // note: use *original* name << "TMessageType::Reply, " << "incoming_sequence_number);" << endl; f_gen_ << indent() << "o_prot.write_message_begin(&message_ident)?;" << endl; f_gen_ << indent() << "let ret = " << handler_successful_return_struct(tfunc) <<";" << endl; f_gen_ << indent() << "ret.write_to_out_protocol(o_prot)?;" << endl; f_gen_ << indent() << "o_prot.write_message_end()?;" << endl; f_gen_ << indent() << "o_prot.flush()" << endl; } } void t_rs_generator::render_sync_handler_failed(t_function *tfunc) { string err_var("e"); f_gen_ << indent() << "match " << err_var << " {" << endl; indent_up(); // if there are any user-defined exceptions for this service call handle them first if (tfunc->get_xceptions() != NULL && tfunc->get_xceptions()->get_sorted_members().size() > 0) { string user_err_var("usr_err"); f_gen_ << indent() << "thrift::Error::User(" << user_err_var << ") => {" << endl; indent_up(); render_sync_handler_failed_user_exception_branch(tfunc); indent_down(); f_gen_ << indent() << "}," << endl; } // application error string app_err_var("app_err"); f_gen_ << indent() << "thrift::Error::Application(" << app_err_var << ") => {" << endl; indent_up(); render_sync_handler_failed_application_exception_branch(tfunc, app_err_var); indent_down(); f_gen_ << indent() << "}," << endl; // default case f_gen_ << indent() << "_ => {" << endl; indent_up(); render_sync_handler_failed_default_exception_branch(tfunc); indent_down(); f_gen_ << indent() << "}," << endl; indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_sync_handler_failed_user_exception_branch(t_function *tfunc) { if (tfunc->get_xceptions() == NULL || tfunc->get_xceptions()->get_sorted_members().empty()) { throw "cannot render user exception branches if no user exceptions defined"; } const vector<t_field*> txceptions = tfunc->get_xceptions()->get_sorted_members(); vector<t_field*>::const_iterator xception_iter; int branches_rendered = 0; // run through all user-defined exceptions for (xception_iter = txceptions.begin(); xception_iter != txceptions.end(); ++xception_iter) { t_field* xception_field = (*xception_iter); string if_statement(branches_rendered == 0 ? 
"if usr_err" : "} else if usr_err"); string exception_type(to_rust_type(xception_field->get_type())); f_gen_ << indent() << if_statement << ".downcast_ref::<" << exception_type << ">().is_some() {" << endl; indent_up(); f_gen_ << indent() << "let err = usr_err.downcast::<" << exception_type << ">().expect(\"downcast already checked\");" << endl; // render the members of the return struct ostringstream members; bool has_result_variable = !(tfunc->is_oneway() || tfunc->get_returntype()->is_void()); if (has_result_variable) { members << SERVICE_RESULT_VARIABLE << ": None, "; } vector<t_field*>::const_iterator xception_members_iter; for(xception_members_iter = txceptions.begin(); xception_members_iter != txceptions.end(); ++xception_members_iter) { t_field* member = (*xception_members_iter); string member_name(rust_field_name(member)); if (member == xception_field) { members << member_name << ": Some(*err), "; } else { members << member_name << ": None, "; } } string member_string = members.str(); member_string.replace(member_string.size() - 2, 2, " "); // trim trailing comma // now write out the return struct f_gen_ << indent() << "let ret_err = " << service_call_result_struct_name(tfunc) << "{ " << member_string << "};" << endl; f_gen_ << indent() << "let message_ident = " << "TMessageIdentifier::new(" << "\"" << tfunc->get_name() << "\", " // note: use *original* name << "TMessageType::Reply, " << "incoming_sequence_number);" << endl; f_gen_ << indent() << "o_prot.write_message_begin(&message_ident)?;" << endl; f_gen_ << indent() << "ret_err.write_to_out_protocol(o_prot)?;" << endl; f_gen_ << indent() << "o_prot.write_message_end()?;" << endl; f_gen_ << indent() << "o_prot.flush()" << endl; indent_down(); branches_rendered++; } // the catch all, if somehow it was a user exception that we don't support f_gen_ << indent() << "} else {" << endl; indent_up(); // FIXME: same as default block below f_gen_ << indent() << "let ret_err = {" << endl; indent_up(); render_rift_error_struct("ApplicationError", "ApplicationErrorKind::Unknown", "usr_err.description()"); indent_down(); f_gen_ << indent() << "};" << endl; render_sync_handler_send_exception_response(tfunc, "ret_err"); indent_down(); f_gen_ << indent() << "}" << endl; } void t_rs_generator::render_sync_handler_failed_application_exception_branch( t_function *tfunc, const string &app_err_var ) { if (tfunc->is_oneway()) { f_gen_ << indent() << "Err(thrift::Error::Application(" << app_err_var << "))" << endl; } else { render_sync_handler_send_exception_response(tfunc, app_err_var); } } void t_rs_generator::render_sync_handler_failed_default_exception_branch(t_function *tfunc) { f_gen_ << indent() << "let ret_err = {" << endl; indent_up(); render_rift_error_struct("ApplicationError", "ApplicationErrorKind::Unknown", "e.description()"); indent_down(); f_gen_ << indent() << "};" << endl; if (tfunc->is_oneway()) { f_gen_ << indent() << "Err(thrift::Error::Application(ret_err))" << endl; } else { render_sync_handler_send_exception_response(tfunc, "ret_err"); } } void t_rs_generator::render_sync_handler_send_exception_response(t_function *tfunc, const string &err_var) { f_gen_ << indent() << "let message_ident = TMessageIdentifier::new(" << "\"" << tfunc->get_name() << "\", " // note: use *original* name << "TMessageType::Exception, " << "incoming_sequence_number);" << endl; f_gen_ << indent() << "o_prot.write_message_begin(&message_ident)?;" << endl; f_gen_ << indent() << "thrift::Error::write_application_error_to_out_protocol(&" << err_var << ", 
o_prot)?;" << endl; f_gen_ << indent() << "o_prot.write_message_end()?;" << endl; f_gen_ << indent() << "o_prot.flush()" << endl; } string t_rs_generator::handler_successful_return_struct(t_function* tfunc) { int member_count = 0; ostringstream return_struct; return_struct << service_call_result_struct_name(tfunc) << " { "; // actual return if (!tfunc->get_returntype()->is_void()) { return_struct << "result_value: Some(handler_return)"; member_count++; } // any user-defined exceptions if (tfunc->get_xceptions() != NULL) { t_struct* txceptions = tfunc->get_xceptions(); const vector<t_field*> members = txceptions->get_sorted_members(); vector<t_field*>::const_iterator members_iter; for (members_iter = members.begin(); members_iter != members.end(); ++members_iter) { t_field* xception_field = (*members_iter); if (member_count > 0) { return_struct << ", "; } return_struct << rust_field_name(xception_field) << ": None"; member_count++; } } return_struct << " }"; return return_struct.str(); } //----------------------------------------------------------------------------- // // Utility // //----------------------------------------------------------------------------- void t_rs_generator::render_type_comment(const string& type_name) { f_gen_ << "//" << endl; f_gen_ << "// " << type_name << endl; f_gen_ << "//" << endl; f_gen_ << endl; } // NOTE: do *not* put in an extra newline after doc is generated. // This is because rust docs have to abut the line they're documenting. void t_rs_generator::render_rustdoc(t_doc* tdoc) { if (!tdoc->has_doc()) { return; } generate_docstring_comment(f_gen_, "", "/// ", tdoc->get_doc(), ""); } void t_rs_generator::render_rift_error( const string& error_kind, const string& error_struct, const string& sub_error_kind, const string& error_message ) { f_gen_ << indent() << "Err(" << endl; indent_up(); f_gen_ << indent() << "thrift::Error::" << error_kind << "(" << endl; indent_up(); render_rift_error_struct(error_struct, sub_error_kind, error_message); indent_down(); f_gen_ << indent() << ")" << endl; indent_down(); f_gen_ << indent() << ")" << endl; } void t_rs_generator::render_rift_error_struct( const string& error_struct, const string& sub_error_kind, const string& error_message ) { f_gen_ << indent() << error_struct << "::new(" << endl; indent_up(); f_gen_ << indent() << sub_error_kind << "," << endl; f_gen_ << indent() << error_message << endl; indent_down(); f_gen_ << indent() << ")" << endl; } bool t_rs_generator::is_double(t_type* ttype) { ttype = get_true_type(ttype); if (ttype->is_base_type()) { t_base_type::t_base tbase = ((t_base_type*)ttype)->get_base(); if (tbase == t_base_type::TYPE_DOUBLE) { return true; } } return false; } string t_rs_generator::to_rust_type(t_type* ttype, bool ordered_float) { // ttype = get_true_type(ttype); <-- recurses through as many typedef layers as necessary if (ttype->is_base_type()) { t_base_type* tbase_type = ((t_base_type*)ttype); switch (tbase_type->get_base()) { case t_base_type::TYPE_VOID: return "()"; case t_base_type::TYPE_STRING: if (tbase_type->is_binary()) { return "Vec<u8>"; } else { return "String"; } case t_base_type::TYPE_BOOL: return "bool"; case t_base_type::TYPE_I8: return "i8"; case t_base_type::TYPE_I16: return "i16"; case t_base_type::TYPE_I32: return "i32"; case t_base_type::TYPE_I64: return "i64"; case t_base_type::TYPE_DOUBLE: if (ordered_float) { return "OrderedFloat<f64>"; } else { return "f64"; } } } else if (ttype->is_typedef()) { t_typedef* ttypedef = (t_typedef*)ttype; string rust_type = 
rust_namespace(ttype) + ttypedef->get_symbolic(); rust_type = ttypedef->is_forward_typedef() ? "Box<" + rust_type + ">" : rust_type; return rust_type; } else if (ttype->is_enum()) { return rust_namespace(ttype) + ttype->get_name(); } else if (ttype->is_struct() || ttype->is_xception()) { return rust_namespace(ttype) + rust_camel_case(ttype->get_name()); } else if (ttype->is_map()) { t_map* tmap = (t_map*)ttype; return "BTreeMap<" + to_rust_type(tmap->get_key_type()) + ", " + to_rust_type(tmap->get_val_type()) + ">"; } else if (ttype->is_set()) { t_set* tset = (t_set*)ttype; return "BTreeSet<" + to_rust_type(tset->get_elem_type()) + ">"; } else if (ttype->is_list()) { t_list* tlist = (t_list*)ttype; return "Vec<" + to_rust_type(tlist->get_elem_type()) + ">"; } throw "cannot find rust type for " + ttype->get_name(); } string t_rs_generator::to_rust_field_type_enum(t_type* ttype) { ttype = get_true_type(ttype); if (ttype->is_base_type()) { t_base_type::t_base tbase = ((t_base_type*)ttype)->get_base(); switch (tbase) { case t_base_type::TYPE_VOID: throw "will not generate protocol::TType for TYPE_VOID"; case t_base_type::TYPE_STRING: // both strings and binary are actually encoded as TType::String return "TType::String"; case t_base_type::TYPE_BOOL: return "TType::Bool"; case t_base_type::TYPE_I8: return "TType::I08"; case t_base_type::TYPE_I16: return "TType::I16"; case t_base_type::TYPE_I32: return "TType::I32"; case t_base_type::TYPE_I64: return "TType::I64"; case t_base_type::TYPE_DOUBLE: return "TType::Double"; } } else if (ttype->is_enum()) { return "TType::I32"; } else if (ttype->is_struct() || ttype->is_xception()) { return "TType::Struct"; } else if (ttype->is_map()) { return "TType::Map"; } else if (ttype->is_set()) { return "TType::Set"; } else if (ttype->is_list()) { return "TType::List"; } throw "cannot find TType for " + ttype->get_name(); } string t_rs_generator::opt_in_req_out_value(t_type* ttype) { ttype = get_true_type(ttype); if (ttype->is_base_type()) { t_base_type* tbase_type = ((t_base_type*)ttype); switch (tbase_type->get_base()) { case t_base_type::TYPE_VOID: throw "cannot generate OPT_IN_REQ_OUT value for void"; case t_base_type::TYPE_STRING: if (tbase_type->is_binary()) { return "Some(Vec::new())"; } else { return "Some(\"\".to_owned())"; } case t_base_type::TYPE_BOOL: return "Some(false)"; case t_base_type::TYPE_I8: case t_base_type::TYPE_I16: case t_base_type::TYPE_I32: case t_base_type::TYPE_I64: return "Some(0)"; case t_base_type::TYPE_DOUBLE: return "Some(OrderedFloat::from(0.0))"; } } else if (ttype->is_enum() || ttype->is_struct() || ttype->is_xception()) { return "None"; } else if (ttype->is_list()) { return "Some(Vec::new())"; } else if (ttype->is_set()) { return "Some(BTreeSet::new())"; } else if (ttype->is_map()) { return "Some(BTreeMap::new())"; } throw "cannot generate opt-in-req-out value for type " + ttype->get_name(); } bool t_rs_generator::can_generate_simple_const(t_type* ttype) { t_type* actual_type = get_true_type(ttype); if (actual_type->is_base_type()) { t_base_type* tbase_type = (t_base_type*)actual_type; return !(tbase_type->get_base() == t_base_type::TYPE_DOUBLE); } else { return false; } } bool t_rs_generator::can_generate_const_holder(t_type* ttype) { t_type* actual_type = get_true_type(ttype); return !can_generate_simple_const(actual_type) && !actual_type->is_service(); } bool t_rs_generator::is_void(t_type* ttype) { return ttype->is_base_type() && ((t_base_type*)ttype)->get_base() == t_base_type::TYPE_VOID; } bool 
t_rs_generator::is_optional(t_field::e_req req) { return req == t_field::T_OPTIONAL || req == t_field::T_OPT_IN_REQ_OUT; } t_field::e_req t_rs_generator::actual_field_req(t_field* tfield, t_rs_generator::e_struct_type struct_type) { return struct_type == t_rs_generator::T_ARGS ? t_field::T_REQUIRED : tfield->get_req(); } bool t_rs_generator::has_args(t_function* tfunc) { return tfunc->get_arglist() != NULL && !tfunc->get_arglist()->get_sorted_members().empty(); } bool t_rs_generator::has_non_void_args(t_function* tfunc) { bool has_non_void_args = false; const vector<t_field*> args = tfunc->get_arglist()->get_sorted_members(); vector<t_field*>::const_iterator args_iter; for (args_iter = args.begin(); args_iter != args.end(); ++args_iter) { t_field* tfield = (*args_iter); if (!tfield->get_type()->is_void()) { has_non_void_args = true; break; } } return has_non_void_args; } string t_rs_generator::visibility_qualifier(t_rs_generator::e_struct_type struct_type) { switch(struct_type) { case t_rs_generator::T_ARGS: case t_rs_generator::T_RESULT: return ""; default: return "pub "; } } string t_rs_generator::rust_namespace(t_service* tservice) { if (tservice->get_program()->get_name() != get_program()->get_name()) { return rust_snake_case(tservice->get_program()->get_name()) + "::"; } else { return ""; } } string t_rs_generator::rust_namespace(t_type* ttype) { if (ttype->get_program()->get_name() != get_program()->get_name()) { return rust_snake_case(ttype->get_program()->get_name()) + "::"; } else { return ""; } } bool t_rs_generator::is_reserved(const string& name) { return RUST_RESERVED_WORDS_SET.find(name) != RUST_RESERVED_WORDS_SET.end(); } string t_rs_generator::rust_struct_name(t_struct* tstruct) { string base_struct_name(rust_camel_case(tstruct->get_name())); return rust_safe_name(base_struct_name); } string t_rs_generator::rust_field_name(t_field* tfield) { string base_field_name(rust_snake_case(tfield->get_name())); return rust_safe_name(base_field_name); } string t_rs_generator::rust_union_field_name(t_field* tfield) { string base_field_name(rust_camel_case(tfield->get_name())); return rust_safe_name(base_field_name); } string t_rs_generator::rust_safe_name(const string& name) { if (is_reserved(name)) { return name + "_"; } else { return name; } } string t_rs_generator::service_call_client_function_name(t_function* tfunc) { return rust_snake_case(tfunc->get_name()); } string t_rs_generator::service_call_handler_function_name(t_function* tfunc) { return "handle_" + rust_snake_case(tfunc->get_name()); } string t_rs_generator::service_call_result_struct_name(t_function* tfunc) { return rust_camel_case(tfunc->get_name()) + RESULT_STRUCT_SUFFIX; } string t_rs_generator::rust_sync_client_marker_trait_name(t_service* tservice) { return "T" + rust_camel_case(tservice->get_name()) + "SyncClientMarker"; } string t_rs_generator::rust_sync_client_trait_name(t_service* tservice) { return "T" + rust_camel_case(tservice->get_name()) + "SyncClient"; } string t_rs_generator::rust_sync_client_impl_name(t_service* tservice) { return rust_camel_case(tservice->get_name()) + "SyncClient"; } string t_rs_generator::rust_sync_handler_trait_name(t_service* tservice) { return rust_camel_case(tservice->get_name()) + "SyncHandler"; } string t_rs_generator::rust_sync_processor_name(t_service* tservice) { return rust_camel_case(tservice->get_name()) + "SyncProcessor"; } string t_rs_generator::rust_sync_processor_impl_name(t_service *tservice) { return "T" + rust_camel_case(tservice->get_name()) + "ProcessFunctions"; } 
string t_rs_generator::rust_upper_case(const string& name) { string str(uppercase(underscore(name))); string_replace(str, "__", "_"); return str; } string t_rs_generator::rust_snake_case(const string& name) { string str(decapitalize(underscore(name))); string_replace(str, "__", "_"); return str; } string t_rs_generator::rust_camel_case(const string& name) { string str(capitalize(camelcase(name))); string_replace(str, "_", ""); return str; } void t_rs_generator::string_replace(string& target, const string& search_string, const string& replace_string) { if (target.empty()) { return; } size_t match_len = search_string.length(); size_t replace_len = replace_string.length(); size_t search_idx = 0; size_t match_idx; while ((match_idx = target.find(search_string, search_idx)) != string::npos) { target.replace(match_idx, match_len, replace_string); search_idx = match_idx + replace_len; } } THRIFT_REGISTER_GENERATOR( rs, "Rust", "\n") // no Rust-generator-specific options
1
14,443
Perhaps rename to `render_service_args_struct`?
apache-thrift
c
@@ -11,8 +11,8 @@ import ( "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" "github.com/filecoin-project/go-filecoin/internal/pkg/chain" "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" + crypto2 "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" "github.com/filecoin-project/go-filecoin/internal/pkg/repo" - "github.com/filecoin-project/go-filecoin/internal/pkg/types" "github.com/filecoin-project/go-filecoin/internal/pkg/wallet" )
1
package node import ( "context" bstore "github.com/ipfs/go-ipfs-blockstore" keystore "github.com/ipfs/go-ipfs-keystore" "github.com/libp2p/go-libp2p-core/crypto" "github.com/pkg/errors" "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" "github.com/filecoin-project/go-filecoin/internal/pkg/chain" "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" "github.com/filecoin-project/go-filecoin/internal/pkg/repo" "github.com/filecoin-project/go-filecoin/internal/pkg/types" "github.com/filecoin-project/go-filecoin/internal/pkg/wallet" ) const defaultPeerKeyBits = 2048 // initCfg contains configuration for initializing a node's repo. type initCfg struct { peerKey crypto.PrivKey defaultKey *types.KeyInfo initImports []*types.KeyInfo } // InitOpt is an option for initialization of a node's repo. type InitOpt func(*initCfg) // PeerKeyOpt sets the private key for a node's 'self' libp2p identity. // If unspecified, initialization will create a new one. func PeerKeyOpt(k crypto.PrivKey) InitOpt { return func(opts *initCfg) { opts.peerKey = k } } // DefaultKeyOpt sets the private key for the wallet's default account. // If unspecified, initialization will create a new one. func DefaultKeyOpt(ki *types.KeyInfo) InitOpt { return func(opts *initCfg) { opts.defaultKey = ki } } // ImportKeyOpt imports the provided key during initialization. func ImportKeyOpt(ki *types.KeyInfo) InitOpt { return func(opts *initCfg) { opts.initImports = append(opts.initImports, ki) } } // Init initializes a Filecoin repo with genesis state and keys. // This will always set the configuration for wallet default address (to the specified default // key or a newly generated one), but otherwise leave the repo's config object intact. // Make further configuration changes after initialization. 
func Init(ctx context.Context, r repo.Repo, gen consensus.GenesisInitFunc, opts ...InitOpt) error { cfg := new(initCfg) for _, o := range opts { o(cfg) } bs := bstore.NewBlockstore(r.Datastore()) cst := cborutil.NewIpldStore(bs) if _, err := chain.Init(ctx, r, bs, cst, gen); err != nil { return errors.Wrap(err, "Could not Init Node") } if err := initPeerKey(r.Keystore(), cfg.peerKey); err != nil { return err } backend, err := wallet.NewDSBackend(r.WalletDatastore()) if err != nil { return errors.Wrap(err, "failed to open wallet datastore") } w := wallet.New(backend) defaultKey, err := initDefaultKey(w, cfg.defaultKey) if err != nil { return err } err = importInitKeys(w, cfg.initImports) if err != nil { return err } defaultAddress, err := defaultKey.Address() if err != nil { return errors.Wrap(err, "failed to extract address from default key") } r.Config().Wallet.DefaultAddress = defaultAddress if err = r.ReplaceConfig(r.Config()); err != nil { return errors.Wrap(err, "failed to write config") } return nil } func initPeerKey(store keystore.Keystore, key crypto.PrivKey) error { var err error if key == nil { key, _, err = crypto.GenerateKeyPair(crypto.RSA, defaultPeerKeyBits) if err != nil { return errors.Wrap(err, "failed to create peer key") } } if err := store.Put("self", key); err != nil { return errors.Wrap(err, "failed to store private key") } return nil } func initDefaultKey(w *wallet.Wallet, key *types.KeyInfo) (*types.KeyInfo, error) { var err error if key == nil { key, err = w.NewKeyInfo() if err != nil { return nil, errors.Wrap(err, "failed to create default key") } } else { if _, err := w.Import(key); err != nil { return nil, errors.Wrap(err, "failed to import default key") } } return key, nil } func importInitKeys(w *wallet.Wallet, importKeys []*types.KeyInfo) error { for _, ki := range importKeys { _, err := w.Import(ki) if err != nil { return err } } return nil }
1
22,926
Clean up the `crypto2` import alias name (see the sketch below).
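The `crypto2` alias flagged here comes from the patch above. As a generic, runnable illustration of the convention being asked for — a short descriptive alias rather than a tool-generated numeric suffix — here is a sketch using only standard-library packages; the actual go-filecoin import and whichever alias the maintainers chose are not reproduced here.

```go
package main

import (
	crand "crypto/rand" // descriptive alias instead of an auto-generated "rand2"
	"fmt"
	"math/rand"
)

func main() {
	// "crypto/rand" and "math/rand" collide on the package name "rand", the
	// same situation as the libp2p "crypto" vs. internal "crypto" clash above;
	// a short, meaningful alias records which package is in use.
	buf := make([]byte, 8)
	if _, err := crand.Read(buf); err != nil {
		panic(err)
	}
	fmt.Println(buf, rand.Intn(10))
}
```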
filecoin-project-venus
go
@@ -70,6 +70,13 @@ func (a BrokerMetricAssertion) Assert(client *monitoring.MetricClient) error { if err != nil { return fmt.Errorf("metric has invalid response code label: %v", ts.GetMetric()) } + + // Workarounds to reduce test flakiness caused by sender pod retry sending events (which will cause unexpected response code). + // We should remove it after https://github.com/google/knative-gcp/issues/1058 lands + if code == http.StatusForbidden || code == http.StatusServiceUnavailable || code == http.StatusInternalServerError { + continue + } + if code != http.StatusAccepted { return fmt.Errorf("metric has unexpected response code: %v", ts.GetMetric()) }
1
/* Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package lib import ( "context" "fmt" "net/http" "strconv" "time" monitoring "cloud.google.com/go/monitoring/apiv3/v2" "github.com/golang/protobuf/ptypes" "github.com/google/go-cmp/cmp" "github.com/google/knative-gcp/test/e2e/lib/metrics" "google.golang.org/api/iterator" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" ) type BrokerMetricAssertion struct { ProjectID string BrokerName string BrokerNamespace string StartTime time.Time CountPerType map[string]int64 } func (a BrokerMetricAssertion) Assert(client *monitoring.MetricClient) error { ctx := context.Background() start, err := ptypes.TimestampProto(a.StartTime) if err != nil { return err } end, err := ptypes.TimestampProto(time.Now()) if err != nil { return err } it := client.ListTimeSeries(ctx, &monitoringpb.ListTimeSeriesRequest{ Name: fmt.Sprintf("projects/%s", a.ProjectID), Filter: a.StackdriverFilter(), Interval: &monitoringpb.TimeInterval{StartTime: start, EndTime: end}, View: monitoringpb.ListTimeSeriesRequest_FULL, }) gotCount := make(map[string]int64) for { ts, err := it.Next() if err == iterator.Done { break } if err != nil { return err } labels := ts.GetMetric().GetLabels() eventType := labels["event_type"] code, err := strconv.Atoi(labels["response_code"]) if err != nil { return fmt.Errorf("metric has invalid response code label: %v", ts.GetMetric()) } if code != http.StatusAccepted { return fmt.Errorf("metric has unexpected response code: %v", ts.GetMetric()) } gotCount[eventType] = gotCount[eventType] + metrics.SumCumulative(ts) } if diff := cmp.Diff(a.CountPerType, gotCount); diff != "" { return fmt.Errorf("unexpected broker metric count (-want, +got) = %v", diff) } return nil } func (a BrokerMetricAssertion) StackdriverFilter() string { filter := map[string]interface{}{ "metric.type": BrokerEventCountMetricType, "resource.type": BrokerMetricResourceType, "resource.label.namespace_name": a.BrokerNamespace, "resource.label.broker_name": a.BrokerName, } return metrics.StringifyStackDriverFilter(filter) }
1
17,643
I think we would expect StatusNotFound instead of StatusForbidden?
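If the retrying sender really produces 404 rather than 403, the workaround from the patch might be expressed as below. Which status codes belong in the ignore set is an assumption for illustration; the test itself does not confirm it.

```go
package lib

import "net/http"

// ignoredResponseCodes collects response codes treated as retry noise rather
// than assertion failures. Using StatusNotFound instead of StatusForbidden,
// per the comment above, is an assumption about what the broker returns.
var ignoredResponseCodes = map[int]bool{
	http.StatusNotFound:            true,
	http.StatusServiceUnavailable:  true,
	http.StatusInternalServerError: true,
}

// shouldIgnore reports whether a metric with this response code is skipped.
func shouldIgnore(code int) bool {
	return ignoredResponseCodes[code]
}
```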
google-knative-gcp
go
@@ -241,9 +241,11 @@ static int rexec_output (flux_subprocess_t *p, bool eof) { json_t *io = NULL; + char rankstr[64]; int rv = -1; - if (!(io = ioencode (stream, s->rank, data, len, eof))) { + snprintf (rankstr, sizeof (rankstr), "%d", s->rank); + if (!(io = ioencode (stream, rankstr, data, len, eof))) { flux_log_error (s->h, "%s: ioencode", __FUNCTION__); goto error; }
1
/************************************************************\ * Copyright 2018 Lawrence Livermore National Security, LLC * (c.f. AUTHORS, NOTICE.LLNS, COPYING) * * This file is part of the Flux resource manager framework. * For details, see https://github.com/flux-framework. * * SPDX-License-Identifier: LGPL-3.0 \************************************************************/ #if HAVE_CONFIG_H # include "config.h" #endif #include <sys/types.h> #include <wait.h> #include <unistd.h> #include <errno.h> #include <czmq.h> #include <sodium.h> #include <flux/core.h> #include "src/common/libutil/errno_safe.h" #include "src/common/libutil/log.h" #include "src/common/libutil/fdwalk.h" #include "src/common/libutil/macros.h" #include "src/common/libioencode/ioencode.h" #include "subprocess.h" #include "subprocess_private.h" #include "command.h" #include "remote.h" #include "server.h" #include "util.h" static const char *auxkey = "flux::rexec"; struct rexec { const flux_msg_t *msg; // rexec request message flux_subprocess_server_t *s; // server context }; static void rexec_destroy (struct rexec *rex) { if (rex) { flux_msg_decref (rex->msg); ERRNO_SAFE_WRAP (free, rex); } } static struct rexec *rexec_create (const flux_msg_t *msg, flux_subprocess_server_t *s) { struct rexec *rex; if ((rex = calloc (1, sizeof (*rex)))) { rex->msg = flux_msg_incref (msg); rex->s = s; } return rex; } static void subprocesses_free_fn (void *arg) { flux_subprocess_t *p = arg; flux_subprocess_unref (p); } static int store_pid (flux_subprocess_server_t *s, flux_subprocess_t *p) { pid_t pid = flux_subprocess_pid (p); char *str = NULL; int rv = -1; void *ret = NULL; if (asprintf (&str, "%d", pid) < 0) { flux_log_error (s->h, "%s: asprintf", __FUNCTION__); goto cleanup; } if (zhash_insert (s->subprocesses, str, p) < 0) { flux_log_error (s->h, "%s: zhash_insert", __FUNCTION__); goto cleanup; } ret = zhash_freefn (s->subprocesses, str, subprocesses_free_fn); assert (ret); rv = 0; cleanup: free (str); return rv; } static void remove_pid (flux_subprocess_server_t *s, flux_subprocess_t *p) { pid_t pid = flux_subprocess_pid (p); char *str = NULL; if (asprintf (&str, "%d", pid) < 0) { flux_log_error (s->h, "%s: asprintf", __FUNCTION__); goto cleanup; } zhash_delete (s->subprocesses, str); if (!zhash_size (s->subprocesses) && s->terminate_prep_w) { flux_watcher_start (s->terminate_prep_w); flux_watcher_start (s->terminate_check_w); } cleanup: free (str); } static flux_subprocess_t *lookup_pid (flux_subprocess_server_t *s, pid_t pid) { flux_subprocess_t *p = NULL; char *str = NULL; int save_errno; if (asprintf (&str, "%d", pid) < 0) goto cleanup; if (!(p = zhash_lookup (s->subprocesses, str))) { errno = ENOENT; goto cleanup; } cleanup: save_errno = errno; free (str); errno = save_errno; return p; } static void subprocess_cleanup (flux_subprocess_t *p) { struct rexec *rex = flux_subprocess_aux_get (p, auxkey); assert (rex != NULL); remove_pid (rex->s, p); } static void rexec_completion_cb (flux_subprocess_t *p) { struct rexec *rex = flux_subprocess_aux_get (p, auxkey); assert (rex != NULL); if (p->state != FLUX_SUBPROCESS_FAILED) { /* no fallback if this fails */ if (flux_respond_pack (rex->s->h, rex->msg, "{s:s s:i}", "type", "complete", "rank", rex->s->rank) < 0) flux_log_error (rex->s->h, "%s: flux_respond_pack", __FUNCTION__); } subprocess_cleanup (p); } static void internal_fatal (flux_subprocess_server_t *s, flux_subprocess_t *p) { if (p->state == FLUX_SUBPROCESS_FAILED) return; /* report of state change handled through typical state 
change * callback. Normally cleanup occurs through completion of local * subprocess. */ p->state = FLUX_SUBPROCESS_FAILED; p->failed_errno = errno; state_change_start (p); /* if we fail here, probably not much can be done */ if (killpg (p->pid, SIGKILL) < 0) { if (errno != ESRCH) flux_log_error (s->h, "%s: kill", __FUNCTION__); } } static void rexec_state_change_cb (flux_subprocess_t *p, flux_subprocess_state_t state) { struct rexec *rex = flux_subprocess_aux_get (p, auxkey); assert (rex != NULL); if (state == FLUX_SUBPROCESS_RUNNING) { if (store_pid (rex->s, p) < 0) goto error; if (flux_respond_pack (rex->s->h, rex->msg, "{s:s s:i s:i s:i}", "type", "state", "rank", rex->s->rank, "pid", flux_subprocess_pid (p), "state", state) < 0) { flux_log_error (rex->s->h, "%s: flux_respond_pack", __FUNCTION__); goto error; } } else if (state == FLUX_SUBPROCESS_EXITED) { if (flux_respond_pack (rex->s->h, rex->msg, "{s:s s:i s:i s:i}", "type", "state", "rank", rex->s->rank, "state", state, "status", flux_subprocess_status (p)) < 0) { flux_log_error (rex->s->h, "%s: flux_respond_pack", __FUNCTION__); goto error; } } else if (state == FLUX_SUBPROCESS_FAILED) { if (flux_respond_pack (rex->s->h, rex->msg, "{s:s s:i s:i s:i}", "type", "state", "rank", rex->s->rank, "state", FLUX_SUBPROCESS_FAILED, "errno", p->failed_errno) < 0) { flux_log_error (rex->s->h, "%s: flux_respond_pack", __FUNCTION__); goto error; } subprocess_cleanup (p); } else { errno = EPROTO; flux_log_error (rex->s->h, "%s: illegal state", __FUNCTION__); goto error; } return; error: internal_fatal (rex->s, p); } static int rexec_output (flux_subprocess_t *p, const char *stream, flux_subprocess_server_t *s, const flux_msg_t *msg, const char *data, int len, bool eof) { json_t *io = NULL; int rv = -1; if (!(io = ioencode (stream, s->rank, data, len, eof))) { flux_log_error (s->h, "%s: ioencode", __FUNCTION__); goto error; } if (flux_respond_pack (s->h, msg, "{s:s s:i s:i s:O}", "type", "output", "rank", s->rank, "pid", flux_subprocess_pid (p), "io", io) < 0) { flux_log_error (s->h, "%s: flux_respond_pack", __FUNCTION__); goto error; } rv = 0; error: json_decref (io); return rv; } static void rexec_output_cb (flux_subprocess_t *p, const char *stream) { struct rexec *rex = flux_subprocess_aux_get (p, auxkey); const char *ptr; int lenp; assert (rex != NULL); if (!(ptr = flux_subprocess_read (p, stream, -1, &lenp))) { flux_log_error (rex->s->h, "%s: flux_subprocess_read", __FUNCTION__); goto error; } if (lenp) { if (rexec_output (p, stream, rex->s, rex->msg, ptr, lenp, false) < 0) goto error; } else { if (rexec_output (p, stream, rex->s, rex->msg, NULL, 0, true) < 0) goto error; } return; error: internal_fatal (rex->s, p); } static void server_exec_cb (flux_t *h, flux_msg_handler_t *mh, const flux_msg_t *msg, void *arg) { flux_subprocess_server_t *s = arg; const char *cmd_str; flux_cmd_t *cmd = NULL; struct rexec *rex; flux_subprocess_t *p = NULL; flux_subprocess_ops_t ops = { .on_completion = rexec_completion_cb, .on_state_change = rexec_state_change_cb, .on_channel_out = rexec_output_cb, .on_stdout = rexec_output_cb, .on_stderr = rexec_output_cb, }; int on_channel_out, on_stdout, on_stderr; char **env = NULL; if (flux_request_unpack (msg, NULL, "{s:s s:i s:i s:i}", "cmd", &cmd_str, "on_channel_out", &on_channel_out, "on_stdout", &on_stdout, "on_stderr", &on_stderr)) goto error; if (!on_channel_out) ops.on_channel_out = NULL; if (!on_stdout) ops.on_stdout = NULL; if (!on_stderr) ops.on_stderr = NULL; if (!(cmd = flux_cmd_fromjson (cmd_str, NULL))) 
goto error; if (!flux_cmd_argc (cmd)) { errno = EPROTO; goto error; } if (!flux_cmd_getcwd (cmd)) { errno = EPROTO; goto error; } if (!(env = flux_cmd_env_expand (cmd))) goto error; /* if no environment sent, use local server environment */ if (env[0] == NULL) { if (flux_cmd_set_env (cmd, environ) < 0) { flux_log_error (s->h, "%s: flux_cmd_set_env", __FUNCTION__); goto error; } } if (flux_cmd_setenvf (cmd, 1, "FLUX_URI", "%s", s->local_uri) < 0) goto error; if (flux_respond_pack (s->h, msg, "{s:s s:i}", "type", "start", "rank", s->rank) < 0) { flux_log_error (s->h, "%s: flux_respond_pack", __FUNCTION__); goto error; } if (!(p = flux_exec (s->h, FLUX_SUBPROCESS_FLAGS_SETPGRP, cmd, &ops, NULL))) { /* error here, generate FLUX_SUBPROCESS_EXEC_FAILED state */ if (flux_respond_pack (h, msg, "{s:s s:i s:i s:i}", "type", "state", "rank", s->rank, "state", FLUX_SUBPROCESS_EXEC_FAILED, "errno", errno) < 0) { flux_log_error (h, "%s: flux_respond_pack", __FUNCTION__); goto error; } goto cleanup; } if (!(rex = rexec_create (msg, s))) goto error; if (flux_subprocess_aux_set (p, auxkey, rex, (flux_free_f)rexec_destroy) < 0) { rexec_destroy (rex); goto error; } flux_cmd_destroy (cmd); free (env); return; error: if (flux_respond_error (h, msg, errno, NULL) < 0) flux_log_error (h, "%s: flux_respond_error", __FUNCTION__); cleanup: flux_cmd_destroy (cmd); free (env); flux_subprocess_unref (p); } static int write_subprocess (flux_subprocess_server_t *s, flux_subprocess_t *p, const char *stream, const char *data, int len) { int tmp; if ((tmp = flux_subprocess_write (p, stream, data, len)) < 0) { flux_log_error (s->h, "%s: flux_subprocess_write", __FUNCTION__); return -1; } /* add list of msgs if there is overflow? */ if (tmp != len) { flux_log_error (s->h, "channel buffer error: rank = %d pid = %d, stream = %s, len = %d", s->rank, flux_subprocess_pid (p), stream, len); errno = EOVERFLOW; return -1; } return 0; } static int close_subprocess (flux_subprocess_server_t *s, flux_subprocess_t *p, const char *stream) { if (flux_subprocess_close (p, stream) < 0) { flux_log_error (s->h, "%s: flux_subprocess_close", __FUNCTION__); return -1; } return 0; } static void server_write_cb (flux_t *h, flux_msg_handler_t *mh, const flux_msg_t *msg, void *arg) { flux_subprocess_t *p; flux_subprocess_server_t *s = arg; const char *stream = NULL; char *data = NULL; int len = 0; bool eof = false; pid_t pid; json_t *io = NULL; if (flux_request_unpack (msg, NULL, "{ s:i s:o }", "pid", &pid, "io", &io) < 0) { /* can't handle error, no pid to sent errno back to, so just * return */ flux_log_error (s->h, "%s: flux_request_unpack", __FUNCTION__); return; } if (iodecode (io, &stream, NULL, &data, &len, &eof) < 0) { flux_log_error (s->h, "%s: iodecode", __FUNCTION__); return; } if (!(p = lookup_pid (s, pid))) { /* can't handle error, no pid to send errno back to, so just * return * * It's common on EOF to be sent and server has already * removed process from hash. Don't output error in that * case. */ if (!(errno == ENOENT && eof)) flux_log_error (s->h, "%s: lookup_pid", __FUNCTION__); goto out; } /* Chance subprocess exited/killed/etc. since user write request * was sent. 
*/ if (p->state != FLUX_SUBPROCESS_RUNNING) goto out; if (data && len) { if (write_subprocess (s, p, stream, data, len) < 0) goto error; } if (eof) { if (close_subprocess (s, p, stream) < 0) goto error; } out: free (data); return; error: free (data); internal_fatal (s, p); } static void server_signal_cb (flux_t *h, flux_msg_handler_t *mh, const flux_msg_t *msg, void *arg) { flux_subprocess_server_t *s = arg; pid_t pid; int signum; errno = 0; if (flux_request_unpack (msg, NULL, "{ s:i s:i }", "pid", &pid, "signum", &signum) < 0) { flux_log_error (s->h, "%s: flux_request_unpack", __FUNCTION__); errno = EPROTO; goto error; } if (!lookup_pid (s, pid)) goto error; if (killpg (pid, signum) < 0) { flux_log_error (s->h, "kill"); goto error; } if (flux_respond (h, msg, NULL) < 0) flux_log_error (h, "%s: flux_respond", __FUNCTION__); return; error: if (flux_respond_error (h, msg, errno, NULL) < 0) flux_log_error (h, "%s: flux_respond_error", __FUNCTION__); } char *subprocess_sender (flux_subprocess_t *p) { struct rexec *rex = flux_subprocess_aux_get (p, auxkey); char *sender; if (!rex || flux_msg_get_route_first (rex->msg, &sender) < 0) return NULL; return sender; } static json_t *process_info (flux_subprocess_t *p) { flux_cmd_t *cmd; char *cmd_str = NULL; char *sender = NULL; json_t *info = NULL; if (!(cmd = flux_subprocess_get_cmd (p))) goto cleanup; if (!(cmd_str = flux_cmd_tojson (cmd))) goto cleanup; if (!(sender = subprocess_sender (p))) { errno = ENOENT; goto cleanup; } /* very limited returned, just for testing */ if (!(info = json_pack ("{s:i s:s}", "pid", flux_subprocess_pid (p), "sender", sender))) { errno = ENOMEM; goto cleanup; } cleanup: free (sender); free (cmd_str); return info; } static void server_processes_cb (flux_t *h, flux_msg_handler_t *mh, const flux_msg_t *msg, void *arg) { flux_subprocess_server_t *s = arg; flux_subprocess_t *p; json_t *procs = NULL; if (!(procs = json_array ())) { errno = ENOMEM; goto error; } p = zhash_first (s->subprocesses); while (p) { json_t *o = NULL; if (!(o = process_info (p)) || json_array_append_new (procs, o) < 0) { json_decref (o); errno = ENOMEM; goto error; } p = zhash_next (s->subprocesses); } if (flux_respond_pack (h, msg, "{s:i s:o}", "rank", s->rank, "procs", procs) < 0) flux_log_error (h, "%s: flux_respond_pack", __FUNCTION__); return; error: if (flux_respond_error (h, msg, errno, NULL) < 0) flux_log_error (h, "%s: flux_respond_error", __FUNCTION__); json_decref (procs); } int server_start (flux_subprocess_server_t *s, const char *prefix) { /* rexec.processes is primarily for testing */ struct flux_msg_handler_spec htab[] = { { FLUX_MSGTYPE_REQUEST, "rexec", server_exec_cb, 0 }, { FLUX_MSGTYPE_REQUEST, "rexec.write", server_write_cb, 0 }, { FLUX_MSGTYPE_REQUEST, "rexec.signal", server_signal_cb, 0 }, { FLUX_MSGTYPE_REQUEST, "rexec.processes", server_processes_cb, 0 }, FLUX_MSGHANDLER_TABLE_END, }; char *topic_globs[4] = {NULL, NULL, NULL, NULL}; int rv = -1; assert (prefix); if (asprintf (&topic_globs[0], "%s.rexec", prefix) < 0) goto cleanup; if (asprintf (&topic_globs[1], "%s.rexec.write", prefix) < 0) goto cleanup; if (asprintf (&topic_globs[2], "%s.rexec.signal", prefix) < 0) goto cleanup; if (asprintf (&topic_globs[3], "%s.rexec.processes", prefix) < 0) goto cleanup; htab[0].topic_glob = (const char *)topic_globs[0]; htab[1].topic_glob = (const char *)topic_globs[1]; htab[2].topic_glob = (const char *)topic_globs[2]; htab[3].topic_glob = (const char *)topic_globs[3]; if (flux_msg_handler_addvec (s->h, htab, s, &s->handlers) < 0) 
goto cleanup; rv = 0; cleanup: free (topic_globs[0]); free (topic_globs[1]); free (topic_globs[2]); free (topic_globs[3]); return rv; } void server_stop (flux_subprocess_server_t *s) { flux_msg_handler_delvec (s->handlers); } static void server_signal_subprocess (flux_subprocess_t *p, int signum) { flux_future_t *f; if (!(f = flux_subprocess_kill (p, signum))) { struct rexec *rex = flux_subprocess_aux_get (p, auxkey); flux_log_error (rex->s->h, "%s: flux_subprocess_kill", __FUNCTION__); return; } flux_future_destroy (f); } int server_signal_subprocesses (flux_subprocess_server_t *s, int signum) { flux_subprocess_t *p; p = zhash_first (s->subprocesses); while (p) { server_signal_subprocess (p, signum); p = zhash_next (s->subprocesses); } return 0; } int server_terminate_subprocesses (flux_subprocess_server_t *s) { server_signal_subprocesses (s, SIGKILL); return 0; } static void terminate_uuid (flux_subprocess_t *p, const char *id) { char *sender; if (!(sender = subprocess_sender (p))) return; if (!strcmp (id, sender)) server_signal_subprocess (p, SIGKILL); free (sender); } int server_terminate_by_uuid (flux_subprocess_server_t *s, const char *id) { flux_subprocess_t *p; p = zhash_first (s->subprocesses); while (p) { terminate_uuid (p, id); p = zhash_next (s->subprocesses); } return 0; } static void terminate_prep_cb (flux_reactor_t *r, flux_watcher_t *w, int revents, void *arg) { flux_subprocess_server_t *s = arg; flux_watcher_start (s->terminate_idle_w); } static void terminate_cb (flux_reactor_t *r, flux_watcher_t *w, int revents, void *arg) { flux_subprocess_server_t *s = arg; flux_watcher_stop (s->terminate_timer_w); flux_watcher_stop (s->terminate_prep_w); flux_watcher_stop (s->terminate_idle_w); flux_watcher_stop (s->terminate_check_w); flux_reactor_stop (s->r); } void server_terminate_cleanup (flux_subprocess_server_t *s) { flux_watcher_destroy (s->terminate_timer_w); flux_watcher_destroy (s->terminate_prep_w); flux_watcher_destroy (s->terminate_idle_w); flux_watcher_destroy (s->terminate_check_w); s->terminate_timer_w = NULL; s->terminate_prep_w = NULL; s->terminate_idle_w = NULL; s->terminate_check_w = NULL; } int server_terminate_setup (flux_subprocess_server_t *s, double wait_time) { s->terminate_timer_w = flux_timer_watcher_create (s->r, wait_time, 0., terminate_cb, s); if (!s->terminate_timer_w) { flux_log_error (s->h, "flux_timer_watcher_create"); goto error; } if (s->terminate_prep_w) return 0; s->terminate_prep_w = flux_prepare_watcher_create (s->r, terminate_prep_cb, s); if (!s->terminate_prep_w) { flux_log_error (s->h, "flux_prepare_watcher_create"); goto error; } s->terminate_idle_w = flux_idle_watcher_create (s->r, NULL, s); if (!s->terminate_idle_w) { flux_log_error (s->h, "flux_idle_watcher_create"); goto error; } s->terminate_check_w = flux_check_watcher_create (s->r, terminate_cb, s); if (!s->terminate_check_w) { flux_log_error (s->h, "flux_check_watcher_create"); goto error; } return 0; error: server_terminate_cleanup (s); return -1; } int server_terminate_wait (flux_subprocess_server_t *s) { flux_watcher_start (s->terminate_timer_w); if (flux_reactor_run (s->r, 0) < 0) { flux_log_error (s->h, "flux_reactor_run"); return -1; } return 0; } /* * vi: ts=4 sw=4 expandtab */
1
24,731
Since this is a recurring theme, would it make sense to have an ioencode interface for it like `ioencode_rank()` that takes an integer rank like before?
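The server code here is C, but the wrapper shape being suggested — take an integer rank, format it once, delegate to the string-based encoder — is easy to sketch. The Go snippet below only illustrates that pattern; `ioEncode` is a stub standing in for libioencode, and none of these names exist in flux-core.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ioEncode is a stub for the string-based encoder (not the real libioencode).
func ioEncode(stream, rank string, data []byte, eof bool) ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"stream": stream, "rank": rank, "data": string(data), "eof": eof,
	})
}

// ioEncodeRank is the convenience wrapper being proposed: callers pass the
// integer rank and the formatting lives in exactly one place.
func ioEncodeRank(stream string, rank int, data []byte, eof bool) ([]byte, error) {
	return ioEncode(stream, fmt.Sprintf("%d", rank), data, eof)
}

func main() {
	out, err := ioEncodeRank("stdout", 3, []byte("hello"), false)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```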
flux-framework-flux-core
c
@@ -192,8 +192,10 @@ func contains(capabilities []string, capability string) bool { // object func (agent *ecsAgent) initializeResourceFields() { agent.resourceFields = &taskresource.ResourceFields{ - Control: cgroup.New(), - IOUtil: ioutilwrapper.NewIOUtil(), + Control: cgroup.New(), + IOUtil: ioutilwrapper.NewIOUtil(), + Ctx: agent.ctx, + DockerClient: agent.dockerClient, } }
1
// +build linux // Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package app import ( "fmt" "net/http" "github.com/aws/amazon-ecs-agent/agent/config" "github.com/aws/amazon-ecs-agent/agent/ec2" "github.com/aws/amazon-ecs-agent/agent/ecscni" "github.com/aws/amazon-ecs-agent/agent/engine" "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" "github.com/aws/amazon-ecs-agent/agent/eni/pause" "github.com/aws/amazon-ecs-agent/agent/eni/udevwrapper" "github.com/aws/amazon-ecs-agent/agent/eni/watcher" "github.com/aws/amazon-ecs-agent/agent/statechange" "github.com/aws/amazon-ecs-agent/agent/taskresource" cgroup "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control" "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper" "github.com/cihub/seelog" "github.com/pkg/errors" ) // initPID defines the process identifier for the init process const initPID = 1 // awsVPCCNIPlugins is a list of CNI plugins required by the ECS Agent // to configure the ENI for a task var awsVPCCNIPlugins = []string{ecscni.ECSENIPluginName, ecscni.ECSBridgePluginName, ecscni.ECSIPAMPluginName, } // startWindowsService is not supported on Linux func (agent *ecsAgent) startWindowsService() int { seelog.Error("Windows Services are not supported on Linux") return 1 } // initializeTaskENIDependencies initializes all of the dependencies required by // the Agent to support the 'awsvpc' networking mode. A non nil error is returned // if an error is encountered during this process. An additional boolean flag to // indicate if this error is considered terminal is also returned func (agent *ecsAgent) initializeTaskENIDependencies(state dockerstate.TaskEngineState, taskEngine engine.TaskEngine) (error, bool) { // Check if the Agent process's pid == 1, which means it's running without an init system if agent.os.Getpid() == initPID { // This is a terminal error. Bad things happen with invoking the // the ENI plugin when there's no init process in the pid namesapce. // Specifically, the DHClient processes that are started as children // of the Agent will not be reaped leading to the ENI device // disappearing until the Agent is killed. return errors.New("agent is not started with an init system"), true } // Set VPC and Subnet IDs for the instance if err, ok := agent.setVPCSubnet(); err != nil { return err, ok } // Validate that the CNI plugins exist in the expected path and that // they possess the right capabilities if err := agent.verifyCNIPluginsCapabilities(); err != nil { // An error here is terminal as it means that the plugins // do not support the ENI capability return err, true } if agent.cfg.ShouldLoadPauseContainerTarball() { // Load the pause container's image from the 'disk' if _, err := agent.pauseLoader.LoadImage(agent.ctx, agent.cfg, agent.dockerClient); err != nil { if pause.IsNoSuchFileError(err) || pause.UnsupportedPlatform(err) { // If the pause container's image tarball doesn't exist or if the // invocation is done for an unsupported platform, we cannot recover. 
// Return the error as terminal for these cases return err, true } return err, false } } if err := agent.startUdevWatcher(state, taskEngine.StateChangeEvents()); err != nil { // If udev watcher was not initialized in this run because of the udev socket // file not being available etc, the Agent might be able to retry and succeed // on the next run. Hence, returning a false here for terminal bool return err, false } return nil, false } // setVPCSubnet sets the vpc and subnet ids for the agent by querying the // instance metadata service func (agent *ecsAgent) setVPCSubnet() (error, bool) { mac, err := agent.ec2MetadataClient.PrimaryENIMAC() if err != nil { return fmt.Errorf("unable to get mac address of instance's primary ENI from instance metadata: %v", err), false } vpcID, err := agent.ec2MetadataClient.VPCID(mac) if err != nil { if isInstanceLaunchedInVPC(err) { return fmt.Errorf("unable to get vpc id from instance metadata: %v", err), true } return instanceNotLaunchedInVPCError, false } subnetID, err := agent.ec2MetadataClient.SubnetID(mac) if err != nil { return fmt.Errorf("unable to get subnet id from instance metadata: %v", err), false } agent.vpc = vpcID agent.subnet = subnetID agent.mac = mac return nil, false } // isInstanceLaunchedInVPC returns false when the http status code is set to // 'not found' (404) when querying the vpc id from instance metadata func isInstanceLaunchedInVPC(err error) bool { if metadataErr, ok := err.(*ec2.MetadataError); ok && metadataErr.GetStatusCode() == http.StatusNotFound { return false } return true } // verifyCNIPluginsCapabilities returns an error if there's an error querying // capabilities or if the required capability is absent from the capabilities // of the following plugins: // a. ecs-eni // b. ecs-bridge // c. ecs-ipam func (agent *ecsAgent) verifyCNIPluginsCapabilities() error { // Check if we can get capabilities from each plugin for _, plugin := range awsVPCCNIPlugins { capabilities, err := agent.cniClient.Capabilities(plugin) if err != nil { return err } if !contains(capabilities, ecscni.CapabilityAWSVPCNetworkingMode) { return errors.Errorf("plugin '%s' doesn't support the capability: %s", plugin, ecscni.CapabilityAWSVPCNetworkingMode) } } return nil } // startUdevWatcher starts the udev monitor and the watcher for receiving // notifications from the monitor func (agent *ecsAgent) startUdevWatcher(state dockerstate.TaskEngineState, stateChangeEvents chan<- statechange.Event) error { seelog.Debug("Setting up ENI Watcher") udevMonitor, err := udevwrapper.New() if err != nil { return errors.Wrapf(err, "unable to create udev monitor") } // Create Watcher eniWatcher := watcher.New(agent.ctx, agent.mac, udevMonitor, state, stateChangeEvents) if err := eniWatcher.Init(); err != nil { return errors.Wrapf(err, "unable to initialize eni watcher") } go eniWatcher.Start() return nil } func contains(capabilities []string, capability string) bool { for _, cap := range capabilities { if cap == capability { return true } } return false } // initializeResourceFields exists mainly for testing doStart() to use mock Control // object func (agent *ecsAgent) initializeResourceFields() { agent.resourceFields = &taskresource.ResourceFields{ Control: cgroup.New(), IOUtil: ioutilwrapper.NewIOUtil(), } } func (agent *ecsAgent) cgroupInit() error { err := agent.resourceFields.Control.Init() // When task CPU and memory limits are enabled, all tasks are placed // under the '/ecs' cgroup root. 
if err == nil { return nil } if agent.cfg.TaskCPUMemLimit == config.ExplicitlyEnabled { return errors.Wrapf(err, "unable to setup '/ecs' cgroup") } seelog.Warnf("Disabling TaskCPUMemLimit because agent is unabled to setup '/ecs' cgroup: %v", err) agent.cfg.TaskCPUMemLimit = config.ExplicitlyDisabled return nil }
1
20,131
Is it a concern that we initialize these fields irrespective of whether resources like cgroup/volumes are enabled or not?
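One way to read the question is to gate the wiring on the task-limits setting, as sketched below. The fragment reuses the types already imported by the file above; whether `TaskCPUMemLimit` is the right switch, and whether other resource types still need these fields when it is off, are assumptions made only for illustration.

```go
// Sketch only: initialize resourceFields conditionally instead of unconditionally.
func (agent *ecsAgent) initializeResourceFields() {
	if agent.cfg.TaskCPUMemLimit == config.ExplicitlyDisabled {
		// Task-level CPU/memory limits are off, so skip the cgroup wiring
		// (assumption: nothing else needs these fields in that case).
		return
	}
	agent.resourceFields = &taskresource.ResourceFields{
		Control:      cgroup.New(),
		IOUtil:       ioutilwrapper.NewIOUtil(),
		Ctx:          agent.ctx,
		DockerClient: agent.dockerClient,
	}
}
```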
aws-amazon-ecs-agent
go
@@ -109,10 +109,13 @@ func NewDispatcher(cfg Config) Dispatcher { // convertOutbounds applys outbound middleware and creates validator outbounds func convertOutbounds(outbounds Outbounds, middleware OutboundMiddleware) Outbounds { - //TODO(apb): ensure we're not given the same underlying outbound for each RPC type convertedOutbounds := make(Outbounds, len(outbounds)) for service, outs := range outbounds { + if outs.Unary == nil && outs.Oneway == nil { + panic(fmt.Sprintf("no outbound set for service %q in dispatcher", service)) + } + var ( unaryOutbound transport.UnaryOutbound onewayOutbound transport.OnewayOutbound
1
// Copyright (c) 2016 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package yarpc import ( "fmt" "sync" "go.uber.org/yarpc/internal/clientconfig" "go.uber.org/yarpc/internal/errors" "go.uber.org/yarpc/internal/request" intsync "go.uber.org/yarpc/internal/sync" "go.uber.org/yarpc/transport" "github.com/opentracing/opentracing-go" ) // Dispatcher object is used to configure a YARPC application; it is used by // Clients to send RPCs, and by Procedures to recieve them. This object is what // enables an application to be transport-agnostic. type Dispatcher interface { transport.Registrar transport.ClientConfigProvider // Inbounds returns a copy of the list of inbounds for this RPC object. // // The Inbounds will be returned in the same order that was used in the // configuration. Inbounds() Inbounds // Starts the RPC allowing it to accept and processing new incoming // requests. // // Blocks until the RPC is ready to start accepting new requests. Start() error // Stops the RPC. No new requests will be accepted. // // Blocks until the RPC has stopped. Stop() error } // Config specifies the parameters of a new RPC constructed via New. type Config struct { Name string Inbounds Inbounds Outbounds Outbounds // Inbound and Outbound Middleware that will be applied to all incoming and // outgoing requests respectively. InboundMiddleware InboundMiddleware OutboundMiddleware OutboundMiddleware // Tracer is deprecated. The dispatcher does nothing with this propery. Tracer opentracing.Tracer } // Inbounds contains a list of inbound transports type Inbounds []transport.Inbound // Outbounds encapsulates a service and its outbounds type Outbounds map[string]transport.Outbounds // OutboundMiddleware contains the different type of outbound middleware type OutboundMiddleware struct { Unary transport.UnaryOutboundMiddleware Oneway transport.OnewayOutboundMiddleware } // InboundMiddleware contains the different type of inbound middleware type InboundMiddleware struct { Unary transport.UnaryInboundMiddleware Oneway transport.OnewayInboundMiddleware } // NewDispatcher builds a new Dispatcher using the specified Config. 
func NewDispatcher(cfg Config) Dispatcher { if cfg.Name == "" { panic("a service name is required") } return dispatcher{ Name: cfg.Name, Registrar: transport.NewMapRegistry(cfg.Name), inbounds: cfg.Inbounds, outbounds: convertOutbounds(cfg.Outbounds, cfg.OutboundMiddleware), InboundMiddleware: cfg.InboundMiddleware, } } // convertOutbounds applys outbound middleware and creates validator outbounds func convertOutbounds(outbounds Outbounds, middleware OutboundMiddleware) Outbounds { //TODO(apb): ensure we're not given the same underlying outbound for each RPC type convertedOutbounds := make(Outbounds, len(outbounds)) for service, outs := range outbounds { var ( unaryOutbound transport.UnaryOutbound onewayOutbound transport.OnewayOutbound ) // apply outbound middleware and create ValidatorOutbounds if outs.Unary != nil { unaryOutbound = transport.ApplyUnaryOutboundMiddleware(outs.Unary, middleware.Unary) unaryOutbound = request.UnaryValidatorOutbound{UnaryOutbound: unaryOutbound} } if outs.Oneway != nil { onewayOutbound = transport.ApplyOnewayOutboundMiddleware(outs.Oneway, middleware.Oneway) onewayOutbound = request.OnewayValidatorOutbound{OnewayOutbound: outs.Oneway} } convertedOutbounds[service] = transport.Outbounds{ Unary: unaryOutbound, Oneway: onewayOutbound, } } return convertedOutbounds } // dispatcher is the standard RPC implementation. // // It allows use of multiple Inbounds and Outbounds together. type dispatcher struct { transport.Registrar Name string inbounds Inbounds outbounds Outbounds InboundMiddleware InboundMiddleware } func (d dispatcher) Inbounds() Inbounds { inbounds := make(Inbounds, len(d.inbounds)) copy(inbounds, d.inbounds) return inbounds } func (d dispatcher) ClientConfig(service string) transport.ClientConfig { if rs, ok := d.outbounds[service]; ok { return clientconfig.MultiOutbound(d.Name, service, rs) } panic(noOutboundForService{Service: service}) } func (d dispatcher) Start() error { var ( mu sync.Mutex startedInbounds []transport.Inbound startedOutbounds []transport.Outbound ) startInbound := func(i transport.Inbound) func() error { return func() error { if err := i.Start(); err != nil { return err } mu.Lock() startedInbounds = append(startedInbounds, i) mu.Unlock() return nil } } startOutbound := func(o transport.Outbound) func() error { return func() error { if o == nil { return nil } if err := o.Start(); err != nil { return err } mu.Lock() startedOutbounds = append(startedOutbounds, o) mu.Unlock() return nil } } var wait intsync.ErrorWaiter for _, i := range d.inbounds { i.SetRegistry(d) wait.Submit(startInbound(i)) } // TODO record the name of the service whose outbound failed for _, o := range d.outbounds { wait.Submit(startOutbound(o.Unary)) wait.Submit(startOutbound(o.Oneway)) } errs := wait.Wait() if len(errs) == 0 { return nil } // Failed to start so stop everything that was started. wait = intsync.ErrorWaiter{} for _, i := range startedInbounds { wait.Submit(i.Stop) } for _, o := range startedOutbounds { wait.Submit(o.Stop) } if newErrors := wait.Wait(); len(newErrors) > 0 { errs = append(errs, newErrors...) 
} return errors.ErrorGroup(errs) } func (d dispatcher) Register(rs []transport.Registrant) { registrants := make([]transport.Registrant, 0, len(rs)) for _, r := range rs { switch r.HandlerSpec.Type() { case transport.Unary: h := transport.ApplyUnaryInboundMiddleware(r.HandlerSpec.Unary(), d.InboundMiddleware.Unary) r.HandlerSpec = transport.NewUnaryHandlerSpec(h) case transport.Oneway: h := transport.ApplyOnewayInboundMiddleware(r.HandlerSpec.Oneway(), d.InboundMiddleware.Oneway) r.HandlerSpec = transport.NewOnewayHandlerSpec(h) default: panic(fmt.Sprintf("unknown handler type %q for service %q, procedure %q", r.HandlerSpec.Type(), r.Service, r.Procedure)) } registrants = append(registrants, r) } d.Registrar.Register(registrants) } func (d dispatcher) Stop() error { var wait intsync.ErrorWaiter for _, i := range d.inbounds { wait.Submit(i.Stop) } for _, o := range d.outbounds { if o.Unary != nil { wait.Submit(o.Unary.Stop) } if o.Oneway != nil { wait.Submit(o.Oneway.Stop) } } if errs := wait.Wait(); len(errs) > 0 { return errors.ErrorGroup(errs) } return nil }
1
11,640
Wouldn't this be the first panic in yarpc? What do we do for transport validation? Return errors?
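An error-returning variant of the new check might look like the sketch below. It is only a shape illustration: propagating the error would also change `NewDispatcher`'s signature, which this code does not do.

```go
// Sketch: report a missing outbound as an error instead of panicking.
// Plumbing this through would require NewDispatcher to return (Dispatcher, error).
func convertOutbounds(outbounds Outbounds, mw OutboundMiddleware) (Outbounds, error) {
	converted := make(Outbounds, len(outbounds))
	for service, outs := range outbounds {
		if outs.Unary == nil && outs.Oneway == nil {
			return nil, fmt.Errorf("no outbound set for service %q in dispatcher", service)
		}
		// middleware application elided; it is unchanged from the original function
		converted[service] = outs
	}
	return converted, nil
}
```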
yarpc-yarpc-go
go
@@ -26,6 +26,7 @@ import ( type processor interface { process(ctx context.Context) error traceLogs() []string + cancel(reason string) } // processorProvider allows the processor to be determined after the pd has been inflated.
1
// Copyright 2020 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package importer import ( "context" ) // processor represents the second (and final) phase of import. For bootable // disks, this means translation and publishing the final image. For data // disks, this means publishing the image. // // Implementers can expose detailed logs using the traceLogs() method. type processor interface { process(ctx context.Context) error traceLogs() []string } // processorProvider allows the processor to be determined after the pd has been inflated. type processorProvider interface { provide(pd persistentDisk) (processor, error) } type defaultProcessorProvider struct { ImportArguments imageClient createImageClient } func (d defaultProcessorProvider) provide(pd persistentDisk) (processor, error) { if d.DataDisk { return newDataDiskProcessor(pd, d.imageClient, d.Project, d.Labels, d.StorageLocation, d.Description, d.Family, d.ImageName), nil } return newBootableDiskProcessor(d.ImportArguments, pd) }
1
11,422
have you envisioned other cancellation reasons?
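The patch leaves the reason as a free-form string. The constants below are purely hypothetical examples of distinct reasons a caller might pass, written only to make the question concrete; they are not names used by the tool.

```go
package importer

import "context"

// Hypothetical cancellation reasons; the real interface accepts any string.
const (
	cancelReasonTimeout     = "import deadline exceeded"
	cancelReasonUserRequest = "cancelled by user"
	cancelReasonQuota       = "compute quota exhausted"
)

// processor mirrors the interface from the patch above.
type processor interface {
	process(ctx context.Context) error
	traceLogs() []string
	cancel(reason string)
}
```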
GoogleCloudPlatform-compute-image-tools
go
@@ -395,8 +395,10 @@ final class SonataMediaExtension extends Extension implements PrependExtensionIn /** * Checks if the classification of media is enabled. * - * @param array<string, class-string> $bundles - * @param array<string, mixed> $config + * @param array<string, string> $bundles + * @param array<string, mixed> $config + * + * @phpstan-param array<string, class-string> $bundles */ private function isClassificationEnabled(array $bundles, array $config): bool {
1
<?php declare(strict_types=1); /* * This file is part of the Sonata Project package. * * (c) Thomas Rabaix <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Sonata\MediaBundle\DependencyInjection; use Sonata\Doctrine\Mapper\Builder\OptionsBuilder; use Sonata\Doctrine\Mapper\DoctrineCollector; use Sonata\MediaBundle\CDN\CloudFrontVersion3; use Symfony\Component\Config\Definition\Processor; use Symfony\Component\Config\FileLocator; use Symfony\Component\DependencyInjection\ContainerBuilder; use Symfony\Component\DependencyInjection\Extension\PrependExtensionInterface; use Symfony\Component\DependencyInjection\Loader\PhpFileLoader; use Symfony\Component\DependencyInjection\Reference; use Symfony\Component\HttpKernel\DependencyInjection\Extension; /** * @author Thomas Rabaix <[email protected]> */ final class SonataMediaExtension extends Extension implements PrependExtensionInterface { /** * @var array<string, mixed> */ private $sonataAdminConfig = []; public function load(array $configs, ContainerBuilder $container): void { $processor = new Processor(); $configuration = new Configuration(); $config = $processor->processConfiguration($configuration, $configs); $loader = new PhpFileLoader($container, new FileLocator(__DIR__.'/../Resources/config')); $loader->load('providers.php'); $loader->load('http_client.php'); $loader->load('media.php'); $loader->load('twig.php'); $loader->load('security.php'); $loader->load('extra.php'); $loader->load('form.php'); $loader->load('gaufrette.php'); $loader->load('validators.php'); $loader->load('serializer.php'); $loader->load('commands.php'); $loader->load('controllers.php'); $bundles = $container->getParameter('kernel.bundles'); \assert(\is_array($bundles)); if (isset($bundles['FOSRestBundle'], $bundles['NelmioApiDocBundle'])) { $loader->load(sprintf('api_form_%s.php', $config['db_driver'])); if ('doctrine_orm' === $config['db_driver']) { $loader->load('api_controllers.php'); } } if (isset($bundles['SonataNotificationBundle'])) { $loader->load('consumers.php'); } if (isset($bundles['SonataBlockBundle'])) { $loader->load('block.php'); } if (isset($bundles['SonataSeoBundle'])) { $loader->load('seo_block.php'); } if (!isset($bundles['LiipImagineBundle'])) { $container->removeDefinition('sonata.media.thumbnail.liip_imagine'); } if ($this->isClassificationEnabled($bundles, $config)) { $loader->load('category.php'); } if (!\array_key_exists($config['default_context'], $config['contexts'])) { throw new \InvalidArgumentException(sprintf('SonataMediaBundle - Invalid default context : %s, available : %s', $config['default_context'], json_encode(array_keys($config['contexts'])))); } $loader->load(sprintf('%s.php', $config['db_driver'])); if (isset($bundles['SonataAdminBundle'])) { $loader->load(sprintf('%s_admin.php', $config['db_driver'])); $sonataRoles = []; if (isset($this->sonataAdminConfig['security']['role_admin'])) { $sonataRoles[] = $this->sonataAdminConfig['security']['role_admin']; } else { $sonataRoles[] = 'ROLE_ADMIN'; } if (isset($this->sonataAdminConfig['security']['role_super_admin'])) { $sonataRoles[] = $this->sonataAdminConfig['security']['role_super_admin']; } else { $sonataRoles[] = 'ROLE_SUPER_ADMIN'; } $container->getDefinition('sonata.media.security.superadmin_strategy') ->replaceArgument(2, $sonataRoles); } $this->configureFilesystemAdapter($container, $config); $this->configureCdnAdapter($container, $config); $pool = 
$container->getDefinition('sonata.media.pool'); $pool->replaceArgument(0, $config['default_context']); $strategies = []; foreach ($config['contexts'] as $name => $settings) { $formats = []; foreach ($settings['formats'] as $format => $value) { $formats[$name.'_'.$format] = $value; } $strategies[] = $settings['download']['strategy']; $pool->addMethodCall('addContext', [$name, $settings['providers'], $formats, $settings['download']]); } $container->setParameter('sonata.media.admin_format', $config['admin_format']); $strategies = array_unique($strategies); foreach ($strategies as $strategyId) { $pool->addMethodCall('addDownloadStrategy', [$strategyId, new Reference($strategyId)]); } if ('doctrine_orm' === $config['db_driver']) { if (!isset($bundles['SonataDoctrineBundle'])) { throw new \RuntimeException('You must register SonataDoctrineBundle to use SonataMediaBundle.'); } $this->registerSonataDoctrineMapping($bundles, $config); } $container->setParameter('sonata.media.resizer.simple.adapter.mode', $config['resizer']['simple']['mode']); $container->setParameter('sonata.media.resizer.square.adapter.mode', $config['resizer']['square']['mode']); $this->configureParameterClass($container, $config); $this->configureExtra($container, $config); $this->configureHttpClient($container, $config); $this->configureProviders($container, $config); $this->configureAdapters($container, $config); $this->configureResizers($container, $config); } /** * @param array<string, mixed> $config */ public function configureProviders(ContainerBuilder $container, array $config): void { $container->getDefinition('sonata.media.provider.image') ->replaceArgument(5, array_map('strtolower', $config['providers']['image']['allowed_extensions'])) ->replaceArgument(6, $config['providers']['image']['allowed_mime_types']) ->replaceArgument(7, new Reference($config['providers']['image']['adapter'])); $container->getDefinition('sonata.media.provider.file') ->replaceArgument(5, $config['providers']['file']['allowed_extensions']) ->replaceArgument(6, $config['providers']['file']['allowed_mime_types']); $container->getDefinition('sonata.media.provider.youtube')->replaceArgument(8, $config['providers']['youtube']['html5']); } /** * @param array<string, mixed> $config */ public function configureParameterClass(ContainerBuilder $container, array $config): void { $container->setParameter('sonata.media.admin.media.entity', $config['class']['media']); $container->setParameter('sonata.media.admin.gallery.entity', $config['class']['gallery']); $container->setParameter('sonata.media.admin.gallery_item.entity', $config['class']['gallery_item']); $container->setParameter('sonata.media.media.class', $config['class']['media']); $container->setParameter('sonata.media.gallery.class', $config['class']['gallery']); $container->getDefinition('sonata.media.form.type.media')->replaceArgument(1, $config['class']['media']); } /** * Inject CDN dependency to default provider. 
* * @param array<string, mixed> $config */ public function configureCdnAdapter(ContainerBuilder $container, array $config): void { // add the default configuration for the server cdn if ($container->hasDefinition('sonata.media.cdn.server') && isset($config['cdn']['server'])) { $container->getDefinition('sonata.media.cdn.server') ->replaceArgument(0, $config['cdn']['server']['path']); } else { $container->removeDefinition('sonata.media.cdn.server'); } if ($container->hasDefinition('sonata.media.cdn.panther') && isset($config['cdn']['panther'])) { $container->getDefinition('sonata.media.cdn.panther') ->replaceArgument(0, $config['cdn']['panther']['path']) ->replaceArgument(1, $config['cdn']['panther']['username']) ->replaceArgument(2, $config['cdn']['panther']['password']) ->replaceArgument(3, $config['cdn']['panther']['site_id']); } else { $container->removeDefinition('sonata.media.cdn.panther'); } if ($container->hasDefinition('sonata.media.cdn.cloudfront') && isset($config['cdn']['cloudfront'])) { $cloudFrontConfig = []; if (isset($config['cdn']['cloudfront']['region'])) { $cloudFrontConfig['region'] = $config['cdn']['cloudfront']['region']; } if (isset($config['cdn']['cloudfront']['version'])) { $cloudFrontConfig['version'] = $config['cdn']['cloudfront']['version']; } $cloudFrontConfig['credentials'] = [ 'key' => $config['cdn']['cloudfront']['key'], 'secret' => $config['cdn']['cloudfront']['secret'], ]; $cloudFrontClass = CloudFrontVersion3::class; $container->getDefinition('sonata.media.cdn.cloudfront.client') ->replaceArgument(0, $cloudFrontConfig); $container->getDefinition('sonata.media.cdn.cloudfront') ->setClass($cloudFrontClass) ->replaceArgument(0, new Reference('sonata.media.cdn.cloudfront.client')) ->replaceArgument(1, $config['cdn']['cloudfront']['distribution_id']) ->replaceArgument(2, $config['cdn']['cloudfront']['path']); } else { $container->removeDefinition('sonata.media.cdn.cloudfront.client'); $container->removeDefinition('sonata.media.cdn.cloudfront'); } if ($container->hasDefinition('sonata.media.cdn.fallback') && isset($config['cdn']['fallback'])) { $container->getDefinition('sonata.media.cdn.fallback') ->replaceArgument(0, new Reference($config['cdn']['fallback']['master'])) ->replaceArgument(1, new Reference($config['cdn']['fallback']['fallback'])); } else { $container->removeDefinition('sonata.media.cdn.fallback'); } } /** * Inject filesystem dependency to default provider. 
* * @param array<string, mixed> $config */ public function configureFilesystemAdapter(ContainerBuilder $container, array $config): void { // add the default configuration for the local filesystem if ($container->hasDefinition('sonata.media.adapter.filesystem.local') && isset($config['filesystem']['local'])) { $container->getDefinition('sonata.media.adapter.filesystem.local') ->addArgument($config['filesystem']['local']['directory']) ->addArgument($config['filesystem']['local']['create']); } else { $container->removeDefinition('sonata.media.adapter.filesystem.local'); } // add the default configuration for the FTP filesystem if ($container->hasDefinition('sonata.media.adapter.filesystem.ftp') && isset($config['filesystem']['ftp'])) { $container->getDefinition('sonata.media.adapter.filesystem.ftp') ->addArgument($config['filesystem']['ftp']['directory']) ->addArgument($config['filesystem']['ftp']['host']) ->addArgument([ 'port' => $config['filesystem']['ftp']['port'], 'username' => $config['filesystem']['ftp']['username'], 'password' => $config['filesystem']['ftp']['password'], 'passive' => $config['filesystem']['ftp']['passive'], 'create' => $config['filesystem']['ftp']['create'], 'mode' => $config['filesystem']['ftp']['mode'], ]); } else { $container->removeDefinition('sonata.media.adapter.filesystem.ftp'); $container->removeDefinition('sonata.media.filesystem.ftp'); } // add the default configuration for the S3 filesystem if ($container->hasDefinition('sonata.media.adapter.filesystem.s3') && isset($config['filesystem']['s3'])) { $container->getDefinition('sonata.media.adapter.filesystem.s3') ->replaceArgument(0, new Reference('sonata.media.adapter.service.s3')) ->replaceArgument(1, $config['filesystem']['s3']['bucket']) ->replaceArgument(2, ['create' => $config['filesystem']['s3']['create'], 'region' => $config['filesystem']['s3']['region'], 'directory' => $config['filesystem']['s3']['directory'], 'ACL' => $config['filesystem']['s3']['acl']]); $container->getDefinition('sonata.media.metadata.amazon') ->replaceArgument(0, [ 'acl' => $config['filesystem']['s3']['acl'], 'storage' => $config['filesystem']['s3']['storage'], 'encryption' => $config['filesystem']['s3']['encryption'], 'meta' => $config['filesystem']['s3']['meta'], 'cache_control' => $config['filesystem']['s3']['cache_control'], ]); $arguments = [ 'region' => $config['filesystem']['s3']['region'], 'version' => $config['filesystem']['s3']['version'], ]; if (isset($config['filesystem']['s3']['endpoint'])) { $arguments['endpoint'] = $config['filesystem']['s3']['endpoint']; } if (isset($config['filesystem']['s3']['secretKey'], $config['filesystem']['s3']['accessKey'])) { $arguments['credentials'] = [ 'secret' => $config['filesystem']['s3']['secretKey'], 'key' => $config['filesystem']['s3']['accessKey'], ]; } $container->getDefinition('sonata.media.adapter.service.s3') ->replaceArgument(0, $arguments); } else { $container->removeDefinition('sonata.media.adapter.filesystem.s3'); $container->removeDefinition('sonata.media.filesystem.s3'); } if ($container->hasDefinition('sonata.media.adapter.filesystem.replicate') && isset($config['filesystem']['replicate'])) { $container->getDefinition('sonata.media.adapter.filesystem.replicate') ->replaceArgument(0, new Reference($config['filesystem']['replicate']['master'])) ->replaceArgument(1, new Reference($config['filesystem']['replicate']['slave'])); } else { $container->removeDefinition('sonata.media.adapter.filesystem.replicate'); 
$container->removeDefinition('sonata.media.filesystem.replicate'); } if ($container->hasDefinition('sonata.media.adapter.filesystem.opencloud') && (isset($config['filesystem']['openstack']) || isset($config['filesystem']['rackspace']))) { if (isset($config['filesystem']['openstack'])) { $container->removeDefinition('sonata.media.adapter.filesystem.opencloud.connection.rackspace'); $settings = 'openstack'; } else { $container->removeDefinition('sonata.media.adapter.filesystem.opencloud.connection.openstack'); $settings = 'rackspace'; } $container->getDefinition(sprintf('sonata.media.adapter.filesystem.opencloud.connection.%s', $settings)) ->replaceArgument(0, $config['filesystem'][$settings]['url']) ->replaceArgument(1, $config['filesystem'][$settings]['secret']); $container->getDefinition('sonata.media.adapter.filesystem.opencloud') ->replaceArgument(1, $config['filesystem'][$settings]['containerName']) ->replaceArgument(2, $config['filesystem'][$settings]['create_container']); $container->getDefinition('sonata.media.adapter.filesystem.opencloud.objectstore') ->replaceArgument(1, $config['filesystem'][$settings]['region']) ->setFactory([new Reference(sprintf('sonata.media.adapter.filesystem.opencloud.connection.%s', $settings)), 'ObjectStore']); } else { $container->removeDefinition('sonata.media.adapter.filesystem.opencloud'); $container->removeDefinition('sonata.media.adapter.filesystem.opencloud.connection.rackspace'); $container->removeDefinition('sonata.media.adapter.filesystem.opencloud.connection.openstack'); $container->removeDefinition('sonata.media.adapter.filesystem.opencloud.objectstore'); $container->removeDefinition('sonata.media.filesystem.opencloud'); } } /** * @param array<string, mixed> $config */ public function configureExtra(ContainerBuilder $container, array $config): void { if ($config['pixlr']['enabled']) { $container->getDefinition('sonata.media.extra.pixlr') ->replaceArgument(0, $config['pixlr']['referrer']) ->replaceArgument(1, $config['pixlr']['secret']); } else { $container->removeDefinition('sonata.media.extra.pixlr'); } } /** * Allow an extension to prepend the extension configurations. */ public function prepend(ContainerBuilder $container): void { $bundles = $container->getParameter('kernel.bundles'); \assert(\is_array($bundles)); // Store SonataAdminBundle configuration for later use if (isset($bundles['SonataAdminBundle'])) { $this->sonataAdminConfig = current($container->getExtensionConfig('sonata_admin')); } } /** * Checks if the classification of media is enabled. 
* * @param array<string, class-string> $bundles * @param array<string, mixed> $config */ private function isClassificationEnabled(array $bundles, array $config): bool { \assert(\is_bool($config['force_disable_category'])); return isset($bundles['SonataClassificationBundle']) && !$config['force_disable_category']; } /** * @param array<string, mixed> $config */ private function configureAdapters(ContainerBuilder $container, array $config): void { $container->setAlias('sonata.media.adapter.image.default', $config['adapters']['default']); } /** * @param array<string, mixed> $config */ private function configureResizers(ContainerBuilder $container, array $config): void { $container->setAlias('sonata.media.resizer.default', $config['resizers']['default']); } /** * @param array<string, class-string> $bundles * @param array<string, mixed> $config */ private function registerSonataDoctrineMapping(array $bundles, array $config): void { $collector = DoctrineCollector::getInstance(); $collector->addAssociation( $config['class']['media'], 'mapOneToMany', OptionsBuilder::createOneToMany('galleryItems', $config['class']['gallery_item']) ->cascade(['persist']) ->mappedBy('media') ); $collector->addAssociation( $config['class']['gallery_item'], 'mapManyToOne', OptionsBuilder::createManyToOne('gallery', $config['class']['gallery']) ->cascade(['persist']) ->inversedBy('galleryItems') ->addJoin([ 'name' => 'gallery_id', 'referencedColumnName' => 'id', 'onDelete' => 'CASCADE', ]) ); $collector->addAssociation( $config['class']['gallery_item'], 'mapManyToOne', OptionsBuilder::createManyToOne('media', $config['class']['media']) ->cascade(['persist']) ->inversedBy('galleryItems') ->addJoin([ 'name' => 'media_id', 'referencedColumnName' => 'id', 'onDelete' => 'CASCADE', ]) ); $collector->addAssociation( $config['class']['gallery'], 'mapOneToMany', OptionsBuilder::createOneToMany('galleryItems', $config['class']['gallery_item']) ->cascade(['persist']) ->mappedBy('gallery') ->orphanRemoval() ->addOrder('position', 'ASC') ); if ($this->isClassificationEnabled($bundles, $config)) { $collector->addAssociation( $config['class']['media'], 'mapManyToOne', OptionsBuilder::createManyToOne('category', $config['class']['category']) ->cascade(['persist']) ->addJoin([ 'name' => 'category_id', 'referencedColumnName' => 'id', 'onDelete' => 'SET NULL', ]) ); } } /** * @param array<string, mixed> $config */ private function configureHttpClient(ContainerBuilder $container, array $config): void { $container->setAlias('sonata.media.http.client', $config['http']['client']); $container->setAlias('sonata.media.http.message_factory', $config['http']['message_factory']); } }
1
12,280
For the record, class-string is now supported by PhpStorm. Don't know if it's worth moving it to `@phpstan-param` then.
sonata-project-SonataMediaBundle
php
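For context on the review note above: the extension's docblocks (for example on isClassificationEnabled() and registerSonataDoctrineMapping()) keep the full array<string, class-string> shape directly in the plain @param tag. The sketch below is purely illustrative and not code from the bundle; the function names are made up and only the annotations matter. It contrasts the current style with the @phpstan-param variant the reviewer mentions, where @param stays simple and PHPStan reads the more precise tag.

<?php

declare(strict_types=1);

/**
 * Style currently used in the extension above: the precise shape,
 * class-string included, sits directly in @param (now readable by PhpStorm).
 *
 * @param array<string, class-string> $bundles
 */
function annotatedViaParam(array $bundles): void
{
}

/**
 * Style the reviewer is weighing: keep @param plain and move the precise
 * shape to @phpstan-param, which PHPStan reads in preference to @param.
 *
 * @param array $bundles
 *
 * @phpstan-param array<string, class-string> $bundles
 */
function annotatedViaPhpstanParam(array $bundles): void
{
}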
@@ -2085,7 +2085,10 @@ class CommandDispatcher: raise cmdexc.CommandError(str(e)) widget = self._current_widget() - widget.run_js_async(js_code, callback=jseval_cb, world=world) + try: + widget.run_js_async(js_code, callback=jseval_cb, world=world) + except OverflowError: + raise cmdexc.CommandError("World Id not in valid range") @cmdutils.register(instance='command-dispatcher', scope='window') def fake_key(self, keystring, global_=False):
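A side note on the diff above: the OverflowError being caught comes from the PyQt/sip layer when the world id handed to run_js_async does not fit the underlying C integer type. The snippet below is a hypothetical alternative sketch, not part of the patch: it reuses the cmdutils.check_overflow helper that this same dispatcher already applies to scroll deltas and tab indices, and it assumes the module's existing imports and locals, that `world` arrived as an integer id (jseval also accepts named worlds), and that 'int' is an acceptable ctype for the check.

# Hypothetical alternative to the try/except added by the patch; assumes the
# surrounding CommandDispatcher method's locals (widget, js_code, jseval_cb).
if isinstance(world, int):
    # cmdutils.check_overflow raises cmdexc.CommandError when the value
    # cannot be represented as the given C type, so the bad id never
    # reaches the sip/Qt call that would raise OverflowError.
    cmdutils.check_overflow(world, 'int')
widget.run_js_async(js_code, callback=jseval_cb, world=world)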
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Command dispatcher for TabbedBrowser.""" import os import os.path import shlex import functools import typing from PyQt5.QtWidgets import QApplication, QTabBar from PyQt5.QtCore import pyqtSlot, Qt, QUrl, QEvent, QUrlQuery from PyQt5.QtPrintSupport import QPrintPreviewDialog from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners from qutebrowser.config import config, configdata from qutebrowser.browser import (urlmarks, browsertab, inspector, navigate, webelem, downloads) from qutebrowser.keyinput import modeman, keyutils from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils, objreg, utils, standarddir) from qutebrowser.utils.usertypes import KeyMode from qutebrowser.misc import editor, guiprocess from qutebrowser.completion.models import urlmodel, miscmodels from qutebrowser.mainwindow import mainwindow class CommandDispatcher: """Command dispatcher for TabbedBrowser. Contains all commands which are related to the current tab. We can't simply add these commands to BrowserTab directly and use currentWidget() for TabbedBrowser.cmd because at the time cmdutils.register() decorators are run, currentWidget() will return None. Attributes: _win_id: The window ID the CommandDispatcher is associated with. _tabbed_browser: The TabbedBrowser used. """ def __init__(self, win_id, tabbed_browser): self._win_id = win_id self._tabbed_browser = tabbed_browser def __repr__(self): return utils.get_repr(self) def _new_tabbed_browser(self, private): """Get a tabbed-browser from a new window.""" new_window = mainwindow.MainWindow(private=private) new_window.show() return new_window.tabbed_browser def _count(self): """Convenience method to get the widget count.""" return self._tabbed_browser.widget.count() def _set_current_index(self, idx): """Convenience method to set the current widget index.""" cmdutils.check_overflow(idx, 'int') self._tabbed_browser.widget.setCurrentIndex(idx) def _current_index(self): """Convenience method to get the current widget index.""" return self._tabbed_browser.widget.currentIndex() def _current_url(self): """Convenience method to get the current url.""" try: return self._tabbed_browser.current_url() except qtutils.QtValueError as e: msg = "Current URL is invalid" if e.reason: msg += " ({})".format(e.reason) msg += "!" 
raise cmdexc.CommandError(msg) def _current_title(self): """Convenience method to get the current title.""" return self._current_widget().title() def _current_widget(self): """Get the currently active widget from a command.""" widget = self._tabbed_browser.widget.currentWidget() if widget is None: raise cmdexc.CommandError("No WebView available yet!") return widget def _open(self, url, tab=False, background=False, window=False, related=False, private=None): """Helper function to open a page. Args: url: The URL to open as QUrl. tab: Whether to open in a new tab. background: Whether to open in the background. window: Whether to open in a new window private: If opening a new window, open it in private browsing mode. If not given, inherit the current window's mode. """ urlutils.raise_cmdexc_if_invalid(url) tabbed_browser = self._tabbed_browser cmdutils.check_exclusive((tab, background, window, private), 'tbwp') if window and private is None: private = self._tabbed_browser.private if window or private: tabbed_browser = self._new_tabbed_browser(private) tabbed_browser.tabopen(url) elif tab: tabbed_browser.tabopen(url, background=False, related=related) elif background: tabbed_browser.tabopen(url, background=True, related=related) else: widget = self._current_widget() widget.openurl(url) def _cntwidget(self, count=None): """Return a widget based on a count/idx. Args: count: The tab index, or None. Return: The current widget if count is None. The widget with the given tab ID if count is given. None if no widget was found. """ if count is None: return self._tabbed_browser.widget.currentWidget() elif 1 <= count <= self._count(): cmdutils.check_overflow(count + 1, 'int') return self._tabbed_browser.widget.widget(count - 1) else: return None def _tab_focus_last(self, *, show_error=True): """Select the tab which was last focused.""" try: tab = objreg.get('last-focused-tab', scope='window', window=self._win_id) except KeyError: if not show_error: return raise cmdexc.CommandError("No last focused tab!") idx = self._tabbed_browser.widget.indexOf(tab) if idx == -1: raise cmdexc.CommandError("Last focused tab vanished!") self._set_current_index(idx) def _get_selection_override(self, prev, next_, opposite): """Helper function for tab_close to get the tab to select. Args: prev: Force selecting the tab before the current tab. next_: Force selecting the tab after the current tab. opposite: Force selecting the tab in the opposite direction of what's configured in 'tabs.select_on_remove'. Return: QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change should be made. """ cmdutils.check_exclusive((prev, next_, opposite), 'pno') if prev: return QTabBar.SelectLeftTab elif next_: return QTabBar.SelectRightTab elif opposite: conf_selection = config.val.tabs.select_on_remove if conf_selection == QTabBar.SelectLeftTab: return QTabBar.SelectRightTab elif conf_selection == QTabBar.SelectRightTab: return QTabBar.SelectLeftTab elif conf_selection == QTabBar.SelectPreviousTab: raise cmdexc.CommandError( "-o is not supported with 'tabs.select_on_remove' set to " "'last-used'!") else: # pragma: no cover raise ValueError("Invalid select_on_remove value " "{!r}!".format(conf_selection)) return None def _tab_close(self, tab, prev=False, next_=False, opposite=False): """Helper function for tab_close be able to handle message.async. Args: tab: Tab object to select be closed. prev: Force selecting the tab before the current tab. next_: Force selecting the tab after the current tab. 
opposite: Force selecting the tab in the opposite direction of what's configured in 'tabs.select_on_remove'. count: The tab index to close, or None """ tabbar = self._tabbed_browser.widget.tabBar() selection_override = self._get_selection_override(prev, next_, opposite) if selection_override is None: self._tabbed_browser.close_tab(tab) else: old_selection_behavior = tabbar.selectionBehaviorOnRemove() tabbar.setSelectionBehaviorOnRemove(selection_override) self._tabbed_browser.close_tab(tab) tabbar.setSelectionBehaviorOnRemove(old_selection_behavior) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def tab_close(self, prev=False, next_=False, opposite=False, force=False, count=None): """Close the current/[count]th tab. Args: prev: Force selecting the tab before the current tab. next_: Force selecting the tab after the current tab. opposite: Force selecting the tab in the opposite direction of what's configured in 'tabs.select_on_remove'. force: Avoid confirmation for pinned tabs. count: The tab index to close, or None """ tab = self._cntwidget(count) if tab is None: return close = functools.partial(self._tab_close, tab, prev, next_, opposite) self._tabbed_browser.tab_close_prompt_if_pinned(tab, force, close) @cmdutils.register(instance='command-dispatcher', scope='window', name='tab-pin') @cmdutils.argument('count', count=True) def tab_pin(self, count=None): """Pin/Unpin the current/[count]th tab. Pinning a tab shrinks it to the size of its title text. Attempting to close a pinned tab will cause a confirmation, unless --force is passed. Args: count: The tab index to pin or unpin, or None """ tab = self._cntwidget(count) if tab is None: return to_pin = not tab.data.pinned self._tabbed_browser.widget.set_tab_pinned(tab, to_pin) @cmdutils.register(instance='command-dispatcher', name='open', maxsplit=0, scope='window') @cmdutils.argument('url', completion=urlmodel.url) @cmdutils.argument('count', count=True) def openurl(self, url=None, related=False, bg=False, tab=False, window=False, count=None, secure=False, private=False): """Open a URL in the current/[count]th tab. If the URL contains newlines, each line gets opened in its own tab. Args: url: The URL to open. bg: Open in a new background tab. tab: Open in a new tab. window: Open in a new window. related: If opening a new tab, position the tab as related to the current one (like clicking on a link). count: The tab index to open the URL in, or None. secure: Force HTTPS. private: Open a new window in private browsing mode. """ if url is None: urls = [config.val.url.default_page] else: urls = self._parse_url_input(url) for i, cur_url in enumerate(urls): if secure: cur_url.setScheme('https') if not window and i > 0: tab = False bg = True if tab or bg or window or private: self._open(cur_url, tab, bg, window, related=related, private=private) else: curtab = self._cntwidget(count) if curtab is None: if count is None: # We want to open a URL in the current tab, but none # exists yet. self._tabbed_browser.tabopen(cur_url) else: # Explicit count with a tab that doesn't exist. return elif curtab.data.pinned: message.info("Tab is pinned!") else: curtab.openurl(cur_url) def _parse_url(self, url, *, force_search=False): """Parse a URL or quickmark or search query. Args: url: The URL to parse. force_search: Whether to force a search even if the content can be interpreted as a URL or a path. Return: A URL that can be opened. 
""" try: return objreg.get('quickmark-manager').get(url) except urlmarks.Error: try: return urlutils.fuzzy_url(url, force_search=force_search) except urlutils.InvalidUrlError as e: # We don't use cmdexc.CommandError here as this can be # called async from edit_url message.error(str(e)) return None def _parse_url_input(self, url): """Parse a URL or newline-separated list of URLs. Args: url: The URL or list to parse. Return: A list of URLs that can be opened. """ if isinstance(url, QUrl): yield url return force_search = False urllist = [u for u in url.split('\n') if u.strip()] if (len(urllist) > 1 and not urlutils.is_url(urllist[0]) and urlutils.get_path_if_valid(urllist[0], check_exists=True) is None): urllist = [url] force_search = True for cur_url in urllist: parsed = self._parse_url(cur_url, force_search=force_search) if parsed is not None: yield parsed @cmdutils.register(instance='command-dispatcher', name='reload', scope='window') @cmdutils.argument('count', count=True) def reloadpage(self, force=False, count=None): """Reload the current/[count]th tab. Args: count: The tab index to reload, or None. force: Bypass the page cache. """ tab = self._cntwidget(count) if tab is not None: tab.reload(force=force) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def stop(self, count=None): """Stop loading in the current/[count]th tab. Args: count: The tab index to stop, or None. """ tab = self._cntwidget(count) if tab is not None: tab.stop() def _print_preview(self, tab): """Show a print preview.""" def print_callback(ok): if not ok: message.error("Printing failed!") tab.printing.check_preview_support() diag = QPrintPreviewDialog(tab) diag.setAttribute(Qt.WA_DeleteOnClose) diag.setWindowFlags(diag.windowFlags() | Qt.WindowMaximizeButtonHint | Qt.WindowMinimizeButtonHint) diag.paintRequested.connect(functools.partial( tab.printing.to_printer, callback=print_callback)) diag.exec_() def _print_pdf(self, tab, filename): """Print to the given PDF file.""" tab.printing.check_pdf_support() filename = os.path.expanduser(filename) directory = os.path.dirname(filename) if directory and not os.path.exists(directory): os.mkdir(directory) tab.printing.to_pdf(filename) log.misc.debug("Print to file: {}".format(filename)) @cmdutils.register(instance='command-dispatcher', name='print', scope='window') @cmdutils.argument('count', count=True) @cmdutils.argument('pdf', flag='f', metavar='file') def printpage(self, preview=False, count=None, *, pdf=None): """Print the current/[count]th tab. Args: preview: Show preview instead of printing. count: The tab index to print, or None. pdf: The file path to write the PDF to. """ tab = self._cntwidget(count) if tab is None: return try: if preview: self._print_preview(tab) elif pdf: self._print_pdf(tab, pdf) else: tab.printing.show_dialog() except browsertab.WebTabError as e: raise cmdexc.CommandError(e) @cmdutils.register(instance='command-dispatcher', scope='window') def tab_clone(self, bg=False, window=False): """Duplicate the current tab. Args: bg: Open in a background tab. window: Open in a new window. Return: The new QWebView. """ cmdutils.check_exclusive((bg, window), 'bw') curtab = self._current_widget() cur_title = self._tabbed_browser.widget.page_title( self._current_index()) try: history = curtab.history.serialize() except browsertab.WebTabError as e: raise cmdexc.CommandError(e) # The new tab could be in a new tabbed_browser (e.g. 
because of # tabs.tabs_are_windows being set) if window: new_tabbed_browser = self._new_tabbed_browser( private=self._tabbed_browser.private) else: new_tabbed_browser = self._tabbed_browser newtab = new_tabbed_browser.tabopen(background=bg) new_tabbed_browser = objreg.get('tabbed-browser', scope='window', window=newtab.win_id) idx = new_tabbed_browser.widget.indexOf(newtab) new_tabbed_browser.widget.set_page_title(idx, cur_title) if curtab.data.should_show_icon(): new_tabbed_browser.widget.setTabIcon(idx, curtab.icon()) if config.val.tabs.tabs_are_windows: new_tabbed_browser.widget.window().setWindowIcon(curtab.icon()) newtab.data.keep_icon = True newtab.history.deserialize(history) newtab.zoom.set_factor(curtab.zoom.factor()) new_tabbed_browser.widget.set_tab_pinned(newtab, curtab.data.pinned) return newtab @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('index', completion=miscmodels.other_buffer) def tab_take(self, index): """Take a tab from another window. Args: index: The [win_id/]index of the tab to take. Or a substring in which case the closest match will be taken. """ tabbed_browser, tab = self._resolve_buffer_index(index) if tabbed_browser is self._tabbed_browser: raise cmdexc.CommandError("Can't take a tab from the same window") self._open(tab.url(), tab=True) tabbed_browser.close_tab(tab, add_undo=False) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('win_id', completion=miscmodels.window) @cmdutils.argument('count', count=True) def tab_give(self, win_id: int = None, count=None): """Give the current tab to a new or existing window if win_id given. If no win_id is given, the tab will get detached into a new window. Args: win_id: The window ID of the window to give the current tab to. count: Overrides win_id (index starts at 1 for win_id=0). """ if count is not None: win_id = count - 1 if win_id == self._win_id: raise cmdexc.CommandError("Can't give a tab to the same window") if win_id is None: if self._count() < 2: raise cmdexc.CommandError("Cannot detach from a window with " "only one tab") tabbed_browser = self._new_tabbed_browser( private=self._tabbed_browser.private) else: if win_id not in objreg.window_registry: raise cmdexc.CommandError( "There's no window with id {}!".format(win_id)) tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) tabbed_browser.tabopen(self._current_url()) self._tabbed_browser.close_tab(self._current_widget(), add_undo=False) def _back_forward(self, tab, bg, window, count, forward): """Helper function for :back/:forward.""" history = self._current_widget().history # Catch common cases before e.g. cloning tab if not forward and not history.can_go_back(): raise cmdexc.CommandError("At beginning of history.") elif forward and not history.can_go_forward(): raise cmdexc.CommandError("At end of history.") if tab or bg or window: widget = self.tab_clone(bg, window) else: widget = self._current_widget() try: if forward: widget.history.forward(count) else: widget.history.back(count) except browsertab.WebTabError as e: raise cmdexc.CommandError(e) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def back(self, tab=False, bg=False, window=False, count=1): """Go back in the history of the current tab. Args: tab: Go back in a new tab. bg: Go back in a background tab. window: Go back in a new window. count: How many pages to go back. 
""" self._back_forward(tab, bg, window, count, forward=False) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def forward(self, tab=False, bg=False, window=False, count=1): """Go forward in the history of the current tab. Args: tab: Go forward in a new tab. bg: Go forward in a background tab. window: Go forward in a new window. count: How many pages to go forward. """ self._back_forward(tab, bg, window, count, forward=True) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('where', choices=['prev', 'next', 'up', 'increment', 'decrement']) @cmdutils.argument('count', count=True) def navigate(self, where: str, tab=False, bg=False, window=False, count=1): """Open typical prev/next links or navigate using the URL path. This tries to automatically click on typical _Previous Page_ or _Next Page_ links using some heuristics. Alternatively it can navigate by changing the current URL. Args: where: What to open. - `prev`: Open a _previous_ link. - `next`: Open a _next_ link. - `up`: Go up a level in the current URL. - `increment`: Increment the last number in the URL. Uses the link:settings.html#url.incdec_segments[url.incdec_segments] config option. - `decrement`: Decrement the last number in the URL. Uses the link:settings.html#url.incdec_segments[url.incdec_segments] config option. tab: Open in a new tab. bg: Open in a background tab. window: Open in a new window. count: For `increment` and `decrement`, the number to change the URL by. For `up`, the number of levels to go up in the URL. """ # save the pre-jump position in the special ' mark self.set_mark("'") cmdutils.check_exclusive((tab, bg, window), 'tbw') widget = self._current_widget() url = self._current_url().adjusted(QUrl.RemoveFragment) handlers = { 'prev': functools.partial(navigate.prevnext, prev=True), 'next': functools.partial(navigate.prevnext, prev=False), 'up': navigate.path_up, 'decrement': functools.partial(navigate.incdec, inc_or_dec='decrement'), 'increment': functools.partial(navigate.incdec, inc_or_dec='increment'), } try: if where in ['prev', 'next']: handler = handlers[where] handler(browsertab=widget, win_id=self._win_id, baseurl=url, tab=tab, background=bg, window=window) elif where in ['up', 'increment', 'decrement']: new_url = handlers[where](url, count) self._open(new_url, tab, bg, window, related=True) else: # pragma: no cover raise ValueError("Got called with invalid value {} for " "`where'.".format(where)) except navigate.Error as e: raise cmdexc.CommandError(e) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def scroll_px(self, dx: int, dy: int, count=1): """Scroll the current tab by 'count * dx/dy' pixels. Args: dx: How much to scroll in x-direction. dy: How much to scroll in y-direction. count: multiplier """ dx *= count dy *= count cmdutils.check_overflow(dx, 'int') cmdutils.check_overflow(dy, 'int') self._current_widget().scroller.delta(dx, dy) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def scroll(self, direction: typing.Union[str, int], count=1): """Scroll the current tab in the given direction. Note you can use `:run-with-count` to have a keybinding with a bigger scroll increment. Args: direction: In which direction to scroll (up/down/left/right/top/bottom). 
count: multiplier """ tab = self._current_widget() funcs = { 'up': tab.scroller.up, 'down': tab.scroller.down, 'left': tab.scroller.left, 'right': tab.scroller.right, 'top': tab.scroller.top, 'bottom': tab.scroller.bottom, 'page-up': tab.scroller.page_up, 'page-down': tab.scroller.page_down, } try: func = funcs[direction] except KeyError: expected_values = ', '.join(sorted(funcs)) raise cmdexc.CommandError("Invalid value {!r} for direction - " "expected one of: {}".format( direction, expected_values)) if direction in ['top', 'bottom']: func() else: func(count=count) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) @cmdutils.argument('horizontal', flag='x') def scroll_to_perc(self, perc: float = None, horizontal=False, count=None): """Scroll to a specific percentage of the page. The percentage can be given either as argument or as count. If no percentage is given, the page is scrolled to the end. Args: perc: Percentage to scroll. horizontal: Scroll horizontally instead of vertically. count: Percentage to scroll. """ # save the pre-jump position in the special ' mark self.set_mark("'") if perc is None and count is None: perc = 100 elif count is not None: perc = count if horizontal: x = perc y = None else: x = None y = perc self._current_widget().scroller.to_perc(x, y) @cmdutils.register(instance='command-dispatcher', scope='window') def scroll_to_anchor(self, name): """Scroll to the given anchor in the document. Args: name: The anchor to scroll to. """ self._current_widget().scroller.to_anchor(name) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) @cmdutils.argument('top_navigate', metavar='ACTION', choices=('prev', 'decrement')) @cmdutils.argument('bottom_navigate', metavar='ACTION', choices=('next', 'increment')) def scroll_page(self, x: float, y: float, *, top_navigate: str = None, bottom_navigate: str = None, count=1): """Scroll the frame page-wise. Args: x: How many pages to scroll to the right. y: How many pages to scroll down. bottom_navigate: :navigate action (next, increment) to run when scrolling down at the bottom of the page. top_navigate: :navigate action (prev, decrement) to run when scrolling up at the top of the page. 
count: multiplier """ tab = self._current_widget() if not tab.url().isValid(): # See https://github.com/qutebrowser/qutebrowser/issues/701 return if bottom_navigate is not None and tab.scroller.at_bottom(): self.navigate(bottom_navigate) return elif top_navigate is not None and tab.scroller.at_top(): self.navigate(top_navigate) return try: tab.scroller.delta_page(count * x, count * y) except OverflowError: raise cmdexc.CommandError( "Numeric argument is too large for internal int " "representation.") def _yank_url(self, what): """Helper method for yank() to get the URL to copy.""" assert what in ['url', 'pretty-url'], what flags = QUrl.RemovePassword if what == 'pretty-url': flags |= QUrl.DecodeReserved else: flags |= QUrl.FullyEncoded url = QUrl(self._current_url()) url_query = QUrlQuery() url_query_str = urlutils.query_string(url) if '&' not in url_query_str and ';' in url_query_str: url_query.setQueryDelimiters('=', ';') url_query.setQuery(url_query_str) for key in dict(url_query.queryItems()): if key in config.val.url.yank_ignored_parameters: url_query.removeQueryItem(key) url.setQuery(url_query) return url.toString(flags) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('what', choices=['selection', 'url', 'pretty-url', 'title', 'domain']) def yank(self, what='url', sel=False, keep=False): """Yank something to the clipboard or primary selection. Args: what: What to yank. - `url`: The current URL. - `pretty-url`: The URL in pretty decoded form. - `title`: The current page's title. - `domain`: The current scheme, domain, and port number. - `selection`: The selection under the cursor. sel: Use the primary selection instead of the clipboard. keep: Stay in visual mode after yanking the selection. """ if what == 'title': s = self._tabbed_browser.widget.page_title(self._current_index()) elif what == 'domain': port = self._current_url().port() s = '{}://{}{}'.format(self._current_url().scheme(), self._current_url().host(), ':' + str(port) if port > -1 else '') elif what in ['url', 'pretty-url']: s = self._yank_url(what) what = 'URL' # For printing elif what == 'selection': def _selection_callback(s): if not s: message.info("Nothing to yank") return self._yank_to_target(s, sel, what, keep) caret = self._current_widget().caret caret.selection(callback=_selection_callback) return else: # pragma: no cover raise ValueError("Invalid value {!r} for `what'.".format(what)) self._yank_to_target(s, sel, what, keep) def _yank_to_target(self, s, sel, what, keep): if sel and utils.supports_selection(): target = "primary selection" else: sel = False target = "clipboard" utils.set_clipboard(s, selection=sel) if what != 'selection': message.info("Yanked {} to {}: {}".format(what, target, s)) else: message.info("{} {} yanked to {}".format( len(s), "char" if len(s) == 1 else "chars", target)) if not keep: modeman.leave(self._win_id, KeyMode.caret, "yank selected", maybe=True) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def zoom_in(self, count=1): """Increase the zoom level for the current tab. Args: count: How many steps to zoom in. """ tab = self._current_widget() try: perc = tab.zoom.offset(count) except ValueError as e: raise cmdexc.CommandError(e) message.info("Zoom level: {}%".format(int(perc)), replace=True) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def zoom_out(self, count=1): """Decrease the zoom level for the current tab. 
Args: count: How many steps to zoom out. """ tab = self._current_widget() try: perc = tab.zoom.offset(-count) except ValueError as e: raise cmdexc.CommandError(e) message.info("Zoom level: {}%".format(int(perc)), replace=True) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def zoom(self, zoom=None, count=None): """Set the zoom level for the current tab. The zoom can be given as argument or as [count]. If neither is given, the zoom is set to the default zoom. If both are given, use [count]. Args: zoom: The zoom percentage to set. count: The zoom percentage to set. """ if zoom is not None: try: zoom = int(zoom.rstrip('%')) except ValueError: raise cmdexc.CommandError("zoom: Invalid int value {}" .format(zoom)) level = count if count is not None else zoom if level is None: level = config.val.zoom.default tab = self._current_widget() try: tab.zoom.set_factor(float(level) / 100) except ValueError: raise cmdexc.CommandError("Can't zoom {}%!".format(level)) message.info("Zoom level: {}%".format(int(level)), replace=True) @cmdutils.register(instance='command-dispatcher', scope='window') def tab_only(self, prev=False, next_=False, force=False): """Close all tabs except for the current one. Args: prev: Keep tabs before the current. next_: Keep tabs after the current. force: Avoid confirmation for pinned tabs. """ cmdutils.check_exclusive((prev, next_), 'pn') cur_idx = self._tabbed_browser.widget.currentIndex() assert cur_idx != -1 def _to_close(i): """Helper method to check if a tab should be closed or not.""" return not (i == cur_idx or (prev and i < cur_idx) or (next_ and i > cur_idx)) # close as many tabs as we can first_tab = True pinned_tabs_cleanup = False for i, tab in enumerate(self._tabbed_browser.widgets()): if _to_close(i): if force or not tab.data.pinned: self._tabbed_browser.close_tab(tab, new_undo=first_tab) first_tab = False else: pinned_tabs_cleanup = tab # Check to see if we would like to close any pinned tabs if pinned_tabs_cleanup: self._tabbed_browser.tab_close_prompt_if_pinned( pinned_tabs_cleanup, force, lambda: self.tab_only( prev=prev, next_=next_, force=True), text="Are you sure you want to close pinned tabs?") @cmdutils.register(instance='command-dispatcher', scope='window') def undo(self): """Re-open the last closed tab or tabs.""" try: self._tabbed_browser.undo() except IndexError: raise cmdexc.CommandError("Nothing to undo!") @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def tab_prev(self, count=1): """Switch to the previous tab, or switch [count] tabs back. Args: count: How many tabs to switch back. """ if self._count() == 0: # Running :tab-prev after last tab was closed # See https://github.com/qutebrowser/qutebrowser/issues/1448 return newidx = self._current_index() - count if newidx >= 0: self._set_current_index(newidx) elif config.val.tabs.wrap: self._set_current_index(newidx % self._count()) else: log.webview.debug("First tab") @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def tab_next(self, count=1): """Switch to the next tab, or switch [count] tabs forward. Args: count: How many tabs to switch forward. 
""" if self._count() == 0: # Running :tab-next after last tab was closed # See https://github.com/qutebrowser/qutebrowser/issues/1448 return newidx = self._current_index() + count if newidx < self._count(): self._set_current_index(newidx) elif config.val.tabs.wrap: self._set_current_index(newidx % self._count()) else: log.webview.debug("Last tab") def _resolve_buffer_index(self, index): """Resolve a buffer index to the tabbedbrowser and tab. Args: index: The [win_id/]index of the tab to be selected. Or a substring in which case the closest match will be focused. """ index_parts = index.split('/', 1) try: for part in index_parts: int(part) except ValueError: model = miscmodels.buffer() model.set_pattern(index) if model.count() > 0: index = model.data(model.first_item()) index_parts = index.split('/', 1) else: raise cmdexc.CommandError( "No matching tab for: {}".format(index)) if len(index_parts) == 2: win_id = int(index_parts[0]) idx = int(index_parts[1]) elif len(index_parts) == 1: idx = int(index_parts[0]) active_win = objreg.get('app').activeWindow() if active_win is None: # Not sure how you enter a command without an active window... raise cmdexc.CommandError( "No window specified and couldn't find active window!") win_id = active_win.win_id if win_id not in objreg.window_registry: raise cmdexc.CommandError( "There's no window with id {}!".format(win_id)) tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) if not 0 < idx <= tabbed_browser.widget.count(): raise cmdexc.CommandError( "There's no tab with index {}!".format(idx)) return (tabbed_browser, tabbed_browser.widget.widget(idx-1)) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0) @cmdutils.argument('index', completion=miscmodels.buffer) @cmdutils.argument('count', count=True) def buffer(self, index=None, count=None): """Select tab by index or url/title best match. Focuses window if necessary when index is given. If both index and count are given, use count. With neither index nor count given, open the qute://tabs page. Args: index: The [win_id/]index of the tab to focus. Or a substring in which case the closest match will be focused. count: The tab index to focus, starting with 1. """ if count is None and index is None: self.openurl('qute://tabs/', tab=True) return if count is not None: index = str(count) tabbed_browser, tab = self._resolve_buffer_index(index) window = tabbed_browser.widget.window() window.activateWindow() window.raise_() tabbed_browser.widget.setCurrentWidget(tab) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('index', choices=['last']) @cmdutils.argument('count', count=True) def tab_focus(self, index: typing.Union[str, int] = None, count=None, no_last=False): """Select the tab given as argument/[count]. If neither count nor index are given, it behaves like tab-next. If both are given, use count. Args: index: The tab index to focus, starting with 1. The special value `last` focuses the last focused tab (regardless of count). Negative indices count from the end, such that -1 is the last tab. count: The tab index to focus, starting with 1. no_last: Whether to avoid focusing last tab if already focused. 
""" index = count if count is not None else index if index == 'last': self._tab_focus_last() return elif not no_last and index == self._current_index() + 1: self._tab_focus_last(show_error=False) return elif index is None: self.tab_next() return if index < 0: index = self._count() + index + 1 if 1 <= index <= self._count(): self._set_current_index(index - 1) else: raise cmdexc.CommandError("There's no tab with index {}!".format( index)) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('index', choices=['+', '-']) @cmdutils.argument('count', count=True) def tab_move(self, index: typing.Union[str, int] = None, count=None): """Move the current tab according to the argument and [count]. If neither is given, move it to the first position. Args: index: `+` or `-` to move relative to the current tab by count, or a default of 1 space. A tab index to move to that index. count: If moving relatively: Offset. If moving absolutely: New position (default: 0). This overrides the index argument, if given. """ if index in ['+', '-']: # relative moving new_idx = self._current_index() delta = 1 if count is None else count if index == '-': new_idx -= delta elif index == '+': # pragma: no branch new_idx += delta if config.val.tabs.wrap: new_idx %= self._count() else: # absolute moving if count is not None: new_idx = count - 1 elif index is not None: new_idx = index - 1 if index >= 0 else index + self._count() else: new_idx = 0 if not 0 <= new_idx < self._count(): raise cmdexc.CommandError("Can't move tab to position {}!".format( new_idx + 1)) cur_idx = self._current_index() cmdutils.check_overflow(cur_idx, 'int') cmdutils.check_overflow(new_idx, 'int') self._tabbed_browser.widget.tabBar().moveTab(cur_idx, new_idx) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0, no_replace_variables=True) @cmdutils.argument('count', count=True) def spawn(self, cmdline, userscript=False, verbose=False, output=False, detach=False, count=None): """Spawn a command in a shell. Args: userscript: Run the command as a userscript. You can use an absolute path, or store the userscript in one of those locations: - `~/.local/share/qutebrowser/userscripts` (or `$XDG_DATA_DIR`) - `/usr/share/qutebrowser/userscripts` verbose: Show notifications when the command started/exited. output: Whether the output should be shown in a new tab. detach: Whether the command should be detached from qutebrowser. cmdline: The commandline to execute. count: Given to userscripts as $QUTE_COUNT. """ cmdutils.check_exclusive((userscript, detach), 'ud') try: cmd, *args = shlex.split(cmdline) except ValueError as e: raise cmdexc.CommandError("Error while splitting command: " "{}".format(e)) args = runners.replace_variables(self._win_id, args) log.procs.debug("Executing {} with args {}, userscript={}".format( cmd, args, userscript)) @pyqtSlot() def _on_proc_finished(): if output: tb = objreg.get('tabbed-browser', scope='window', window='last-focused') tb.openurl(QUrl('qute://spawn-output'), newtab=True) if userscript: def _selection_callback(s): try: runner = self._run_userscript(s, cmd, args, verbose, count) runner.finished.connect(_on_proc_finished) except cmdexc.CommandError as e: message.error(str(e)) # ~ expansion is handled by the userscript module. 
# dirty hack for async call because of: # https://bugreports.qt.io/browse/QTBUG-53134 # until it fixed or blocked async call implemented: # https://github.com/qutebrowser/qutebrowser/issues/3327 caret = self._current_widget().caret caret.selection(callback=_selection_callback) else: cmd = os.path.expanduser(cmd) proc = guiprocess.GUIProcess(what='command', verbose=verbose, parent=self._tabbed_browser) if detach: proc.start_detached(cmd, args) else: proc.start(cmd, args) proc.finished.connect(_on_proc_finished) @cmdutils.register(instance='command-dispatcher', scope='window') def home(self): """Open main startpage in current tab.""" self.openurl(config.val.url.start_pages[0]) def _run_userscript(self, selection, cmd, args, verbose, count): """Run a userscript given as argument. Args: cmd: The userscript to run. args: Arguments to pass to the userscript. verbose: Show notifications when the command started/exited. count: Exposed to the userscript. """ env = { 'QUTE_MODE': 'command', 'QUTE_SELECTED_TEXT': selection, } if count is not None: env['QUTE_COUNT'] = str(count) idx = self._current_index() if idx != -1: env['QUTE_TITLE'] = self._tabbed_browser.widget.page_title(idx) # FIXME:qtwebengine: If tab is None, run_async will fail! tab = self._tabbed_browser.widget.currentWidget() try: url = self._tabbed_browser.current_url() except qtutils.QtValueError: pass else: env['QUTE_URL'] = url.toString(QUrl.FullyEncoded) try: runner = userscripts.run_async( tab, cmd, *args, win_id=self._win_id, env=env, verbose=verbose) except userscripts.Error as e: raise cmdexc.CommandError(e) return runner @cmdutils.register(instance='command-dispatcher', scope='window') def quickmark_save(self): """Save the current page as a quickmark.""" quickmark_manager = objreg.get('quickmark-manager') quickmark_manager.prompt_save(self._current_url()) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0) @cmdutils.argument('name', completion=miscmodels.quickmark) def quickmark_load(self, name, tab=False, bg=False, window=False): """Load a quickmark. Args: name: The name of the quickmark to load. tab: Load the quickmark in a new tab. bg: Load the quickmark in a new background tab. window: Load the quickmark in a new window. """ try: url = objreg.get('quickmark-manager').get(name) except urlmarks.Error as e: raise cmdexc.CommandError(str(e)) self._open(url, tab, bg, window) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0) @cmdutils.argument('name', completion=miscmodels.quickmark) def quickmark_del(self, name=None): """Delete a quickmark. Args: name: The name of the quickmark to delete. If not given, delete the quickmark for the current page (choosing one arbitrarily if there are more than one). """ quickmark_manager = objreg.get('quickmark-manager') if name is None: url = self._current_url() try: name = quickmark_manager.get_by_qurl(url) except urlmarks.DoesNotExistError as e: raise cmdexc.CommandError(str(e)) try: quickmark_manager.delete(name) except KeyError: raise cmdexc.CommandError("Quickmark '{}' not found!".format(name)) @cmdutils.register(instance='command-dispatcher', scope='window') def bookmark_add(self, url=None, title=None, toggle=False): """Save the current page as a bookmark, or a specific url. If no url and title are provided, then save the current page as a bookmark. If a url and title have been provided, then save the given url as a bookmark with the provided title. You can view all saved bookmarks on the link:qute://bookmarks[bookmarks page]. 
Args: url: url to save as a bookmark. If not given, use url of current page. title: title of the new bookmark. toggle: remove the bookmark instead of raising an error if it already exists. """ if url and not title: raise cmdexc.CommandError('Title must be provided if url has ' 'been provided') bookmark_manager = objreg.get('bookmark-manager') if not url: url = self._current_url() else: try: url = urlutils.fuzzy_url(url) except urlutils.InvalidUrlError as e: raise cmdexc.CommandError(e) if not title: title = self._current_title() try: was_added = bookmark_manager.add(url, title, toggle=toggle) except urlmarks.Error as e: raise cmdexc.CommandError(str(e)) else: msg = "Bookmarked {}" if was_added else "Removed bookmark {}" message.info(msg.format(url.toDisplayString())) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0) @cmdutils.argument('url', completion=miscmodels.bookmark) def bookmark_load(self, url, tab=False, bg=False, window=False, delete=False): """Load a bookmark. Args: url: The url of the bookmark to load. tab: Load the bookmark in a new tab. bg: Load the bookmark in a new background tab. window: Load the bookmark in a new window. delete: Whether to delete the bookmark afterwards. """ try: qurl = urlutils.fuzzy_url(url) except urlutils.InvalidUrlError as e: raise cmdexc.CommandError(e) self._open(qurl, tab, bg, window) if delete: self.bookmark_del(url) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0) @cmdutils.argument('url', completion=miscmodels.bookmark) def bookmark_del(self, url=None): """Delete a bookmark. Args: url: The url of the bookmark to delete. If not given, use the current page's url. """ if url is None: url = self._current_url().toString(QUrl.RemovePassword | QUrl.FullyEncoded) try: objreg.get('bookmark-manager').delete(url) except KeyError: raise cmdexc.CommandError("Bookmark '{}' not found!".format(url)) @cmdutils.register(instance='command-dispatcher', scope='window') def follow_selected(self, *, tab=False): """Follow the selected text. Args: tab: Load the selected link in a new tab. """ try: self._current_widget().caret.follow_selected(tab=tab) except browsertab.WebTabError as e: raise cmdexc.CommandError(str(e)) @cmdutils.register(instance='command-dispatcher', name='inspector', scope='window') def toggle_inspector(self): """Toggle the web inspector. Note: Due a bug in Qt, the inspector will show incorrect request headers in the network tab. """ tab = self._current_widget() # FIXME:qtwebengine have a proper API for this page = tab._widget.page() # pylint: disable=protected-access try: if tab.data.inspector is None: tab.data.inspector = inspector.create() tab.data.inspector.inspect(page) tab.data.inspector.show() else: tab.data.inspector.toggle(page) except inspector.WebInspectorError as e: raise cmdexc.CommandError(e) @cmdutils.register(instance='command-dispatcher', scope='window') def download(self, url=None, *, mhtml_=False, dest=None): """Download a given URL, or current page if no URL given. Args: url: The URL to download. If not given, download the current page. dest: The file path to write the download to, or None to ask. mhtml_: Download the current page and all assets as mhtml file. """ # FIXME:qtwebengine do this with the QtWebEngine download manager? 
download_manager = objreg.get('qtnetwork-download-manager') target = None if dest is not None: dest = downloads.transform_path(dest) if dest is None: raise cmdexc.CommandError("Invalid target filename") target = downloads.FileDownloadTarget(dest) tab = self._current_widget() user_agent = tab.user_agent() if url: if mhtml_: raise cmdexc.CommandError("Can only download the current page" " as mhtml.") url = urlutils.qurl_from_user_input(url) urlutils.raise_cmdexc_if_invalid(url) download_manager.get(url, user_agent=user_agent, target=target) elif mhtml_: tab = self._current_widget() if tab.backend == usertypes.Backend.QtWebEngine: webengine_download_manager = objreg.get( 'webengine-download-manager') try: webengine_download_manager.get_mhtml(tab, target) except browsertab.UnsupportedOperationError as e: raise cmdexc.CommandError(e) else: download_manager.get_mhtml(tab, target) else: qnam = tab.networkaccessmanager() suggested_fn = downloads.suggested_fn_from_title( self._current_url().path(), tab.title() ) download_manager.get( self._current_url(), user_agent=user_agent, qnam=qnam, target=target, suggested_fn=suggested_fn ) @cmdutils.register(instance='command-dispatcher', scope='window') def view_source(self, edit=False, pygments=False): """Show the source of the current page in a new tab. Args: edit: Edit the source in the editor instead of opening a tab. pygments: Use pygments to generate the view. This is always the case for QtWebKit. For QtWebEngine it may display slightly different source. Some JavaScript processing may be applied. """ tab = self._current_widget() try: current_url = self._current_url() except cmdexc.CommandError as e: message.error(str(e)) return if current_url.scheme() == 'view-source' or tab.data.viewing_source: raise cmdexc.CommandError("Already viewing source!") if edit: ed = editor.ExternalEditor(self._tabbed_browser) tab.dump_async(ed.edit) else: tab.action.show_source(pygments) @cmdutils.register(instance='command-dispatcher', scope='window', debug=True) def debug_dump_page(self, dest, plain=False): """Dump the current page's content to a file. Args: dest: Where to write the file to. plain: Write plain text instead of HTML. """ tab = self._current_widget() dest = os.path.expanduser(dest) def callback(data): """Write the data to disk.""" try: with open(dest, 'w', encoding='utf-8') as f: f.write(data) except OSError as e: message.error('Could not write page: {}'.format(e)) else: message.info("Dumped page to {}.".format(dest)) tab.dump_async(callback, plain=plain) @cmdutils.register(instance='command-dispatcher', scope='window') def history(self, tab=True, bg=False, window=False): """Show browsing history. Args: tab: Open in a new tab. bg: Open in a background tab. window: Open in a new window. """ url = QUrl('qute://history/') self._open(url, tab, bg, window) @cmdutils.register(instance='command-dispatcher', name='help', scope='window') @cmdutils.argument('topic', completion=miscmodels.helptopic) def show_help(self, tab=False, bg=False, window=False, topic=None): r"""Show help about a command or setting. Args: tab: Open in a new tab. bg: Open in a background tab. window: Open in a new window. topic: The topic to show help for. - :__command__ for commands. - __section__.__option__ for settings. 
""" if topic is None: path = 'index.html' elif topic.startswith(':'): command = topic[1:] if command not in cmdutils.cmd_dict: raise cmdexc.CommandError("Invalid command {}!".format( command)) path = 'commands.html#{}'.format(command) elif topic in configdata.DATA: path = 'settings.html#{}'.format(topic) else: raise cmdexc.CommandError("Invalid help topic {}!".format(topic)) url = QUrl('qute://help/{}'.format(path)) self._open(url, tab, bg, window) @cmdutils.register(instance='command-dispatcher', scope='window') def messages(self, level='info', plain=False, tab=False, bg=False, window=False): """Show a log of past messages. Args: level: Include messages with `level` or higher severity. Valid values: vdebug, debug, info, warning, error, critical. plain: Whether to show plaintext (as opposed to html). tab: Open in a new tab. bg: Open in a background tab. window: Open in a new window. """ if level.upper() not in log.LOG_LEVELS: raise cmdexc.CommandError("Invalid log level {}!".format(level)) if plain: url = QUrl('qute://plainlog?level={}'.format(level)) else: url = QUrl('qute://log?level={}'.format(level)) self._open(url, tab, bg, window) def _open_editor_cb(self, elem): """Open editor after the focus elem was found in open_editor.""" if elem is None: message.error("No element focused!") return if not elem.is_editable(strict=True): message.error("Focused element is not editable!") return text = elem.value() if text is None: message.error("Could not get text from the focused element.") return assert isinstance(text, str), text caret_position = elem.caret_position() ed = editor.ExternalEditor(watch=True, parent=self._tabbed_browser) ed.file_updated.connect(functools.partial( self.on_file_updated, ed, elem)) ed.editing_finished.connect(lambda: mainwindow.raise_window( objreg.last_focused_window(), alert=False)) ed.edit(text, caret_position) @cmdutils.register(instance='command-dispatcher', scope='window') def open_editor(self): """Open an external editor with the currently selected form field. The editor which should be launched can be configured via the `editor.command` config option. """ tab = self._current_widget() tab.elements.find_focused(self._open_editor_cb) def on_file_updated(self, ed, elem, text): """Write the editor text into the form field and clean up tempfile. Callback for GUIProcess when the edited text was updated. Args: elem: The WebElementWrapper which was modified. text: The new text to insert. """ try: elem.set_value(text) except webelem.OrphanedError: message.error('Edited element vanished') ed.backup() except webelem.Error as e: message.error(str(e)) ed.backup() @cmdutils.register(instance='command-dispatcher', maxsplit=0, scope='window') def insert_text(self, text): """Insert text at cursor position. Args: text: The text to insert. """ tab = self._current_widget() def _insert_text_cb(elem): if elem is None: message.error("No element focused!") return try: elem.insert_text(text) except webelem.Error as e: message.error(str(e)) return tab.elements.find_focused(_insert_text_cb) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('filter_', choices=['id']) def click_element(self, filter_: str, value, *, target: usertypes.ClickTarget = usertypes.ClickTarget.normal, force_event=False): """Click the element matching the given filter. The given filter needs to result in exactly one element, otherwise, an error is shown. Args: filter_: How to filter the elements. id: Get an element based on its ID. value: The value to filter for. 
target: How to open the clicked element (normal/tab/tab-bg/window). force_event: Force generating a fake click event. """ tab = self._current_widget() def single_cb(elem): """Click a single element.""" if elem is None: message.error("No element found with id {}!".format(value)) return try: elem.click(target, force_event=force_event) except webelem.Error as e: message.error(str(e)) return # def multiple_cb(elems): # """Click multiple elements (with only one expected).""" # if not elems: # message.error("No element found!") # return # elif len(elems) != 1: # message.error("{} elements found!".format(len(elems))) # return # elems[0].click(target) handlers = { 'id': (tab.elements.find_id, single_cb), } handler, callback = handlers[filter_] handler(value, callback) def _search_cb(self, found, *, tab, old_scroll_pos, options, text, prev): """Callback called from search/search_next/search_prev. Args: found: Whether the text was found. tab: The AbstractTab in which the search was made. old_scroll_pos: The scroll position (QPoint) before the search. options: The options (dict) the search was made with. text: The text searched for. prev: Whether we're searching backwards (i.e. :search-prev) """ # :search/:search-next without reverse -> down # :search/:search-next with reverse -> up # :search-prev without reverse -> up # :search-prev with reverse -> down going_up = options['reverse'] ^ prev if found: # Check if the scroll position got smaller and show info. if not going_up and tab.scroller.pos_px().y() < old_scroll_pos.y(): message.info("Search hit BOTTOM, continuing at TOP") elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y(): message.info("Search hit TOP, continuing at BOTTOM") else: message.warning("Text '{}' not found on page!".format(text), replace=True) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0) def search(self, text="", reverse=False): """Search for a text on the current page. With no text, clear results. Args: text: The text to search for. reverse: Reverse search direction. """ self.set_mark("'") tab = self._current_widget() if not text: if tab.search.search_displayed: tab.search.clear() return options = { 'ignore_case': config.val.search.ignore_case, 'reverse': reverse, } self._tabbed_browser.search_text = text self._tabbed_browser.search_options = dict(options) cb = functools.partial(self._search_cb, tab=tab, old_scroll_pos=tab.scroller.pos_px(), options=options, text=text, prev=False) options['result_cb'] = cb tab.search.search(text, **options) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def search_next(self, count=1): """Continue the search to the ([count]th) next term. Args: count: How many elements to ignore. 
""" tab = self._current_widget() window_text = self._tabbed_browser.search_text window_options = self._tabbed_browser.search_options if window_text is None: raise cmdexc.CommandError("No search done yet.") self.set_mark("'") if window_text is not None and window_text != tab.search.text: tab.search.clear() tab.search.search(window_text, **window_options) count -= 1 if count == 0: return cb = functools.partial(self._search_cb, tab=tab, old_scroll_pos=tab.scroller.pos_px(), options=window_options, text=window_text, prev=False) for _ in range(count - 1): tab.search.next_result() tab.search.next_result(result_cb=cb) @cmdutils.register(instance='command-dispatcher', scope='window') @cmdutils.argument('count', count=True) def search_prev(self, count=1): """Continue the search to the ([count]th) previous term. Args: count: How many elements to ignore. """ tab = self._current_widget() window_text = self._tabbed_browser.search_text window_options = self._tabbed_browser.search_options if window_text is None: raise cmdexc.CommandError("No search done yet.") self.set_mark("'") if window_text is not None and window_text != tab.search.text: tab.search.clear() tab.search.search(window_text, **window_options) count -= 1 if count == 0: return cb = functools.partial(self._search_cb, tab=tab, old_scroll_pos=tab.scroller.pos_px(), options=window_options, text=window_text, prev=True) for _ in range(count - 1): tab.search.prev_result() tab.search.prev_result(result_cb=cb) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_next_line(self, count=1): """Move the cursor or selection to the next line. Args: count: How many lines to move. """ self._current_widget().caret.move_to_next_line(count) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_prev_line(self, count=1): """Move the cursor or selection to the prev line. Args: count: How many lines to move. """ self._current_widget().caret.move_to_prev_line(count) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_next_char(self, count=1): """Move the cursor or selection to the next char. Args: count: How many lines to move. """ self._current_widget().caret.move_to_next_char(count) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_prev_char(self, count=1): """Move the cursor or selection to the previous char. Args: count: How many chars to move. """ self._current_widget().caret.move_to_prev_char(count) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_end_of_word(self, count=1): """Move the cursor or selection to the end of the word. Args: count: How many words to move. """ self._current_widget().caret.move_to_end_of_word(count) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_next_word(self, count=1): """Move the cursor or selection to the next word. Args: count: How many words to move. 
""" self._current_widget().caret.move_to_next_word(count) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_prev_word(self, count=1): """Move the cursor or selection to the previous word. Args: count: How many words to move. """ self._current_widget().caret.move_to_prev_word(count) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') def move_to_start_of_line(self): """Move the cursor or selection to the start of the line.""" self._current_widget().caret.move_to_start_of_line() @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') def move_to_end_of_line(self): """Move the cursor or selection to the end of line.""" self._current_widget().caret.move_to_end_of_line() @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_start_of_next_block(self, count=1): """Move the cursor or selection to the start of next block. Args: count: How many blocks to move. """ self._current_widget().caret.move_to_start_of_next_block(count) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_start_of_prev_block(self, count=1): """Move the cursor or selection to the start of previous block. Args: count: How many blocks to move. """ self._current_widget().caret.move_to_start_of_prev_block(count) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_end_of_next_block(self, count=1): """Move the cursor or selection to the end of next block. Args: count: How many blocks to move. """ self._current_widget().caret.move_to_end_of_next_block(count) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') @cmdutils.argument('count', count=True) def move_to_end_of_prev_block(self, count=1): """Move the cursor or selection to the end of previous block. Args: count: How many blocks to move. """ self._current_widget().caret.move_to_end_of_prev_block(count) @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') def move_to_start_of_document(self): """Move the cursor or selection to the start of the document.""" self._current_widget().caret.move_to_start_of_document() @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') def move_to_end_of_document(self): """Move the cursor or selection to the end of the document.""" self._current_widget().caret.move_to_end_of_document() @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') def toggle_selection(self): """Toggle caret selection mode.""" self._current_widget().caret.toggle_selection() @cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret], scope='window') def drop_selection(self): """Drop selection and keep selection mode enabled.""" self._current_widget().caret.drop_selection() @cmdutils.register(instance='command-dispatcher', scope='window', debug=True) @cmdutils.argument('count', count=True) def debug_webaction(self, action, count=1): """Execute a webaction. Available actions: http://doc.qt.io/archives/qt-5.5/qwebpage.html#WebAction-enum (WebKit) http://doc.qt.io/qt-5/qwebenginepage.html#WebAction-enum (WebEngine) Args: action: The action to execute, e.g. MoveToNextChar. 
count: How many times to repeat the action. """ tab = self._current_widget() for _ in range(count): try: tab.action.run_string(action) except browsertab.WebTabError as e: raise cmdexc.CommandError(str(e)) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0, no_cmd_split=True) def jseval(self, js_code, file=False, quiet=False, *, world: typing.Union[usertypes.JsWorld, int] = None): """Evaluate a JavaScript string. Args: js_code: The string/file to evaluate. file: Interpret js-code as a path to a file. If the path is relative, the file is searched in a js/ subdir in qutebrowser's data dir, e.g. `~/.local/share/qutebrowser/js`. quiet: Don't show resulting JS object. world: Ignored on QtWebKit. On QtWebEngine, a world ID or name to run the snippet in. """ if world is None: world = usertypes.JsWorld.jseval if quiet: jseval_cb = None else: def jseval_cb(out): """Show the data returned from JS.""" if out is None: # Getting the actual error (if any) seems to be difficult. # The error does end up in # BrowserPage.javaScriptConsoleMessage(), but # distinguishing between :jseval errors and errors from the # webpage is not trivial... message.info('No output or error') else: # The output can be a string, number, dict, array, etc. But # *don't* output too much data, as this will make # qutebrowser hang out = str(out) if len(out) > 5000: out = out[:5000] + ' [...trimmed...]' message.info(out) if file: path = os.path.expanduser(js_code) if not os.path.isabs(path): path = os.path.join(standarddir.data(), 'js', path) try: with open(path, 'r', encoding='utf-8') as f: js_code = f.read() except OSError as e: raise cmdexc.CommandError(str(e)) widget = self._current_widget() widget.run_js_async(js_code, callback=jseval_cb, world=world) @cmdutils.register(instance='command-dispatcher', scope='window') def fake_key(self, keystring, global_=False): """Send a fake keypress or key string to the website or qutebrowser. :fake-key xy - sends the keychain 'xy' :fake-key <Ctrl-x> - sends Ctrl-x :fake-key <Escape> - sends the escape key Args: keystring: The keystring to send. global_: If given, the keys are sent to the qutebrowser UI. """ try: sequence = keyutils.KeySequence.parse(keystring) except keyutils.KeyParseError as e: raise cmdexc.CommandError(str(e)) for keyinfo in sequence: press_event = keyinfo.to_event(QEvent.KeyPress) release_event = keyinfo.to_event(QEvent.KeyRelease) if global_: window = QApplication.focusWindow() if window is None: raise cmdexc.CommandError("No focused window!") QApplication.postEvent(window, press_event) QApplication.postEvent(window, release_event) else: tab = self._current_widget() tab.send_event(press_event) tab.send_event(release_event) @cmdutils.register(instance='command-dispatcher', scope='window', debug=True, backend=usertypes.Backend.QtWebKit) def debug_clear_ssl_errors(self): """Clear remembered SSL error answers.""" self._current_widget().clear_ssl_errors() @cmdutils.register(instance='command-dispatcher', scope='window') def edit_url(self, url=None, bg=False, tab=False, window=False, private=False, related=False): """Navigate to a url formed in an external editor. The editor which should be launched can be configured via the `editor.command` config option. Args: url: URL to edit; defaults to the current page url. bg: Open in a new background tab. tab: Open in a new tab. window: Open in a new window. private: Open a new window in private browsing mode. 
related: If opening a new tab, position the tab as related to the current one (like clicking on a link). """ cmdutils.check_exclusive((tab, bg, window), 'tbw') old_url = self._current_url().toString() ed = editor.ExternalEditor(self._tabbed_browser) # Passthrough for openurl args (e.g. -t, -b, -w) ed.file_updated.connect(functools.partial( self._open_if_changed, old_url=old_url, bg=bg, tab=tab, window=window, private=private, related=related)) ed.edit(url or old_url) @cmdutils.register(instance='command-dispatcher', scope='window') def set_mark(self, key): """Set a mark at the current scroll position in the current tab. Args: key: mark identifier; capital indicates a global mark """ self._tabbed_browser.set_mark(key) @cmdutils.register(instance='command-dispatcher', scope='window') def jump_mark(self, key): """Jump to the mark named by `key`. Args: key: mark identifier; capital indicates a global mark """ self._tabbed_browser.jump_mark(key) def _open_if_changed(self, url=None, old_url=None, bg=False, tab=False, window=False, private=False, related=False): """Open a URL unless it's already open in the tab. Args: old_url: The original URL to compare against. url: The URL to open. bg: Open in a new background tab. tab: Open in a new tab. window: Open in a new window. private: Open a new window in private browsing mode. related: If opening a new tab, position the tab as related to the current one (like clicking on a link). """ if bg or tab or window or private or related or url != old_url: self.openurl(url=url, bg=bg, tab=tab, window=window, private=private, related=related) @cmdutils.register(instance='command-dispatcher', scope='window') def fullscreen(self, leave=False): """Toggle fullscreen mode. Args: leave: Only leave fullscreen if it was entered by the page. """ if leave: tab = self._current_widget() try: tab.action.exit_fullscreen() except browsertab.UnsupportedOperationError: pass return window = self._tabbed_browser.widget.window() window.setWindowState(window.windowState() ^ Qt.WindowFullScreen) @cmdutils.register(instance='command-dispatcher', scope='window', name='tab-mute') @cmdutils.argument('count', count=True) def tab_mute(self, count=None): """Mute/Unmute the current/[count]th tab. Args: count: The tab index to mute or unmute, or None """ tab = self._cntwidget(count) if tab is None: return try: tab.audio.toggle_muted() except browsertab.WebTabError as e: raise cmdexc.CommandError(e)
1
21,896
This prevents negative IDs, but it doesn't check for too-large IDs. You should probably just do the same check you do for `QWebEngineScript` here as well.
qutebrowser-qutebrowser
py
@@ -18,6 +18,7 @@ const ( // Command specific flags. dockerFileFlag = "dockerfile" imageTagFlag = "tag" + awsTagsFlag = "tags" stackOutputDirFlag = "output-dir" limitFlag = "limit" followFlag = "follow"
1
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli // Long flag names. const ( // Common flags. projectFlag = "project" nameFlag = "name" appFlag = "app" envFlag = "env" appTypeFlag = "app-type" profileFlag = "profile" yesFlag = "yes" jsonFlag = "json" // Command specific flags. dockerFileFlag = "dockerfile" imageTagFlag = "tag" stackOutputDirFlag = "output-dir" limitFlag = "limit" followFlag = "follow" sinceFlag = "since" startTimeFlag = "start-time" endTimeFlag = "end-time" envProfilesFlag = "env-profiles" prodEnvFlag = "prod" deployFlag = "deploy" resourcesFlag = "resources" githubURLFlag = "github-url" githubAccessTokenFlag = "github-access-token" gitBranchFlag = "git-branch" envsFlag = "environments" domainNameFlag = "domain" localAppFlag = "local" deleteSecretFlag = "delete-secret" appPortFlag = "port" ) // Short flag names. // A short flag only exists if the flag is mandatory by the command. const ( projectFlagShort = "p" nameFlagShort = "n" appFlagShort = "a" envFlagShort = "e" appTypeFlagShort = "t" dockerFileFlagShort = "d" githubURLFlagShort = "u" githubAccessTokenFlagShort = "t" gitBranchFlagShort = "b" envsFlagShort = "e" ) // Descriptions for flags. const ( projectFlagDescription = "Name of the project." appFlagDescription = "Name of the application." envFlagDescription = "Name of the environment." appTypeFlagDescription = "Type of application to create." profileFlagDescription = "Name of the profile." yesFlagDescription = "Skips confirmation prompt." jsonFlagDescription = "Optional. Outputs in JSON format." dockerFileFlagDescription = "Path to the Dockerfile." imageTagFlagDescription = `Optional. The application's image tag.` stackOutputDirFlagDescription = "Optional. Writes the stack template and template configuration to a directory." prodEnvFlagDescription = "If the environment contains production services." limitFlagDescription = "Optional. The maximum number of log events returned." followFlagDescription = "Optional. Specifies if the logs should be streamed." sinceFlagDescription = `Optional. Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of start-time / since may be used.` startTimeFlagDescription = `Optional. Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of start-time / since may be used.` endTimeFlagDescription = `Optional. Only return logs before a specific date (RFC3339). Defaults to all logs. Only one of end-time / follow may be used.` deployTestFlagDescription = `Deploy your application to a "test" environment.` githubURLFlagDescription = "GitHub repository URL for your application." githubAccessTokenFlagDescription = "GitHub personal access token for your repository." gitBranchFlagDescription = "Branch used to trigger your pipeline." pipelineEnvsFlagDescription = "Environments to add to the pipeline." domainNameFlagDescription = "Optional. Your existing custom domain name." resourcesFlagDescription = "Optional. Show the resources of your application." localAppFlagDescription = "Only show applications in the current directory." envProfilesFlagDescription = "Optional. Environments and the profile to use to delete the environment." deleteSecretFlagDescription = "Deletes AWS Secrets Manager secret associated with a pipeline source repository." appPortFlagDescription = "Optional. The port on which your Dockerfile listens." )
1
12,424
This might be very confusing. Maybe `resource-tags`? I
aws-copilot-cli
go
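The review above suggests that a bare `tags` flag is easy to confuse with the existing image `tag` flag and floats `resource-tags` as an alternative. Below is a minimal Go sketch of that rename against the flag constants shown in this row; the `resourceTagsFlag` identifier and its description text are assumptions for illustration, not the project's final wording.

package cli

// Hypothetical rename keeping the image tag flag and the AWS resource tags flag distinct.
const (
	imageTagFlag     = "tag"           // existing: the application's image tag
	resourceTagsFlag = "resource-tags" // assumed rename of the new "tags" flag
)

const (
	// Assumed description text; the existing imageTagFlagDescription stays unchanged.
	resourceTagsFlagDescription = "Optional. Tags to apply to the AWS resources created by the command."
)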
@@ -283,6 +283,7 @@ class DeformConvPack(DeformConv): kernel_size=self.kernel_size, stride=_pair(self.stride), padding=_pair(self.padding), + dilation=self.dilation, bias=True) self.init_offset()
1
import math import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Function from torch.autograd.function import once_differentiable from torch.nn.modules.utils import _pair, _single from mmdet.utils import print_log from . import deform_conv_cuda class DeformConvFunction(Function): @staticmethod def forward(ctx, input, offset, weight, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, im2col_step=64): if input is not None and input.dim() != 4: raise ValueError( 'Expected 4D tensor as input, got {}D tensor instead.'.format( input.dim())) ctx.stride = _pair(stride) ctx.padding = _pair(padding) ctx.dilation = _pair(dilation) ctx.groups = groups ctx.deformable_groups = deformable_groups ctx.im2col_step = im2col_step ctx.save_for_backward(input, offset, weight) output = input.new_empty( DeformConvFunction._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride)) ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones if not input.is_cuda: raise NotImplementedError else: cur_im2col_step = min(ctx.im2col_step, input.shape[0]) assert (input.shape[0] % cur_im2col_step) == 0, 'im2col step must divide batchsize' deform_conv_cuda.deform_conv_forward_cuda( input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step) return output @staticmethod @once_differentiable def backward(ctx, grad_output): input, offset, weight = ctx.saved_tensors grad_input = grad_offset = grad_weight = None if not grad_output.is_cuda: raise NotImplementedError else: cur_im2col_step = min(ctx.im2col_step, input.shape[0]) assert (input.shape[0] % cur_im2col_step) == 0, 'im2col step must divide batchsize' if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: grad_input = torch.zeros_like(input) grad_offset = torch.zeros_like(offset) deform_conv_cuda.deform_conv_backward_input_cuda( input, offset, grad_output, grad_input, grad_offset, weight, ctx.bufs_[0], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step) if ctx.needs_input_grad[2]: grad_weight = torch.zeros_like(weight) deform_conv_cuda.deform_conv_backward_parameters_cuda( input, offset, grad_output, grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1, cur_im2col_step) return (grad_input, grad_offset, grad_weight, None, None, None, None, None) @staticmethod def _output_size(input, weight, padding, dilation, stride): channels = weight.size(0) output_size = (input.size(0), channels) for d in range(input.dim() - 2): in_size = input.size(d + 2) pad = padding[d] kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 stride_ = stride[d] output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) if not all(map(lambda s: s > 0, output_size)): raise ValueError( 'convolution input is too small (output would be {})'.format( 'x'.join(map(str, output_size)))) return output_size class ModulatedDeformConvFunction(Function): @staticmethod def forward(ctx, input, offset, mask, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1): ctx.stride = stride ctx.padding = padding ctx.dilation = dilation ctx.groups = groups ctx.deformable_groups 
= deformable_groups ctx.with_bias = bias is not None if not ctx.with_bias: bias = input.new_empty(1) # fake tensor if not input.is_cuda: raise NotImplementedError if weight.requires_grad or mask.requires_grad or offset.requires_grad \ or input.requires_grad: ctx.save_for_backward(input, offset, mask, weight, bias) output = input.new_empty( ModulatedDeformConvFunction._infer_shape(ctx, input, weight)) ctx._bufs = [input.new_empty(0), input.new_empty(0)] deform_conv_cuda.modulated_deform_conv_cuda_forward( input, weight, bias, ctx._bufs[0], offset, mask, output, ctx._bufs[1], weight.shape[2], weight.shape[3], ctx.stride, ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, ctx.groups, ctx.deformable_groups, ctx.with_bias) return output @staticmethod @once_differentiable def backward(ctx, grad_output): if not grad_output.is_cuda: raise NotImplementedError input, offset, mask, weight, bias = ctx.saved_tensors grad_input = torch.zeros_like(input) grad_offset = torch.zeros_like(offset) grad_mask = torch.zeros_like(mask) grad_weight = torch.zeros_like(weight) grad_bias = torch.zeros_like(bias) deform_conv_cuda.modulated_deform_conv_cuda_backward( input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1], grad_input, grad_weight, grad_bias, grad_offset, grad_mask, grad_output, weight.shape[2], weight.shape[3], ctx.stride, ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, ctx.groups, ctx.deformable_groups, ctx.with_bias) if not ctx.with_bias: grad_bias = None return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, None, None, None, None, None) @staticmethod def _infer_shape(ctx, input, weight): n = input.size(0) channels_out = weight.size(0) height, width = input.shape[2:4] kernel_h, kernel_w = weight.shape[2:4] height_out = (height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1 width_out = (width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1 return n, channels_out, height_out, width_out deform_conv = DeformConvFunction.apply modulated_deform_conv = ModulatedDeformConvFunction.apply class DeformConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, bias=False): super(DeformConv, self).__init__() assert not bias assert in_channels % groups == 0, \ 'in_channels {} cannot be divisible by groups {}'.format( in_channels, groups) assert out_channels % groups == 0, \ 'out_channels {} cannot be divisible by groups {}'.format( out_channels, groups) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = _pair(stride) self.padding = _pair(padding) self.dilation = _pair(dilation) self.groups = groups self.deformable_groups = deformable_groups # enable compatibility with nn.Conv2d self.transposed = False self.output_padding = _single(0) self.weight = nn.Parameter( torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size)) self.reset_parameters() def reset_parameters(self): n = self.in_channels for k in self.kernel_size: n *= k stdv = 1. 
/ math.sqrt(n) self.weight.data.uniform_(-stdv, stdv) def forward(self, x, offset): # To fix an assert error in deform_conv_cuda.cpp:128 # input image is smaller than kernel input_pad = ( x.size(2) < self.kernel_size[0] or x.size(3) < self.kernel_size[1]) if input_pad: pad_h = max(self.kernel_size[0] - x.size(2), 0) pad_w = max(self.kernel_size[1] - x.size(3), 0) x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0).contiguous() offset = F.pad(offset, (0, pad_w, 0, pad_h), 'constant', 0).contiguous() out = deform_conv(x, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups) if input_pad: out = out[:, :, :out.size(2) - pad_h, :out.size(3) - pad_w].contiguous() return out class DeformConvPack(DeformConv): """A Deformable Conv Encapsulation that acts as normal Conv layers. Args: in_channels (int): Same as nn.Conv2d. out_channels (int): Same as nn.Conv2d. kernel_size (int or tuple[int]): Same as nn.Conv2d. stride (int or tuple[int]): Same as nn.Conv2d. padding (int or tuple[int]): Same as nn.Conv2d. dilation (int or tuple[int]): Same as nn.Conv2d. groups (int): Same as nn.Conv2d. bias (bool or str): If specified as `auto`, it will be decided by the norm_cfg. Bias will be set as True if norm_cfg is None, otherwise False. """ _version = 2 def __init__(self, *args, **kwargs): super(DeformConvPack, self).__init__(*args, **kwargs) self.conv_offset = nn.Conv2d( self.in_channels, self.deformable_groups * 2 * self.kernel_size[0] * self.kernel_size[1], kernel_size=self.kernel_size, stride=_pair(self.stride), padding=_pair(self.padding), bias=True) self.init_offset() def init_offset(self): self.conv_offset.weight.data.zero_() self.conv_offset.bias.data.zero_() def forward(self, x): offset = self.conv_offset(x) return deform_conv(x, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): version = local_metadata.get('version', None) if version is None or version < 2: # the key is different in early versions # In version < 2, DeformConvPack loads previous benchmark models. 
if (prefix + 'conv_offset.weight' not in state_dict and prefix[:-1] + '_offset.weight' in state_dict): state_dict[prefix + 'conv_offset.weight'] = state_dict.pop( prefix[:-1] + '_offset.weight') if (prefix + 'conv_offset.bias' not in state_dict and prefix[:-1] + '_offset.bias' in state_dict): state_dict[prefix + 'conv_offset.bias'] = state_dict.pop(prefix[:-1] + '_offset.bias') if version is not None and version > 1: print_log( 'DeformConvPack {} is upgraded to version 2.'.format( prefix.rstrip('.')), logger='root') super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) class ModulatedDeformConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, bias=True): super(ModulatedDeformConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = stride self.padding = padding self.dilation = dilation self.groups = groups self.deformable_groups = deformable_groups self.with_bias = bias # enable compatibility with nn.Conv2d self.transposed = False self.output_padding = _single(0) self.weight = nn.Parameter( torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): n = self.in_channels for k in self.kernel_size: n *= k stdv = 1. / math.sqrt(n) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.zero_() def forward(self, x, offset, mask): return modulated_deform_conv(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups) class ModulatedDeformConvPack(ModulatedDeformConv): """A ModulatedDeformable Conv Encapsulation that acts as normal Conv layers. Args: in_channels (int): Same as nn.Conv2d. out_channels (int): Same as nn.Conv2d. kernel_size (int or tuple[int]): Same as nn.Conv2d. stride (int or tuple[int]): Same as nn.Conv2d. padding (int or tuple[int]): Same as nn.Conv2d. dilation (int or tuple[int]): Same as nn.Conv2d. groups (int): Same as nn.Conv2d. bias (bool or str): If specified as `auto`, it will be decided by the norm_cfg. Bias will be set as True if norm_cfg is None, otherwise False. """ _version = 2 def __init__(self, *args, **kwargs): super(ModulatedDeformConvPack, self).__init__(*args, **kwargs) self.conv_offset = nn.Conv2d( self.in_channels, self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1], kernel_size=self.kernel_size, stride=_pair(self.stride), padding=_pair(self.padding), bias=True) self.init_offset() def init_offset(self): self.conv_offset.weight.data.zero_() self.conv_offset.bias.data.zero_() def forward(self, x): out = self.conv_offset(x) o1, o2, mask = torch.chunk(out, 3, dim=1) offset = torch.cat((o1, o2), dim=1) mask = torch.sigmoid(mask) return modulated_deform_conv(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): version = local_metadata.get('version', None) if version is None or version < 2: # the key is different in early versions # In version < 2, ModulatedDeformConvPack # loads previous benchmark models. 
if (prefix + 'conv_offset.weight' not in state_dict and prefix[:-1] + '_offset.weight' in state_dict): state_dict[prefix + 'conv_offset.weight'] = state_dict.pop( prefix[:-1] + '_offset.weight') if (prefix + 'conv_offset.bias' not in state_dict and prefix[:-1] + '_offset.bias' in state_dict): state_dict[prefix + 'conv_offset.bias'] = state_dict.pop(prefix[:-1] + '_offset.bias') if version is not None and version > 1: print_log( 'ModulatedDeformConvPack {} is upgraded to version 2.'.format( prefix.rstrip('.')), logger='root') super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
1
19,170
We may use `_pair` to wrap `dilation`.
open-mmlab-mmdetection
py
@@ -286,15 +286,9 @@ func createLibcontainerConfig(cgroupName string, spec *specs.LinuxSpec) (*config return nil, err } config.Cgroups = c - if config.Readonlyfs { - setReadonly(config) - config.MaskPaths = []string{ - "/proc/kcore", - } - config.ReadonlyPaths = []string{ - "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus", - } - } + // set extra path masking for libcontainer for the various unsafe places in proc + config.MaskPaths = maskedPaths + config.ReadonlyPaths = readonlyPaths seccomp, err := setupSeccomp(&spec.Linux.Seccomp) if err != nil { return nil, err
1
// +build linux package main import ( "encoding/json" "fmt" "io/ioutil" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "github.com/Sirupsen/logrus" "github.com/codegangsta/cli" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/seccomp" libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" "github.com/opencontainers/specs" ) var specCommand = cli.Command{ Name: "spec", Usage: "create a new specification file", ArgsUsage: "", Description: `The spec command creates the new specification file named "` + specConfig + `" for the bundle." `, Flags: []cli.Flag{ cli.StringFlag{ Name: "bundle, b", Value: "", Usage: "path to the root of the bundle directory", }, }, Action: func(context *cli.Context) { spec := specs.LinuxSpec{ Spec: specs.Spec{ Version: specs.Version, Platform: specs.Platform{ OS: runtime.GOOS, Arch: runtime.GOARCH, }, Root: specs.Root{ Path: "rootfs", Readonly: true, }, Process: specs.Process{ Terminal: true, User: specs.User{}, Args: []string{ "sh", }, Env: []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", }, Cwd: "/", }, Hostname: "shell", Mounts: []specs.Mount{ { Destination: "/proc", Type: "proc", Source: "proc", Options: nil, }, { Destination: "/dev", Type: "tmpfs", Source: "tmpfs", Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, }, { Destination: "/dev/pts", Type: "devpts", Source: "devpts", Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, }, { Destination: "/dev/shm", Type: "tmpfs", Source: "shm", Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, }, { Destination: "/dev/mqueue", Type: "mqueue", Source: "mqueue", Options: []string{"nosuid", "noexec", "nodev"}, }, { Destination: "/sys", Type: "sysfs", Source: "sysfs", Options: []string{"nosuid", "noexec", "nodev"}, }, { Destination: "/sys/fs/cgroup", Type: "cgroup", Source: "cgroup", Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"}, }, }, }, Linux: specs.Linux{ Capabilities: []string{ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE", }, Resources: &specs.Resources{ Devices: []specs.DeviceCgroup{ { Allow: false, Access: sPtr("rwm"), }, }, }, Namespaces: []specs.Namespace{ { Type: "pid", }, { Type: "network", }, { Type: "ipc", }, { Type: "uts", }, { Type: "mount", }, }, Rlimits: []specs.Rlimit{ { Type: "RLIMIT_NOFILE", Hard: uint64(1024), Soft: uint64(1024), }, }, NoNewPrivileges: true, }, } checkNoFile := func(name string) error { _, err := os.Stat(name) if err == nil { return fmt.Errorf("File %s exists. 
Remove it first", name) } if !os.IsNotExist(err) { return err } return nil } bundle := context.String("bundle") if bundle != "" { if err := os.Chdir(bundle); err != nil { fatal(err) } } if err := checkNoFile(specConfig); err != nil { logrus.Fatal(err) } data, err := json.MarshalIndent(&spec, "", "\t") if err != nil { logrus.Fatal(err) } if err := ioutil.WriteFile(specConfig, data, 0666); err != nil { logrus.Fatal(err) } }, } func sPtr(s string) *string { return &s } func rPtr(r rune) *rune { return &r } func iPtr(i int64) *int64 { return &i } func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } var namespaceMapping = map[specs.NamespaceType]configs.NamespaceType{ specs.PIDNamespace: configs.NEWPID, specs.NetworkNamespace: configs.NEWNET, specs.MountNamespace: configs.NEWNS, specs.UserNamespace: configs.NEWUSER, specs.IPCNamespace: configs.NEWIPC, specs.UTSNamespace: configs.NEWUTS, } var mountPropagationMapping = map[string]int{ "rprivate": syscall.MS_PRIVATE | syscall.MS_REC, "private": syscall.MS_PRIVATE, "rslave": syscall.MS_SLAVE | syscall.MS_REC, "slave": syscall.MS_SLAVE, "rshared": syscall.MS_SHARED | syscall.MS_REC, "shared": syscall.MS_SHARED, "": syscall.MS_PRIVATE | syscall.MS_REC, } // validateSpec validates the fields in the spec // TODO: Add validation for other fields where applicable func validateSpec(spec *specs.LinuxSpec) error { if spec.Process.Cwd == "" { return fmt.Errorf("Cwd property must not be empty") } if !filepath.IsAbs(spec.Process.Cwd) { return fmt.Errorf("Cwd must be an absolute path") } return nil } // loadSpec loads the specification from the provided path. // If the path is empty then the default path will be "config.json" func loadSpec(cPath string) (spec *specs.LinuxSpec, err error) { cf, err := os.Open(cPath) if err != nil { if os.IsNotExist(err) { return nil, fmt.Errorf("JSON specification file %s not found", cPath) } return spec, err } defer cf.Close() if err = json.NewDecoder(cf).Decode(&spec); err != nil { return spec, err } return spec, validateSpec(spec) } func createLibcontainerConfig(cgroupName string, spec *specs.LinuxSpec) (*configs.Config, error) { cwd, err := os.Getwd() if err != nil { return nil, err } rootfsPath := spec.Root.Path if !filepath.IsAbs(rootfsPath) { rootfsPath = filepath.Join(cwd, rootfsPath) } config := &configs.Config{ Rootfs: rootfsPath, Capabilities: spec.Linux.Capabilities, Readonlyfs: spec.Root.Readonly, Hostname: spec.Hostname, } exists := false if config.RootPropagation, exists = mountPropagationMapping[spec.Linux.RootfsPropagation]; !exists { return nil, fmt.Errorf("rootfsPropagation=%v is not supported", spec.Linux.RootfsPropagation) } for _, ns := range spec.Linux.Namespaces { t, exists := namespaceMapping[ns.Type] if !exists { return nil, fmt.Errorf("namespace %q does not exist", ns) } config.Namespaces.Add(t, ns.Path) } if config.Namespaces.Contains(configs.NEWNET) { config.Networks = []*configs.Network{ { Type: "loopback", }, } } for _, m := range spec.Mounts { config.Mounts = append(config.Mounts, createLibcontainerMount(cwd, m)) } if err := createDevices(spec, config); err != nil { return nil, err } if err := setupUserNamespace(spec, config); err != nil { return nil, err } for _, rlimit := range spec.Linux.Rlimits { rl, err := createLibContainerRlimit(rlimit) if err != nil { return nil, err } config.Rlimits = append(config.Rlimits, rl) } c, err := createCgroupConfig(cgroupName, spec) if err != nil { return nil, err } config.Cgroups = c 
if config.Readonlyfs { setReadonly(config) config.MaskPaths = []string{ "/proc/kcore", } config.ReadonlyPaths = []string{ "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus", } } seccomp, err := setupSeccomp(&spec.Linux.Seccomp) if err != nil { return nil, err } config.Seccomp = seccomp config.Sysctl = spec.Linux.Sysctl config.ProcessLabel = spec.Linux.SelinuxProcessLabel config.AppArmorProfile = spec.Linux.ApparmorProfile config.NoNewPrivileges = spec.Linux.NoNewPrivileges for _, g := range spec.Process.User.AdditionalGids { config.AdditionalGroups = append(config.AdditionalGroups, strconv.FormatUint(uint64(g), 10)) } createHooks(spec, config) config.Version = specs.Version return config, nil } func createLibcontainerMount(cwd string, m specs.Mount) *configs.Mount { flags, pgflags, data := parseMountOptions(m.Options) source := m.Source if m.Type == "bind" { if !filepath.IsAbs(source) { source = filepath.Join(cwd, m.Source) } } return &configs.Mount{ Device: m.Type, Source: source, Destination: m.Destination, Data: data, Flags: flags, PropagationFlags: pgflags, } } func createCgroupConfig(name string, spec *specs.LinuxSpec) (*configs.Cgroup, error) { var ( err error myCgroupPath string ) if spec.Linux.CgroupsPath != nil { myCgroupPath = libcontainerUtils.CleanPath(*spec.Linux.CgroupsPath) } else { myCgroupPath, err = cgroups.GetThisCgroupDir("devices") if err != nil { return nil, err } myCgroupPath = filepath.Join(myCgroupPath, name) } c := &configs.Cgroup{ Path: myCgroupPath, Resources: &configs.Resources{}, } c.Resources.AllowedDevices = allowedDevices r := spec.Linux.Resources if r == nil { return c, nil } for i, d := range spec.Linux.Resources.Devices { var ( t = 'a' major = int64(-1) minor = int64(-1) ) if d.Type != nil { t = *d.Type } if d.Major != nil { major = *d.Major } if d.Minor != nil { minor = *d.Minor } if d.Access == nil || *d.Access == "" { return nil, fmt.Errorf("device access at %d field canot be empty", i) } dd := &configs.Device{ Type: t, Major: major, Minor: minor, Permissions: *d.Access, Allow: d.Allow, } c.Resources.Devices = append(c.Resources.Devices, dd) } // append the default allowed devices to the end of the list c.Resources.Devices = append(c.Resources.Devices, allowedDevices...) 
if r.Memory != nil { if r.Memory.Limit != nil { c.Resources.Memory = int64(*r.Memory.Limit) } if r.Memory.Reservation != nil { c.Resources.MemoryReservation = int64(*r.Memory.Reservation) } if r.Memory.Swap != nil { c.Resources.MemorySwap = int64(*r.Memory.Swap) } if r.Memory.Kernel != nil { c.Resources.KernelMemory = int64(*r.Memory.Kernel) } if r.Memory.Swappiness != nil { swappiness := int64(*r.Memory.Swappiness) c.Resources.MemorySwappiness = &swappiness } } if r.CPU != nil { if r.CPU.Shares != nil { c.Resources.CpuShares = int64(*r.CPU.Shares) } if r.CPU.Quota != nil { c.Resources.CpuQuota = int64(*r.CPU.Quota) } if r.CPU.Period != nil { c.Resources.CpuPeriod = int64(*r.CPU.Period) } if r.CPU.RealtimeRuntime != nil { c.Resources.CpuRtRuntime = int64(*r.CPU.RealtimeRuntime) } if r.CPU.RealtimePeriod != nil { c.Resources.CpuRtPeriod = int64(*r.CPU.RealtimePeriod) } if r.CPU.Cpus != nil { c.Resources.CpusetCpus = *r.CPU.Cpus } if r.CPU.Mems != nil { c.Resources.CpusetMems = *r.CPU.Mems } } if r.Pids != nil { c.Resources.PidsLimit = *r.Pids.Limit } if r.BlockIO != nil { if r.BlockIO.Weight != nil { c.Resources.BlkioWeight = *r.BlockIO.Weight } if r.BlockIO.LeafWeight != nil { c.Resources.BlkioLeafWeight = *r.BlockIO.LeafWeight } if r.BlockIO.WeightDevice != nil { for _, wd := range r.BlockIO.WeightDevice { weightDevice := configs.NewWeightDevice(wd.Major, wd.Minor, *wd.Weight, *wd.LeafWeight) c.Resources.BlkioWeightDevice = append(c.Resources.BlkioWeightDevice, weightDevice) } } if r.BlockIO.ThrottleReadBpsDevice != nil { for _, td := range r.BlockIO.ThrottleReadBpsDevice { throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate) c.Resources.BlkioThrottleReadBpsDevice = append(c.Resources.BlkioThrottleReadBpsDevice, throttleDevice) } } if r.BlockIO.ThrottleWriteBpsDevice != nil { for _, td := range r.BlockIO.ThrottleWriteBpsDevice { throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate) c.Resources.BlkioThrottleWriteBpsDevice = append(c.Resources.BlkioThrottleWriteBpsDevice, throttleDevice) } } if r.BlockIO.ThrottleReadIOPSDevice != nil { for _, td := range r.BlockIO.ThrottleReadIOPSDevice { throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate) c.Resources.BlkioThrottleReadIOPSDevice = append(c.Resources.BlkioThrottleReadIOPSDevice, throttleDevice) } } if r.BlockIO.ThrottleWriteIOPSDevice != nil { for _, td := range r.BlockIO.ThrottleWriteIOPSDevice { throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate) c.Resources.BlkioThrottleWriteIOPSDevice = append(c.Resources.BlkioThrottleWriteIOPSDevice, throttleDevice) } } } for _, l := range r.HugepageLimits { c.Resources.HugetlbLimit = append(c.Resources.HugetlbLimit, &configs.HugepageLimit{ Pagesize: *l.Pagesize, Limit: *l.Limit, }) } if r.DisableOOMKiller != nil { c.Resources.OomKillDisable = *r.DisableOOMKiller } if r.Network != nil { if r.Network.ClassID != nil { c.Resources.NetClsClassid = string(*r.Network.ClassID) } for _, m := range r.Network.Priorities { c.Resources.NetPrioIfpriomap = append(c.Resources.NetPrioIfpriomap, &configs.IfPrioMap{ Interface: m.Name, Priority: int64(m.Priority), }) } } return c, nil } func createDevices(spec *specs.LinuxSpec, config *configs.Config) error { // add whitelisted devices config.Devices = []*configs.Device{ { Type: 'c', Path: "/dev/null", Major: 1, Minor: 3, FileMode: 0666, Uid: 0, Gid: 0, }, { Type: 'c', Path: "/dev/random", Major: 1, Minor: 8, FileMode: 0666, Uid: 0, Gid: 0, }, { Type: 'c', Path: "/dev/full", Major: 1, 
Minor: 7, FileMode: 0666, Uid: 0, Gid: 0, }, { Type: 'c', Path: "/dev/tty", Major: 5, Minor: 0, FileMode: 0666, Uid: 0, Gid: 0, }, { Type: 'c', Path: "/dev/zero", Major: 1, Minor: 5, FileMode: 0666, Uid: 0, Gid: 0, }, { Type: 'c', Path: "/dev/urandom", Major: 1, Minor: 9, FileMode: 0666, Uid: 0, Gid: 0, }, } // merge in additional devices from the spec for _, d := range spec.Linux.Devices { var uid, gid uint32 if d.UID != nil { uid = *d.UID } if d.GID != nil { gid = *d.GID } device := &configs.Device{ Type: d.Type, Path: d.Path, Major: d.Major, Minor: d.Minor, FileMode: *d.FileMode, Uid: uid, Gid: gid, } config.Devices = append(config.Devices, device) } return nil } func setReadonly(config *configs.Config) { for _, m := range config.Mounts { if m.Device == "sysfs" { m.Flags |= syscall.MS_RDONLY } } } func setupUserNamespace(spec *specs.LinuxSpec, config *configs.Config) error { if len(spec.Linux.UIDMappings) == 0 { return nil } config.Namespaces.Add(configs.NEWUSER, "") create := func(m specs.IDMapping) configs.IDMap { return configs.IDMap{ HostID: int(m.HostID), ContainerID: int(m.ContainerID), Size: int(m.Size), } } for _, m := range spec.Linux.UIDMappings { config.UidMappings = append(config.UidMappings, create(m)) } for _, m := range spec.Linux.GIDMappings { config.GidMappings = append(config.GidMappings, create(m)) } rootUID, err := config.HostUID() if err != nil { return err } rootGID, err := config.HostGID() if err != nil { return err } for _, node := range config.Devices { node.Uid = uint32(rootUID) node.Gid = uint32(rootGID) } return nil } func createLibContainerRlimit(rlimit specs.Rlimit) (configs.Rlimit, error) { rl, err := strToRlimit(rlimit.Type) if err != nil { return configs.Rlimit{}, err } return configs.Rlimit{ Type: rl, Hard: uint64(rlimit.Hard), Soft: uint64(rlimit.Soft), }, nil } // parseMountOptions parses the string and returns the flags, propagation // flags and any mount data that it contains. 
func parseMountOptions(options []string) (int, []int, string) { var ( flag int pgflag []int data []string ) flags := map[string]struct { clear bool flag int }{ "async": {true, syscall.MS_SYNCHRONOUS}, "atime": {true, syscall.MS_NOATIME}, "bind": {false, syscall.MS_BIND}, "defaults": {false, 0}, "dev": {true, syscall.MS_NODEV}, "diratime": {true, syscall.MS_NODIRATIME}, "dirsync": {false, syscall.MS_DIRSYNC}, "exec": {true, syscall.MS_NOEXEC}, "mand": {false, syscall.MS_MANDLOCK}, "noatime": {false, syscall.MS_NOATIME}, "nodev": {false, syscall.MS_NODEV}, "nodiratime": {false, syscall.MS_NODIRATIME}, "noexec": {false, syscall.MS_NOEXEC}, "nomand": {true, syscall.MS_MANDLOCK}, "norelatime": {true, syscall.MS_RELATIME}, "nostrictatime": {true, syscall.MS_STRICTATIME}, "nosuid": {false, syscall.MS_NOSUID}, "rbind": {false, syscall.MS_BIND | syscall.MS_REC}, "relatime": {false, syscall.MS_RELATIME}, "remount": {false, syscall.MS_REMOUNT}, "ro": {false, syscall.MS_RDONLY}, "rw": {true, syscall.MS_RDONLY}, "strictatime": {false, syscall.MS_STRICTATIME}, "suid": {true, syscall.MS_NOSUID}, "sync": {false, syscall.MS_SYNCHRONOUS}, } propagationFlags := map[string]struct { clear bool flag int }{ "private": {false, syscall.MS_PRIVATE}, "shared": {false, syscall.MS_SHARED}, "slave": {false, syscall.MS_SLAVE}, "unbindable": {false, syscall.MS_UNBINDABLE}, "rprivate": {false, syscall.MS_PRIVATE | syscall.MS_REC}, "rshared": {false, syscall.MS_SHARED | syscall.MS_REC}, "rslave": {false, syscall.MS_SLAVE | syscall.MS_REC}, "runbindable": {false, syscall.MS_UNBINDABLE | syscall.MS_REC}, } for _, o := range options { // If the option does not exist in the flags table or the flag // is not supported on the platform, // then it is a data value for a specific fs type if f, exists := flags[o]; exists && f.flag != 0 { if f.clear { flag &= ^f.flag } else { flag |= f.flag } } else if f, exists := propagationFlags[o]; exists && f.flag != 0 { pgflag = append(pgflag, f.flag) } else { data = append(data, o) } } return flag, pgflag, strings.Join(data, ",") } func setupSeccomp(config *specs.Seccomp) (*configs.Seccomp, error) { if config == nil { return nil, nil } // No default action specified, no syscalls listed, assume seccomp disabled if config.DefaultAction == "" && len(config.Syscalls) == 0 { return nil, nil } newConfig := new(configs.Seccomp) newConfig.Syscalls = []*configs.Syscall{} if len(config.Architectures) > 0 { newConfig.Architectures = []string{} for _, arch := range config.Architectures { newArch, err := seccomp.ConvertStringToArch(string(arch)) if err != nil { return nil, err } newConfig.Architectures = append(newConfig.Architectures, newArch) } } // Convert default action from string representation newDefaultAction, err := seccomp.ConvertStringToAction(string(config.DefaultAction)) if err != nil { return nil, err } newConfig.DefaultAction = newDefaultAction // Loop through all syscall blocks and convert them to libcontainer format for _, call := range config.Syscalls { newAction, err := seccomp.ConvertStringToAction(string(call.Action)) if err != nil { return nil, err } newCall := configs.Syscall{ Name: call.Name, Action: newAction, Args: []*configs.Arg{}, } // Loop through all the arguments of the syscall and convert them for _, arg := range call.Args { newOp, err := seccomp.ConvertStringToOperator(string(arg.Op)) if err != nil { return nil, err } newArg := configs.Arg{ Index: arg.Index, Value: arg.Value, ValueTwo: arg.ValueTwo, Op: newOp, } newCall.Args = append(newCall.Args, &newArg) } 
newConfig.Syscalls = append(newConfig.Syscalls, &newCall) } return newConfig, nil } func createHooks(rspec *specs.LinuxSpec, config *configs.Config) { config.Hooks = &configs.Hooks{} for _, h := range rspec.Hooks.Prestart { cmd := configs.Command{ Path: h.Path, Args: h.Args, Env: h.Env, } config.Hooks.Prestart = append(config.Hooks.Prestart, configs.NewCommandHook(cmd)) } for _, h := range rspec.Hooks.Poststart { cmd := configs.Command{ Path: h.Path, Args: h.Args, Env: h.Env, } config.Hooks.Poststart = append(config.Hooks.Poststart, configs.NewCommandHook(cmd)) } for _, h := range rspec.Hooks.Poststop { cmd := configs.Command{ Path: h.Path, Args: h.Args, Env: h.Env, } config.Hooks.Poststop = append(config.Hooks.Poststop, configs.NewCommandHook(cmd)) } }
1
9,975
What's with `setReadonly`? Why was it deleted entirely?
opencontainers-runc
go
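The diff above replaces the `Readonlyfs`-gated block with unconditional assignments from package-level `maskedPaths` and `readonlyPaths` variables, and the review asks why `setReadonly` was dropped. Below is a minimal sketch of what those package-level defaults could look like, reusing the path literals removed by the change; whether runc declares them exactly like this is an assumption, the point is only that the masking no longer depends on `config.Readonlyfs`.

package main

// Hypothetical package-level defaults referenced by the diff; the paths are the
// literals that the change removed from createLibcontainerConfig.
var maskedPaths = []string{
	"/proc/kcore",
}

var readonlyPaths = []string{
	"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
}

Note that the old `setReadonly` helper only added MS_RDONLY to sysfs mounts when the rootfs was read-only; with this change that remount behaviour is gone and the path masking is applied to every container, which is what the reviewer is questioning.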
@@ -121,12 +121,7 @@ CXXToken * cxxTagSetTypeField( { CXX_DEBUG_ASSERT(tag && pTypeStart && pTypeEnd,"Non null parameters are expected"); - const char * szTypeRef0; - - // "typename" is debatable since it's not really - // allowed by C++ for unqualified types. However I haven't been able - // to come up with something better... so "typename" it is for now. - static const char * szTypename = "typename"; + const char * szTypeRef0 = NULL; if(pTypeStart != pTypeEnd) {
1
/* * Copyright (c) 2016, Szymon Tomasz Stefanek * * This source code is released for free distribution under the terms of the * GNU General Public License version 2 or (at your option) any later version. * * This module contains functions for parsing and scanning C++ source files */ #include "cxx_tag.h" #include "cxx_scope.h" #include "cxx_debug.h" #include "cxx_token_chain.h" #include "entry.h" #include "get.h" #include "routines.h" #include "xtag.h" static roleDesc CMacroRoles [] = { RoleTemplateUndef, }; static roleDesc CHeaderRoles [] = { RoleTemplateSystem, RoleTemplateLocal, }; static kindOption g_aCXXKinds [] = { { TRUE, 'c', "class", "classes" }, { TRUE, 'd', "macro", "macro definitions", .referenceOnly = FALSE, ATTACH_ROLES(CMacroRoles) }, { TRUE, 'e', "enumerator", "enumerators (values inside an enumeration)" }, { TRUE, 'f', "function", "function definitions" }, { TRUE, 'g', "enum", "enumeration names" }, { FALSE, 'h', "header", "included header files", .referenceOnly = TRUE, ATTACH_ROLES(CHeaderRoles) }, { FALSE, 'l', "local", "local variables" }, { TRUE, 'm', "member", "class, struct, and union members" }, { TRUE, 'n', "namespace", "namespaces" }, { FALSE, 'p', "prototype", "function prototypes" }, { TRUE, 's', "struct", "structure names" }, { TRUE, 't', "typedef", "typedefs" }, { TRUE, 'u', "union", "union names" }, { TRUE, 'v', "variable", "variable definitions" }, { FALSE, 'x', "externvar", "external and forward variable declarations" }, { FALSE, 'z', "parameter", "function parameters inside function definitions" }, { FALSE, 'L', "label", "goto labels" }, // FIXME: not sure about referenceOnly: if this is referenceOnly then // so is externvar and probably also prototype. { FALSE, 'N', "name", "names imported via using scope::symbol" /*, .referenceOnly = TRUE*/ }, { FALSE, 'U', "using", "using namespace statements", .referenceOnly = TRUE }, }; static const char * g_aCXXAccessStrings [] = { NULL, "public", "private", "protected", }; kindOption * cxxTagGetKindOptions(void) { return g_aCXXKinds; } int cxxTagGetKindOptionCount(void) { return sizeof(g_aCXXKinds) / sizeof(kindOption); } boolean cxxTagKindEnabled(enum CXXTagKind eKindId) { return g_aCXXKinds[eKindId].enabled; } static tagEntryInfo g_oCXXTag; tagEntryInfo * cxxTagBegin(enum CXXTagKind eKindId,CXXToken * pToken) { if(!g_aCXXKinds[eKindId].enabled) { //CXX_DEBUG_PRINT("Tag kind %s is not enabled",g_aCXXKinds[eKindId].name); return NULL; } initTagEntry( &g_oCXXTag, vStringValue(pToken->pszWord), &(g_aCXXKinds[eKindId]) ); g_oCXXTag.lineNumber = pToken->iLineNumber; g_oCXXTag.filePosition = pToken->oFilePosition; g_oCXXTag.isFileScope = FALSE; if(!cxxScopeIsGlobal()) { g_oCXXTag.extensionFields.scopeKind = &g_aCXXKinds[cxxScopeGetKind()]; g_oCXXTag.extensionFields.scopeName = cxxScopeGetFullName(); } // FIXME: meaning of "is file scope" is quite debatable... g_oCXXTag.extensionFields.access = g_aCXXAccessStrings[cxxScopeGetAccess()]; return &g_oCXXTag; } CXXToken * cxxTagSetTypeField( tagEntryInfo * tag, CXXToken * pTypeStart, CXXToken * pTypeEnd ) { CXX_DEBUG_ASSERT(tag && pTypeStart && pTypeEnd,"Non null parameters are expected"); const char * szTypeRef0; // "typename" is debatable since it's not really // allowed by C++ for unqualified types. However I haven't been able // to come up with something better... so "typename" it is for now. 
static const char * szTypename = "typename"; if(pTypeStart != pTypeEnd) { // Note that this does not work for types like "const enum X" // But that's not backward compatible anyway, so we live with it. if( cxxTokenTypeIs(pTypeStart,CXXTokenTypeKeyword) && cxxKeywordIsTypeRefMarker(pTypeStart->eKeyword) ) { szTypeRef0 = cxxKeywordName(pTypeStart->eKeyword); pTypeStart = pTypeStart->pNext; } else { szTypeRef0 = szTypename; } } else { szTypeRef0 = szTypename; } cxxTokenChainNormalizeTypeNameSpacingInRange(pTypeStart,pTypeEnd); CXXToken * pTypeName = cxxTokenChainExtractRange(pTypeStart,pTypeEnd,0); CXX_DEBUG_PRINT("Type name is '%s'",vStringValue(pTypeName->pszWord)); tag->extensionFields.typeRef[0] = szTypeRef0; tag->extensionFields.typeRef[1] = vStringValue(pTypeName->pszWord); return pTypeName; } void cxxTagCommit(void) { if(g_oCXXTag.isFileScope) { if (isXtagEnabled(XTAG_FILE_SCOPE)) markTagExtraBit (&g_oCXXTag, XTAG_FILE_SCOPE); else return; } CXX_DEBUG_PRINT( "Emitting tag for symbol '%s', kind '%s', line %d", g_oCXXTag.name, g_oCXXTag.kind->name, g_oCXXTag.lineNumber ); if( g_oCXXTag.extensionFields.typeRef[0] && g_oCXXTag.extensionFields.typeRef[1] ) CXX_DEBUG_PRINT( "Tag has typeref %s %s", g_oCXXTag.extensionFields.typeRef[0], g_oCXXTag.extensionFields.typeRef[1] ); makeTagEntry(&g_oCXXTag); // Handle --extra=+q if(!isXtagEnabled(XTAG_QUALIFIED_TAGS)) return; else markTagExtraBit (&g_oCXXTag, XTAG_QUALIFIED_TAGS); if(!g_oCXXTag.extensionFields.scopeName) return; // WARNING: The following code assumes that the scope // didn't change between cxxTagBegin() and cxxTagCommit(). enum CXXTagKind eScopeKind = cxxScopeGetKind(); if(eScopeKind == CXXTagKindFUNCTION) { // old ctags didn't do this, and --extra=+q is mainly // for backward compatibility so... return; } // Same tag. Only the name changes. vString * x; if(eScopeKind == CXXTagKindENUM) { // If the scope kind is enumeration then we need to remove the // last scope part. This is what old ctags did. if(cxxScopeGetSize() < 2) return; // toplevel enum x = cxxScopeGetFullNameExceptLastComponentAsString(); CXX_DEBUG_ASSERT(x,"Scope with size >= 2 should have returned a value here"); } else { x = vStringNewInit(g_oCXXTag.extensionFields.scopeName); } vStringCatS(x,"::"); vStringCatS(x,g_oCXXTag.name); g_oCXXTag.name = vStringValue(x); CXX_DEBUG_PRINT( "Emitting extra tag for symbol '%s', kind '%s', line %d", g_oCXXTag.name, g_oCXXTag.kind->name, g_oCXXTag.lineNumber ); makeTagEntry(&g_oCXXTag); vStringDelete(x); } void cxxTag(enum CXXTagKind eKindId,CXXToken * pToken) { if(cxxTagBegin(eKindId,pToken) != NULL) cxxTagCommit(); }
1
13,503
Currently I don't think this is acceptable: the value should be chosen by the author of the parser. (Personally, "type" is better than "typename".)
universal-ctags-ctags
c
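To make the suggestion in the review comment above concrete (letting the parser author pick the fallback typeref value instead of hard-coding "typename"), here is a minimal C sketch. It assumes the same headers as cxx_tag.c; the helper name `cxxTagPickTypeRef0` and the `szDefaultTypeRef` parameter are hypothetical and not part of the ctags codebase.

```c
/* Hypothetical helper, assuming the same headers as cxx_tag.c: the caller
 * (i.e. the parser author) supplies the fallback typeref keyword through
 * szDefaultTypeRef instead of relying on a hard-coded "typename". */
static const char * cxxTagPickTypeRef0(
		CXXToken * pTypeStart,
		CXXToken * pTypeEnd,
		const char * szDefaultTypeRef /* e.g. "type" or "typename" */
	)
{
	if(
		(pTypeStart != pTypeEnd) &&
		cxxTokenTypeIs(pTypeStart,CXXTokenTypeKeyword) &&
		cxxKeywordIsTypeRefMarker(pTypeStart->eKeyword)
	)
		return cxxKeywordName(pTypeStart->eKeyword);

	/* No explicit class/struct/union/enum marker: fall back to the
	 * parser-chosen default rather than a fixed "typename". */
	return szDefaultTypeRef;
}
```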
@@ -13,6 +13,7 @@ package machine
 
+// should not need to import the ec2 sdk here
 import (
 	"fmt"
1
// Copyright © 2018 The Kubernetes Authors. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package machine import ( "fmt" "sigs.k8s.io/cluster-api-provider-aws/cloud/aws/providerconfig/v1alpha1" ec2svc "sigs.k8s.io/cluster-api-provider-aws/cloud/aws/services/ec2" "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) // machinesSvc are the functions of the cluster-api that this actuator needs. type machinesSvc interface { UpdateMachineStatus(*clusterv1.Machine) (*clusterv1.Machine, error) } // ec2Svc are the functions from the ec2 service, not the client, this actuator needs. // This should never need to import the ec2 sdk. type ec2Svc interface { CreateInstance(*clusterv1.Machine) (*ec2svc.Instance, error) InstanceIfExists(*string) (*ec2svc.Instance, error) TerminateInstance(*string) error } // codec are the functions off the generated codec that this actuator uses. type codec interface { DecodeFromProviderConfig(clusterv1.ProviderConfig, runtime.Object) error DecodeProviderStatus(*runtime.RawExtension, runtime.Object) error EncodeProviderStatus(runtime.Object) (*runtime.RawExtension, error) } // Actuator is responsible for performing machine reconciliation type Actuator struct { codec codec // Services ec2 ec2Svc machines machinesSvc } // ActuatorParams holds parameter information for Actuator type ActuatorParams struct { // Codec is needed to work with the provider configs and statuses. Codec codec // Services // ClusterService is the interface to cluster-api. MachinesService machinesSvc // EC2Service is the interface to ec2. EC2Service ec2Svc } // NewActuator returns an actuator. func NewActuator(params ActuatorParams) (*Actuator, error) { return &Actuator{ codec: params.Codec, ec2: params.EC2Service, machines: params.MachinesService, }, nil } // Create creates a machine and is invoked by the machine controller. func (a *Actuator) Create(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { // will need this machine config in a bit _, err := a.machineProviderConfig(machine.Spec.ProviderConfig) if err != nil { glog.Errorf("Failed to decode the machine provider config: %v", err) return err } // Get the machine status status, err := a.machineProviderStatus(machine) if err != nil { return err } // does the instance exist with a valid status? we're good // otherwise create it and move on. 
_, err = a.ec2.InstanceIfExists(status.InstanceID) if err != nil { return err } i, err := a.ec2.CreateInstance(machine) if err != nil { return err } status.InstanceID = &i.ID status.InstanceState = &i.State return a.updateStatus(machine, status) } // Delete deletes a machine and is invoked by the Machine Controller func (a *Actuator) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { glog.Infof("Deleting machine %v for cluster %v.", machine.Name, cluster.Name) status, err := a.machineProviderStatus(machine) if err != nil { return errors.Wrap(err, "failed to get machine provider status") } instance, err := a.ec2.InstanceIfExists(status.InstanceID) if err != nil { return errors.Wrap(err, "failed to get instance") } // Check the instance state. If it's already shutting down or terminated, // do nothing. Otherwise attempt to delete it. // This decision is based on the ec2-instance-lifecycle graph at // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html switch instance.State { case ec2svc.InstanceStateShuttingDown, ec2svc.InstanceStateTerminated: return nil default: err = a.ec2.TerminateInstance(status.InstanceID) if err != nil { return errors.Wrap(err, "failed to terminate instance") } } return nil } // Update updates a machine and is invoked by the Machine Controller func (a *Actuator) Update(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { glog.Infof("Updating machine %v for cluster %v.", machine.Name, cluster.Name) return fmt.Errorf("TODO: Not yet implemented") } // Exists test for the existance of a machine and is invoked by the Machine Controller func (a *Actuator) Exists(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) { glog.Infof("Checking if machine %v for cluster %v exists.", machine.Name, cluster.Name) return false, fmt.Errorf("TODO: Not yet implemented") } func (a *Actuator) machineProviderConfig(providerConfig clusterv1.ProviderConfig) (*v1alpha1.AWSMachineProviderConfig, error) { machineProviderCfg := &v1alpha1.AWSMachineProviderConfig{} err := a.codec.DecodeFromProviderConfig(providerConfig, machineProviderCfg) return machineProviderCfg, err } func (a *Actuator) machineProviderStatus(machine *clusterv1.Machine) (*v1alpha1.AWSMachineProviderStatus, error) { status := &v1alpha1.AWSMachineProviderStatus{} err := a.codec.DecodeProviderStatus(machine.Status.ProviderStatus, status) return status, err } func (a *Actuator) updateStatus(machine *clusterv1.Machine, status *v1alpha1.AWSMachineProviderStatus) error { encodedProviderStatus, err := a.codec.EncodeProviderStatus(status) if err != nil { return fmt.Errorf("failed to encode machine status: %v", err) } if encodedProviderStatus != nil { machine.Status.ProviderStatus = encodedProviderStatus if _, err := a.machines.UpdateMachineStatus(machine); err != nil { return fmt.Errorf("failed to update machine status: %v", err) } } return nil }
1
5,952
Is this excessive?
kubernetes-sigs-cluster-api-provider-aws
go
@@ -251,7 +251,7 @@ main(int argc, char *argv[])
        * sysroot, we still need a writable /etc. And to avoid race conditions
        * we ensure it's writable in the initramfs, before we switchroot at all.
        */
-      if (mount ("/etc", "/etc", NULL, MS_BIND, NULL) < 0)
+      if (mount ("etc", "etc", NULL, MS_BIND, NULL) < 0)
        err (EXIT_FAILURE, "failed to make /etc a bind mount");
       /* Pass on the fact that we discovered a readonly sysroot to ostree-remount.service */
       int fd = open (_OSTREE_SYSROOT_READONLY_STAMP, O_WRONLY | O_CREAT | O_CLOEXEC, 0644);
1
/* -*- c-file-style: "gnu" -*- * Switch to new root directory and start init. * * Copyright 2011,2012,2013 Colin Walters <[email protected]> * * Based on code from util-linux/sys-utils/switch_root.c, * Copyright 2002-2009 Red Hat, Inc. All rights reserved. * Authors: * Peter Jones <[email protected]> * Jeremy Katz <[email protected]> * * Relicensed with permission to LGPLv2+. * * SPDX-License-Identifier: LGPL-2.0+ * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ /* The high level goal of ostree-prepare-root.service is to run inside * the initial ram disk (if one is in use) and set up the `/` mountpoint * to be the deployment root, using the ostree= kernel commandline * argument to find the target deployment root. * * It's really the heart of how ostree works - basically multiple * hardlinked chroot() targets are maintained, this one does the equivalent * of chroot(). * * If using systemd, an excellent reference is `man bootup`. This * service runs Before=initrd-root-fs.target. At this point it's * assumed that the block storage and root filesystem are mounted at * /sysroot - i.e. /sysroot points to the *physical* root before * this service runs. After, `/` is the deployment root. * * There is also a secondary mode for this service when an initrd isn't * used - instead the binary must be statically linked (and the kernel * must have mounted the rootfs itself) - then we set things up and * exec the real init directly. This can be popular in embedded * systems to increase bootup speed. */ #include "config.h" #include <sys/mount.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/param.h> #include <sys/syscall.h> #include <fcntl.h> #include <stdio.h> #include <assert.h> #include <stdarg.h> #include <stdbool.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <err.h> #include <errno.h> #include <ctype.h> #if defined(HAVE_LIBSYSTEMD) && !defined(OSTREE_PREPARE_ROOT_STATIC) #define USE_LIBSYSTEMD #endif #ifdef USE_LIBSYSTEMD #include <systemd/sd-journal.h> #define OSTREE_PREPARE_ROOT_DEPLOYMENT_MSG SD_ID128_MAKE(71,70,33,6a,73,ba,46,01,ba,d3,1a,f8,88,aa,0d,f7) #endif #include "ostree-mount-util.h" /* Initialized early in main */ static bool running_as_pid1; static inline bool sysroot_is_configured_ro (const char *sysroot) { char * config_path = NULL; assert (asprintf (&config_path, "%s/ostree/repo/config", sysroot) != -1); FILE *f = fopen(config_path, "r"); if (!f) { fprintf (stderr, "Missing expected repo config: %s\n", config_path); free (config_path); return false; } free (config_path); bool ret = false; char *line = NULL; size_t len = 0; /* Note getline() will reuse the previous buffer */ bool in_sysroot = false; while (getline (&line, &len, f) != -1) { /* This is an awful hack to avoid depending on GLib in the * initramfs right now. 
*/ if (strstr (line, "[sysroot]") == line) in_sysroot = true; else if (*line == '[') in_sysroot = false; else if (in_sysroot && strstr (line, "readonly=true") == line) { ret = true; break; } } fclose (f); free (line); return ret; } static char* resolve_deploy_path (const char * root_mountpoint) { char destpath[PATH_MAX]; struct stat stbuf; char *ostree_target, *deploy_path; ostree_target = read_proc_cmdline_ostree (); if (!ostree_target) errx (EXIT_FAILURE, "No OSTree target; expected ostree=/ostree/boot.N/..."); snprintf (destpath, sizeof(destpath), "%s/%s", root_mountpoint, ostree_target); if (lstat (destpath, &stbuf) < 0) err (EXIT_FAILURE, "Couldn't find specified OSTree root '%s'", destpath); if (!S_ISLNK (stbuf.st_mode)) errx (EXIT_FAILURE, "OSTree target is not a symbolic link: %s", destpath); deploy_path = realpath (destpath, NULL); if (deploy_path == NULL) err (EXIT_FAILURE, "realpath(%s) failed", destpath); if (stat (deploy_path, &stbuf) < 0) err (EXIT_FAILURE, "stat(%s) failed", deploy_path); /* Quiet logs if there's no journal */ #ifdef USE_LIBSYSTEMD const char *resolved_path = deploy_path + strlen (root_mountpoint); sd_journal_send ("MESSAGE=Resolved OSTree target to: %s", deploy_path, "MESSAGE_ID=" SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(OSTREE_PREPARE_ROOT_DEPLOYMENT_MSG), "DEPLOYMENT_PATH=%s", resolved_path, "DEPLOYMENT_DEVICE=%u", stbuf.st_dev, "DEPLOYMENT_INODE=%u", stbuf.st_ino, NULL); #endif return deploy_path; } static int pivot_root(const char * new_root, const char * put_old) { return syscall(__NR_pivot_root, new_root, put_old); } int main(int argc, char *argv[]) { /* If we're pid 1, that means there's no initramfs; in this situation * various defaults change: * * - Assume that the target root is / * - Quiet logging as there's no journal * etc. */ running_as_pid1 = (getpid () == 1); const char *root_arg = NULL; bool we_mounted_proc = false; if (running_as_pid1) { root_arg = "/"; } else { if (argc < 2) err (EXIT_FAILURE, "usage: ostree-prepare-root SYSROOT"); root_arg = argv[1]; } struct stat stbuf; if (stat ("/proc/cmdline", &stbuf) < 0) { if (errno != ENOENT) err (EXIT_FAILURE, "stat(\"/proc/cmdline\") failed"); /* We need /proc mounted for /proc/cmdline and realpath (on musl) to * work: */ if (mount ("proc", "/proc", "proc", 0, NULL) < 0) err (EXIT_FAILURE, "failed to mount proc on /proc"); we_mounted_proc = 1; } const char *root_mountpoint = realpath (root_arg, NULL); if (root_mountpoint == NULL) err (EXIT_FAILURE, "realpath(\"%s\")", root_arg); char *deploy_path = resolve_deploy_path (root_mountpoint); if (we_mounted_proc) { /* Leave the filesystem in the state that we found it: */ if (umount ("/proc")) err (EXIT_FAILURE, "failed to umount proc from /proc"); } /* Work-around for a kernel bug: for some reason the kernel * refuses switching root if any file systems are mounted * MS_SHARED. Hence remount them MS_PRIVATE here as a * work-around. * * https://bugzilla.redhat.com/show_bug.cgi?id=847418 */ if (mount (NULL, "/", NULL, MS_REC|MS_PRIVATE, NULL) < 0) err (EXIT_FAILURE, "failed to make \"/\" private mount"); /* Make deploy_path a bind mount, so we can move it later */ if (mount (deploy_path, deploy_path, NULL, MS_BIND, NULL) < 0) err (EXIT_FAILURE, "failed to make initial bind mount %s", deploy_path); /* chdir to our new root. We need to do this after bind-mounting it over * itself otherwise our cwd is still on the non-bind-mounted filesystem * below. 
*/ if (chdir (deploy_path) < 0) err (EXIT_FAILURE, "failed to chdir to deploy_path"); /* Query the repository configuration - this is an operating system builder * choice. More info: https://github.com/ostreedev/ostree/pull/1767 */ const bool sysroot_readonly = sysroot_is_configured_ro (root_arg); const bool sysroot_currently_writable = !path_is_on_readonly_fs (root_arg); #ifdef USE_LIBSYSTEMD sd_journal_send ("MESSAGE=sysroot configured read-only: %d, currently writable: %d", (int)sysroot_readonly, (int)sysroot_currently_writable, NULL); #endif if (sysroot_readonly) { if (!sysroot_currently_writable) errx (EXIT_FAILURE, "sysroot=readonly currently requires writable / in initramfs"); /* Now, /etc is not normally a bind mount, but if we have a readonly * sysroot, we still need a writable /etc. And to avoid race conditions * we ensure it's writable in the initramfs, before we switchroot at all. */ if (mount ("/etc", "/etc", NULL, MS_BIND, NULL) < 0) err (EXIT_FAILURE, "failed to make /etc a bind mount"); /* Pass on the fact that we discovered a readonly sysroot to ostree-remount.service */ int fd = open (_OSTREE_SYSROOT_READONLY_STAMP, O_WRONLY | O_CREAT | O_CLOEXEC, 0644); if (fd < 0) err (EXIT_FAILURE, "failed to create %s", _OSTREE_SYSROOT_READONLY_STAMP); (void) close (fd); } /* Default to true, but in the systemd case, default to false because it's handled by * ostree-system-generator. */ bool mount_var = true; #ifdef HAVE_SYSTEMD_AND_LIBMOUNT mount_var = false; #endif /* file in /run can override the default behaviour so that we definitely mount /var */ if (lstat (INITRAMFS_MOUNT_VAR, &stbuf) == 0) mount_var = true; /* Link to the deployment's /var */ if (mount_var && mount ("../../var", "var", NULL, MS_BIND, NULL) < 0) err (EXIT_FAILURE, "failed to bind mount ../../var to var"); char srcpath[PATH_MAX]; /* If /boot is on the same partition, use a bind mount to make it visible * at /boot inside the deployment. */ snprintf (srcpath, sizeof(srcpath), "%s/boot/loader", root_mountpoint); if (lstat (srcpath, &stbuf) == 0 && S_ISLNK (stbuf.st_mode)) { if (lstat ("boot", &stbuf) == 0 && S_ISDIR (stbuf.st_mode)) { snprintf (srcpath, sizeof(srcpath), "%s/boot", root_mountpoint); if (mount (srcpath, "boot", NULL, MS_BIND, NULL) < 0) err (EXIT_FAILURE, "failed to bind mount %s to boot", srcpath); } } /* Do we have a persistent overlayfs for /usr? If so, mount it now. */ if (lstat (".usr-ovl-work", &stbuf) == 0) { const char usr_ovl_options[] = "lowerdir=usr,upperdir=.usr-ovl-upper,workdir=.usr-ovl-work"; /* Except overlayfs barfs if we try to mount it on a read-only * filesystem. For this use case I think admins are going to be * okay if we remount the rootfs here, rather than waiting until * later boot and `systemd-remount-fs.service`. */ if (path_is_on_readonly_fs (".")) { if (mount (".", ".", NULL, MS_REMOUNT | MS_SILENT, NULL) < 0) err (EXIT_FAILURE, "failed to remount rootfs writable (for overlayfs)"); } if (mount ("overlay", "usr", "overlay", 0, usr_ovl_options) < 0) err (EXIT_FAILURE, "failed to mount /usr overlayfs"); } else { /* Otherwise, a read-only bind mount for /usr */ if (mount ("usr", "usr", NULL, MS_BIND, NULL) < 0) err (EXIT_FAILURE, "failed to bind mount (class:readonly) /usr"); if (mount ("usr", "usr", NULL, MS_BIND | MS_REMOUNT | MS_RDONLY, NULL) < 0) err (EXIT_FAILURE, "failed to bind mount (class:readonly) /usr"); } /* We only stamp /run now if we're running in an initramfs, i.e. we're * not pid 1. Otherwise it's handled later via ostree-system-generator. 
* https://mail.gnome.org/archives/ostree-list/2018-March/msg00012.html * https://github.com/ostreedev/ostree/pull/1675 */ if (!running_as_pid1) touch_run_ostree (); if (strcmp(root_mountpoint, "/") == 0) { /* pivot_root rotates two mount points around. In this instance . (the * deploy location) becomes / and the existing / becomes /sysroot. We * have to use pivot_root rather than mount --move in this instance * because our deploy location is mounted as a subdirectory of the real * sysroot, so moving sysroot would also move the deploy location. In * reality attempting mount --move would fail with EBUSY. */ if (pivot_root (".", "sysroot") < 0) err (EXIT_FAILURE, "failed to pivot_root to deployment"); } else { /* In this instance typically we have our ready made-up up root at * /sysroot/ostree/deploy/.../ (deploy_path) and the real rootfs at * /sysroot (root_mountpoint). We want to end up with our made-up root at * /sysroot/ and the real rootfs under /sysroot/sysroot as systemd will be * responsible for moving /sysroot to /. * * We need to do this in 3 moves to avoid trying to move /sysroot under * itself: * * 1. /sysroot/ostree/deploy/... -> /sysroot.tmp * 2. /sysroot -> /sysroot.tmp/sysroot * 3. /sysroot.tmp -> /sysroot */ if (mkdir ("/sysroot.tmp", 0755) < 0) err (EXIT_FAILURE, "couldn't create temporary sysroot /sysroot.tmp"); if (mount (deploy_path, "/sysroot.tmp", NULL, MS_MOVE, NULL) < 0) err (EXIT_FAILURE, "failed to MS_MOVE '%s' to '/sysroot.tmp'", deploy_path); if (mount (root_mountpoint, "sysroot", NULL, MS_MOVE, NULL) < 0) err (EXIT_FAILURE, "failed to MS_MOVE '%s' to 'sysroot'", root_mountpoint); if (mount (".", root_mountpoint, NULL, MS_MOVE, NULL) < 0) err (EXIT_FAILURE, "failed to MS_MOVE %s to %s", deploy_path, root_mountpoint); if (rmdir ("/sysroot.tmp") < 0) err (EXIT_FAILURE, "couldn't remove temporary sysroot /sysroot.tmp"); } /* The /sysroot mount needs to be private to avoid having a mount for e.g. /var/cache * also propagate to /sysroot/ostree/deploy/$stateroot/var/cache * * Now in reality, today this is overridden by systemd: the *actual* way we fix this up * is in ostree-remount.c. But let's do it here to express the semantics we want * at the very start (perhaps down the line systemd will have compile/runtime option * to say that the initramfs environment did everything right from the start). */ if (mount ("none", "sysroot", NULL, MS_PRIVATE, NULL) < 0) err (EXIT_FAILURE, "remounting 'sysroot' private"); if (running_as_pid1) { execl ("/sbin/init", "/sbin/init", NULL); err (EXIT_FAILURE, "failed to exec init inside ostree"); } else { exit (EXIT_SUCCESS); } }
1
18,555
Ohhhh I see, this change was previously having *no effect*. I had thought you meant we were doing something like bind mounting the initramfs' `/etc` as the real `/etc`, but we'd clearly notice if that happened: we'd be missing all sorts of config files.
ostreedev-ostree
c
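For context on the review comment above: after the earlier `chdir (deploy_path)`, relative paths such as `"etc"` resolve inside the deployment root, while the absolute `"/etc"` still named the initramfs's own `/etc`, so the old bind mount was a no-op. A small C sketch of that path-resolution difference follows; the deployment path used here is made up purely for illustration.

```c
#include <stdio.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>

/* Show what "/etc" and "etc" resolve to once we have chdir'd into the
 * deployment root, mirroring the situation in ostree-prepare-root just
 * before the bind mount. The deploy_path value is illustrative only. */
int main(void)
{
  const char *deploy_path = "/sysroot/ostree/deploy/os/deploy/abc123.0"; /* example */

  if (chdir (deploy_path) < 0)
    { perror ("chdir"); return 1; }

  char resolved[PATH_MAX];
  /* Absolute path: still the initramfs /etc, so a self bind mount changes nothing */
  printf ("\"/etc\" -> %s\n", realpath ("/etc", resolved) ? resolved : "(unresolved)");
  /* Relative path: the deployment's etc, which is what needs to become writable */
  printf ("\"etc\"  -> %s\n", realpath ("etc", resolved) ? resolved : "(unresolved)");
  return 0;
}
```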
@@ -133,6 +133,15 @@ abstract class SnapshotProducer<ThisT> implements SnapshotUpdate<ThisT> {
    */
   protected abstract String operation();
 
+  /**
+   * A Long sequence number to be written to the manifest list file.
+   *
+   * @return the sequence number to write, or null
+   */
+  protected Long sequenceNumber() {
+    return null;
+  }
+
   /**
    * Validate the current metadata.
    * <p>
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import com.github.benmanes.caffeine.cache.Caffeine; import com.github.benmanes.caffeine.cache.LoadingCache; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; import org.apache.iceberg.events.Listeners; import org.apache.iceberg.exceptions.CommitFailedException; import org.apache.iceberg.exceptions.CommitStateUnknownException; import org.apache.iceberg.exceptions.RuntimeIOException; import org.apache.iceberg.io.OutputFile; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.util.Exceptions; import org.apache.iceberg.util.Tasks; import org.apache.iceberg.util.ThreadPools; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS; import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT; import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS; import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT; import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES; import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT; import static org.apache.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS; import static org.apache.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT; import static org.apache.iceberg.TableProperties.MANIFEST_LISTS_ENABLED; import static org.apache.iceberg.TableProperties.MANIFEST_LISTS_ENABLED_DEFAULT; @SuppressWarnings("UnnecessaryAnonymousClass") abstract class SnapshotProducer<ThisT> implements SnapshotUpdate<ThisT> { private static final Logger LOG = LoggerFactory.getLogger(SnapshotProducer.class); static final Set<ManifestFile> EMPTY_SET = Sets.newHashSet(); /** * Default callback used to delete files. */ private final Consumer<String> defaultDelete = new Consumer<String>() { @Override public void accept(String file) { ops.io().deleteFile(file); } }; /** * Cache used to enrich ManifestFile instances that are written to a ManifestListWriter. 
*/ private final LoadingCache<ManifestFile, ManifestFile> manifestsWithMetadata; private final TableOperations ops; private final String commitUUID = UUID.randomUUID().toString(); private final AtomicInteger manifestCount = new AtomicInteger(0); private final AtomicInteger attempt = new AtomicInteger(0); private final List<String> manifestLists = Lists.newArrayList(); private volatile Long snapshotId = null; private TableMetadata base; private boolean stageOnly = false; private Consumer<String> deleteFunc = defaultDelete; protected SnapshotProducer(TableOperations ops) { this.ops = ops; this.base = ops.current(); this.manifestsWithMetadata = Caffeine .newBuilder() .build(file -> { if (file.snapshotId() != null) { return file; } return addMetadata(ops, file); }); } protected abstract ThisT self(); @Override public ThisT stageOnly() { this.stageOnly = true; return self(); } @Override public ThisT deleteWith(Consumer<String> deleteCallback) { Preconditions.checkArgument(this.deleteFunc == defaultDelete, "Cannot set delete callback more than once"); this.deleteFunc = deleteCallback; return self(); } /** * Clean up any uncommitted manifests that were created. * <p> * Manifests may not be committed if apply is called more because a commit conflict has occurred. * Implementations may keep around manifests because the same changes will be made by both apply * calls. This method instructs the implementation to clean up those manifests and passes the * paths of the manifests that were actually committed. * * @param committed a set of manifest paths that were actually committed */ protected abstract void cleanUncommitted(Set<ManifestFile> committed); /** * A string that describes the action that produced the new snapshot. * * @return a string operation */ protected abstract String operation(); /** * Validate the current metadata. * <p> * Child operations can override this to add custom validation. * * @param currentMetadata current table metadata to validate */ protected void validate(TableMetadata currentMetadata) { } /** * Apply the update's changes to the base table metadata and return the new manifest list. * * @param metadataToUpdate the base table metadata to apply changes to * @return a manifest list for the new snapshot. */ protected abstract List<ManifestFile> apply(TableMetadata metadataToUpdate); @Override public Snapshot apply() { this.base = refresh(); Long parentSnapshotId = base.currentSnapshot() != null ? 
base.currentSnapshot().snapshotId() : null; long sequenceNumber = base.nextSequenceNumber(); // run validations from the child operation validate(base); List<ManifestFile> manifests = apply(base); if (base.formatVersion() > 1 || base.propertyAsBoolean(MANIFEST_LISTS_ENABLED, MANIFEST_LISTS_ENABLED_DEFAULT)) { OutputFile manifestList = manifestListPath(); try (ManifestListWriter writer = ManifestLists.write( ops.current().formatVersion(), manifestList, snapshotId(), parentSnapshotId, sequenceNumber)) { // keep track of the manifest lists created manifestLists.add(manifestList.location()); ManifestFile[] manifestFiles = new ManifestFile[manifests.size()]; Tasks.range(manifestFiles.length) .stopOnFailure().throwFailureWhenFinished() .executeWith(ThreadPools.getWorkerPool()) .run(index -> manifestFiles[index] = manifestsWithMetadata.get(manifests.get(index))); writer.addAll(Arrays.asList(manifestFiles)); } catch (IOException e) { throw new RuntimeIOException(e, "Failed to write manifest list file"); } return new BaseSnapshot(ops.io(), sequenceNumber, snapshotId(), parentSnapshotId, System.currentTimeMillis(), operation(), summary(base), base.currentSchemaId(), manifestList.location()); } else { return new BaseSnapshot(ops.io(), snapshotId(), parentSnapshotId, System.currentTimeMillis(), operation(), summary(base), base.currentSchemaId(), manifests); } } protected abstract Map<String, String> summary(); /** * Returns the snapshot summary from the implementation and updates totals. */ private Map<String, String> summary(TableMetadata previous) { Map<String, String> summary = summary(); if (summary == null) { return ImmutableMap.of(); } Map<String, String> previousSummary; if (previous.currentSnapshot() != null) { if (previous.currentSnapshot().summary() != null) { previousSummary = previous.currentSnapshot().summary(); } else { // previous snapshot had no summary, use an empty summary previousSummary = ImmutableMap.of(); } } else { // if there was no previous snapshot, default the summary to start totals at 0 ImmutableMap.Builder<String, String> summaryBuilder = ImmutableMap.builder(); summaryBuilder .put(SnapshotSummary.TOTAL_RECORDS_PROP, "0") .put(SnapshotSummary.TOTAL_FILE_SIZE_PROP, "0") .put(SnapshotSummary.TOTAL_DATA_FILES_PROP, "0") .put(SnapshotSummary.TOTAL_DELETE_FILES_PROP, "0") .put(SnapshotSummary.TOTAL_POS_DELETES_PROP, "0") .put(SnapshotSummary.TOTAL_EQ_DELETES_PROP, "0"); previousSummary = summaryBuilder.build(); } ImmutableMap.Builder<String, String> builder = ImmutableMap.builder(); // copy all summary properties from the implementation builder.putAll(summary); updateTotal( builder, previousSummary, SnapshotSummary.TOTAL_RECORDS_PROP, summary, SnapshotSummary.ADDED_RECORDS_PROP, SnapshotSummary.DELETED_RECORDS_PROP); updateTotal( builder, previousSummary, SnapshotSummary.TOTAL_FILE_SIZE_PROP, summary, SnapshotSummary.ADDED_FILE_SIZE_PROP, SnapshotSummary.REMOVED_FILE_SIZE_PROP); updateTotal( builder, previousSummary, SnapshotSummary.TOTAL_DATA_FILES_PROP, summary, SnapshotSummary.ADDED_FILES_PROP, SnapshotSummary.DELETED_FILES_PROP); updateTotal( builder, previousSummary, SnapshotSummary.TOTAL_DELETE_FILES_PROP, summary, SnapshotSummary.ADDED_DELETE_FILES_PROP, SnapshotSummary.REMOVED_DELETE_FILES_PROP); updateTotal( builder, previousSummary, SnapshotSummary.TOTAL_POS_DELETES_PROP, summary, SnapshotSummary.ADDED_POS_DELETES_PROP, SnapshotSummary.REMOVED_POS_DELETES_PROP); updateTotal( builder, previousSummary, SnapshotSummary.TOTAL_EQ_DELETES_PROP, summary, 
SnapshotSummary.ADDED_EQ_DELETES_PROP, SnapshotSummary.REMOVED_EQ_DELETES_PROP); return builder.build(); } protected TableMetadata current() { return base; } protected TableMetadata refresh() { this.base = ops.refresh(); return base; } @Override public void commit() { // this is always set to the latest commit attempt's snapshot id. AtomicLong newSnapshotId = new AtomicLong(-1L); try { Tasks.foreach(ops) .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT)) .exponentialBackoff( base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT), base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT), base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT), 2.0 /* exponential */) .onlyRetryOn(CommitFailedException.class) .run(taskOps -> { Snapshot newSnapshot = apply(); newSnapshotId.set(newSnapshot.snapshotId()); TableMetadata updated; if (stageOnly) { updated = base.addStagedSnapshot(newSnapshot); } else { updated = base.replaceCurrentSnapshot(newSnapshot); } if (updated == base) { // do not commit if the metadata has not changed. for example, this may happen when setting the current // snapshot to an ID that is already current. note that this check uses identity. return; } // if the table UUID is missing, add it here. the UUID will be re-created each time this operation retries // to ensure that if a concurrent operation assigns the UUID, this operation will not fail. taskOps.commit(base, updated.withUUID()); }); } catch (CommitStateUnknownException commitStateUnknownException) { throw commitStateUnknownException; } catch (RuntimeException e) { Exceptions.suppressAndThrow(e, this::cleanAll); } LOG.info("Committed snapshot {} ({})", newSnapshotId.get(), getClass().getSimpleName()); try { // at this point, the commit must have succeeded. after a refresh, the snapshot is loaded by // id in case another commit was added between this commit and the refresh. Snapshot saved = ops.refresh().snapshot(newSnapshotId.get()); if (saved != null) { cleanUncommitted(Sets.newHashSet(saved.allManifests())); // also clean up unused manifest lists created by multiple attempts for (String manifestList : manifestLists) { if (!saved.manifestListLocation().equals(manifestList)) { deleteFile(manifestList); } } } else { // saved may not be present if the latest metadata couldn't be loaded due to eventual // consistency problems in refresh. in that case, don't clean up. 
LOG.warn("Failed to load committed snapshot, skipping manifest clean-up"); } } catch (RuntimeException e) { LOG.warn("Failed to load committed table metadata, skipping manifest clean-up", e); } notifyListeners(); } private void notifyListeners() { try { Object event = updateEvent(); if (event != null) { Listeners.notifyAll(event); } } catch (RuntimeException e) { LOG.warn("Failed to notify listeners", e); } } protected void cleanAll() { for (String manifestList : manifestLists) { deleteFile(manifestList); } manifestLists.clear(); cleanUncommitted(EMPTY_SET); } protected void deleteFile(String path) { deleteFunc.accept(path); } protected OutputFile manifestListPath() { return ops.io().newOutputFile(ops.metadataFileLocation(FileFormat.AVRO.addExtension( String.format("snap-%d-%d-%s", snapshotId(), attempt.incrementAndGet(), commitUUID)))); } protected OutputFile newManifestOutput() { return ops.io().newOutputFile( ops.metadataFileLocation(FileFormat.AVRO.addExtension(commitUUID + "-m" + manifestCount.getAndIncrement()))); } protected ManifestWriter<DataFile> newManifestWriter(PartitionSpec spec) { return ManifestFiles.write(ops.current().formatVersion(), spec, newManifestOutput(), snapshotId()); } protected ManifestWriter<DeleteFile> newDeleteManifestWriter(PartitionSpec spec) { return ManifestFiles.writeDeleteManifest(ops.current().formatVersion(), spec, newManifestOutput(), snapshotId()); } protected ManifestReader<DataFile> newManifestReader(ManifestFile manifest) { return ManifestFiles.read(manifest, ops.io(), ops.current().specsById()); } protected ManifestReader<DeleteFile> newDeleteManifestReader(ManifestFile manifest) { return ManifestFiles.readDeleteManifest(manifest, ops.io(), ops.current().specsById()); } protected long snapshotId() { if (snapshotId == null) { synchronized (this) { if (snapshotId == null) { this.snapshotId = ops.newSnapshotId(); } } } return snapshotId; } private static ManifestFile addMetadata(TableOperations ops, ManifestFile manifest) { try (ManifestReader<DataFile> reader = ManifestFiles.read(manifest, ops.io(), ops.current().specsById())) { PartitionSummary stats = new PartitionSummary(ops.current().spec(manifest.partitionSpecId())); int addedFiles = 0; long addedRows = 0L; int existingFiles = 0; long existingRows = 0L; int deletedFiles = 0; long deletedRows = 0L; Long snapshotId = null; long maxSnapshotId = Long.MIN_VALUE; for (ManifestEntry<DataFile> entry : reader.entries()) { if (entry.snapshotId() > maxSnapshotId) { maxSnapshotId = entry.snapshotId(); } switch (entry.status()) { case ADDED: addedFiles += 1; addedRows += entry.file().recordCount(); if (snapshotId == null) { snapshotId = entry.snapshotId(); } break; case EXISTING: existingFiles += 1; existingRows += entry.file().recordCount(); break; case DELETED: deletedFiles += 1; deletedRows += entry.file().recordCount(); if (snapshotId == null) { snapshotId = entry.snapshotId(); } break; } stats.update(entry.file().partition()); } if (snapshotId == null) { // if no files were added or deleted, use the largest snapshot ID in the manifest snapshotId = maxSnapshotId; } return new GenericManifestFile(manifest.path(), manifest.length(), manifest.partitionSpecId(), ManifestContent.DATA, manifest.sequenceNumber(), manifest.minSequenceNumber(), snapshotId, addedFiles, addedRows, existingFiles, existingRows, deletedFiles, deletedRows, stats.summaries(), null); } catch (IOException e) { throw new RuntimeIOException(e, "Failed to read manifest: %s", manifest.path()); } } private static void 
updateTotal(ImmutableMap.Builder<String, String> summaryBuilder, Map<String, String> previousSummary, String totalProperty, Map<String, String> currentSummary, String addedProperty, String deletedProperty) { String totalStr = previousSummary.get(totalProperty); if (totalStr != null) { try { long newTotal = Long.parseLong(totalStr); String addedStr = currentSummary.get(addedProperty); if (newTotal >= 0 && addedStr != null) { newTotal += Long.parseLong(addedStr); } String deletedStr = currentSummary.get(deletedProperty); if (newTotal >= 0 && deletedStr != null) { newTotal -= Long.parseLong(deletedStr); } if (newTotal >= 0) { summaryBuilder.put(totalProperty, String.valueOf(newTotal)); } } catch (NumberFormatException e) { // ignore and do not add total } } } }
1
42,649
I think this needs a more specific name, like `sequenceNumberOverride`
apache-iceberg
java
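A sketch of what the rename suggested in the review comment could look like, with the override consulted where the snapshot's sequence number is chosen. The class `SnapshotProducerSketch` and the helper `resolveSequenceNumber()` are illustrative stand-ins, not Iceberg code.

```java
// Standalone illustration of the "override or default" pattern the reviewer is
// hinting at with the name sequenceNumberOverride; nothing here is Iceberg API.
abstract class SnapshotProducerSketch {

  /**
   * Optional override for the sequence number written to the manifest list.
   *
   * @return the sequence number to use, or null to fall back to the table's
   *         next sequence number
   */
  protected Long sequenceNumberOverride() {
    return null;
  }

  /** Stand-in for TableMetadata#nextSequenceNumber() in this sketch. */
  protected abstract long nextSequenceNumber();

  /** How apply() would pick the value to pass to the manifest list writer. */
  protected long resolveSequenceNumber() {
    Long override = sequenceNumberOverride();
    return override != null ? override : nextSequenceNumber();
  }
}
```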
@@ -39,7 +39,8 @@ module Blacklight
     when /::/
       connection_config[:adapter].constantize
     else
-      Blacklight.const_get("#{connection_config[:adapter]}/Repository".classify)
+      raise "The value for :adapter was not found in the blacklight.yml config" unless connection_config.key? :adapter
+      Blacklight.const_get("#{connection_config.fetch(:adapter)}/Repository".classify)
     end
   end
1
require 'kaminari' require 'deprecation' require 'blacklight/utils' require 'active_support/hash_with_indifferent_access' module Blacklight autoload :AbstractRepository, 'blacklight/abstract_repository' autoload :Configuration, 'blacklight/configuration' autoload :Exceptions, 'blacklight/exceptions' autoload :Parameters, 'blacklight/parameters' autoload :Routes, 'blacklight/routes' autoload :SearchBuilder, 'blacklight/search_builder' autoload :SearchState, 'blacklight/search_state' autoload :Solr, 'blacklight/solr' extend Deprecation require 'blacklight/version' require 'blacklight/engine' if defined?(Rails) def self.blacklight_config_file "#{::Rails.root}/config/blacklight.yml" end ## # The default index connection for the search index def self.default_index @default_index ||= repository_class.new(default_configuration) end ## # The configured repository class. By convention, this is # the class Blacklight::{name of the adapter}::Repository, e.g. # elastic_search => Blacklight::ElasticSearch::Repository def self.repository_class case connection_config[:adapter] when 'solr' Blacklight::Solr::Repository when /::/ connection_config[:adapter].constantize else Blacklight.const_get("#{connection_config[:adapter]}/Repository".classify) end end ## # The default Blacklight configuration. def self.default_configuration Blacklight::Configuration.new end def self.connection_config @connection_config ||= begin raise "The #{::Rails.env} environment settings were not found in the blacklight.yml config" unless blacklight_yml[::Rails.env] blacklight_yml[::Rails.env].symbolize_keys end end def self.blacklight_yml require 'erb' require 'yaml' return @blacklight_yml if @blacklight_yml unless File.exists?(blacklight_config_file) raise "You are missing a configuration file: #{blacklight_config_file}. Have you run \"rails generate blacklight:install\"?" end begin blacklight_erb = ERB.new(IO.read(blacklight_config_file)).result(binding) rescue StandardError, SyntaxError => e raise("#{blacklight_config_file} was found, but could not be parsed with ERB. \n#{e.inspect}") end begin @blacklight_yml = YAML::load(blacklight_erb) rescue => e raise("#{blacklight_config_file} was found, but could not be parsed.\n#{e.inspect}") end if @blacklight_yml.nil? || !@blacklight_yml.is_a?(Hash) raise("#{blacklight_config_file} was found, but was blank or malformed.\n") end return @blacklight_yml end def self.logger @logger ||= begin ::Rails.logger if defined? Rails and Rails.respond_to? :logger end end def self.logger= logger @logger = logger end ############# # Methods for figuring out path to BL plugin, and then locate various files # either in the app itself or defaults in the plugin -- whether you are running # from the plugin itself or from an actual app using te plugin. # In a seperate module so it can be used by both Blacklight class, and # by rake tasks without loading the whole Rails environment. ############# # returns the full path the the blacklight plugin installation def self.root @root ||= File.expand_path(File.dirname(File.dirname(__FILE__))) end end
1
6,287
Ought we just raise an exception if the adapter isn't defined?
projectblacklight-blacklight
rb
@@ -5,7 +5,6 @@ var stsLegacyGlobalRegions = map[string]struct{}{
 	"ap-south-1":     {},
 	"ap-southeast-1": {},
 	"ap-southeast-2": {},
-	"aws-global":     {},
 	"ca-central-1":   {},
 	"eu-central-1":   {},
 	"eu-north-1":     {},
1
package endpoints

var stsLegacyGlobalRegions = map[string]struct{}{
	"ap-northeast-1": {},
	"ap-south-1":     {},
	"ap-southeast-1": {},
	"ap-southeast-2": {},
	"aws-global":     {},
	"ca-central-1":   {},
	"eu-central-1":   {},
	"eu-north-1":     {},
	"eu-west-1":      {},
	"eu-west-2":      {},
	"eu-west-3":      {},
	"sa-east-1":      {},
	"us-east-1":      {},
	"us-east-2":      {},
	"us-west-1":      {},
	"us-west-2":      {},
}
1
10,008
Should this have been removed? We still set the region to "aws-global" in v3model.go#L115
aws-aws-sdk-go
go
@@ -335,14 +335,15 @@ def setup_logging(config):
                 errno=errors.ERRORS.INVALID_PARAMETERS,
                 message='Invalid URL path.')
 
+        qs = dict(errors.request_GET(request))
         request.log_context(agent=request.headers.get('User-Agent'),
                             path=request_path,
                             method=request.method,
-                            querystring=dict(errors.request_GET(request)),
+                            querystring=(qs if len(qs) else None),
                             lang=request.headers.get('Accept-Language'),
                             uid=None,
                             authn_type=None,
-                            errno=None)
+                            errno=0)
 
     config.add_subscriber(on_new_request, NewRequest)
1
import logging import re import warnings from datetime import datetime from dateutil import parser as dateparser from pyramid.events import NewRequest, NewResponse from pyramid.exceptions import ConfigurationError from pyramid.httpexceptions import (HTTPTemporaryRedirect, HTTPGone, HTTPBadRequest) from pyramid.renderers import JSON as JSONRenderer from pyramid.response import Response from pyramid.security import NO_PERMISSION_REQUIRED from pyramid.interfaces import IAuthenticationPolicy from pyramid.settings import asbool, aslist from pyramid_multiauth import (MultiAuthenticationPolicy, MultiAuthPolicySelected) try: import newrelic.agent except ImportError: # pragma: no cover newrelic = None try: from werkzeug.contrib.profiler import ProfilerMiddleware except ImportError: # pragma: no cover pass from kinto.core import errors from kinto.core import utils from kinto.core import cache from kinto.core import storage from kinto.core import permission from kinto.core.events import ResourceRead, ResourceChanged, ACTIONS logger = logging.getLogger(__name__) summary_logger = logging.getLogger('request.summary') def setup_request_bound_data(config): """Attach custom data on request object, and share it with parent requests during batch.""" def attach_bound_data(request): parent = getattr(request, 'parent', None) return parent.bound_data if parent else {} config.add_request_method(attach_bound_data, name='bound_data', reify=True) def setup_json_serializer(config): import requests import webob # Monkey patch to use ujson webob.request.json = utils.json requests.models.json = utils.json # Override json renderer using ujson renderer = JSONRenderer(serializer=utils.json_serializer) config.add_renderer('json', renderer) def setup_version_redirection(config): """Add a view which redirects to the current version of the API. """ settings = config.get_settings() redirect_enabled = settings['version_prefix_redirect_enabled'] version_prefix_redirection_enabled = asbool(redirect_enabled) route_prefix = config.route_prefix config.registry.route_prefix = route_prefix # Redirect to the current version of the API if the prefix isn't used. # Do not redirect if kinto.version_prefix_redirect_enabled is set to # False. if not version_prefix_redirection_enabled: return def _redirect_to_version_view(request): if request.method.lower() == 'options': # CORS responses should always have status 200. return utils.reapply_cors(request, Response()) querystring = request.url[(request.url.rindex(request.path) + len(request.path)):] redirect = '/{}{}{}'.format(route_prefix, request.path, querystring) raise HTTPTemporaryRedirect(redirect) # Disable the route prefix passed by the app. config.route_prefix = None config.add_route(name='redirect_to_version', pattern=r'/{path:(?!v[0-9]+)[^\r\n]*}') config.add_view(view=_redirect_to_version_view, route_name='redirect_to_version', permission=NO_PERMISSION_REQUIRED) config.route_prefix = route_prefix def setup_authentication(config): """Let pyramid_multiauth manage authentication and authorization from configuration. """ config.include('pyramid_multiauth') settings = config.get_settings() policies = aslist(settings['multiauth.policies']) if 'basicauth' in policies: config.include('kinto.core.authentication') # Track policy used, for prefixing user_id and for logging. def on_policy_selected(event): request = event.request authn_type = event.policy_name.lower() request.authn_type = authn_type request.selected_userid = event.userid # Add authentication info to context. 
request.log_context(uid=event.userid, authn_type=authn_type) config.add_subscriber(on_policy_selected, MultiAuthPolicySelected) def setup_backoff(config): """Attach HTTP requests/responses objects. This is useful to attach objects to the request object for easier access, and to pre-process responses. """ def on_new_response(event): # Add backoff in response headers. backoff = config.registry.settings['backoff'] if backoff is not None: event.response.headers['Backoff'] = str(backoff) config.add_subscriber(on_new_response, NewResponse) def setup_requests_scheme(config): """Force server scheme, host and port at the application level.""" settings = config.get_settings() http_scheme = settings['http_scheme'] http_host = settings['http_host'] def on_new_request(event): if http_scheme: event.request.scheme = http_scheme if http_host: event.request.host = http_host if http_scheme or http_host: config.add_subscriber(on_new_request, NewRequest) def setup_deprecation(config): config.add_tween('kinto.core.initialization._end_of_life_tween_factory') def _end_of_life_tween_factory(handler, registry): """Pyramid tween to handle service end of life.""" deprecation_msg = ('The service you are trying to connect no longer exists' ' at this location.') def eos_tween(request): eos_date = registry.settings['eos'] eos_url = registry.settings['eos_url'] eos_message = registry.settings['eos_message'] if not eos_date: return handler(request) eos_date = dateparser.parse(eos_date) if eos_date > datetime.now(): code = 'soft-eol' request.response = handler(request) else: code = 'hard-eol' request.response = errors.http_error( HTTPGone(), errno=errors.ERRORS.SERVICE_DEPRECATED, message=deprecation_msg) errors.send_alert(request, eos_message, url=eos_url, code=code) return request.response return eos_tween def setup_storage(config): settings = config.get_settings() # Id generators by resource name. 
config.registry.id_generators = {} for key, value in settings.items(): m = re.match(r'^([^_]*)_?id_generator', key) if m is None: continue resource_name = m.group(1) id_generator = config.maybe_dotted(value) config.registry.id_generators[resource_name] = id_generator() storage_mod = settings['storage_backend'] if not storage_mod: return storage_mod = config.maybe_dotted(storage_mod) backend = storage_mod.load_from_config(config) if not isinstance(backend, storage.StorageBase): raise ConfigurationError('Invalid storage backend: {}'.format(backend)) config.registry.storage = backend heartbeat = storage.heartbeat(backend) config.registry.heartbeats['storage'] = heartbeat def setup_permission(config): settings = config.get_settings() permission_mod = settings['permission_backend'] if not permission_mod: return permission_mod = config.maybe_dotted(permission_mod) backend = permission_mod.load_from_config(config) if not isinstance(backend, permission.PermissionBase): raise ConfigurationError('Invalid permission backend: {}'.format(backend)) config.registry.permission = backend heartbeat = permission.heartbeat(backend) config.registry.heartbeats['permission'] = heartbeat def setup_cache(config): settings = config.get_settings() cache_mod = settings['cache_backend'] if not cache_mod: return cache_mod = config.maybe_dotted(cache_mod) backend = cache_mod.load_from_config(config) if not isinstance(backend, cache.CacheBase): raise ConfigurationError('Invalid cache backend: {}'.format(backend)) config.registry.cache = backend heartbeat = cache.heartbeat(backend) config.registry.heartbeats['cache'] = heartbeat def setup_statsd(config): settings = config.get_settings() config.registry.statsd = None if settings['statsd_url']: statsd_mod = settings['statsd_backend'] statsd_mod = config.maybe_dotted(statsd_mod) client = statsd_mod.load_from_config(config) config.registry.statsd = client client.watch_execution_time(config.registry.cache, prefix='backend') client.watch_execution_time(config.registry.storage, prefix='backend') client.watch_execution_time(config.registry.permission, prefix='backend') # Commit so that configured policy can be queried. config.commit() policy = config.registry.queryUtility(IAuthenticationPolicy) if isinstance(policy, MultiAuthenticationPolicy): for name, subpolicy in policy.get_policies(): client.watch_execution_time(subpolicy, prefix='authentication', classname=name) else: client.watch_execution_time(policy, prefix='authentication') def on_new_response(event): request = event.request # Count unique users. user_id = request.prefixed_userid if user_id: # Get rid of colons in metric packet (see #1282). user_id = user_id.replace(':', '.') client.count('users', unique=user_id) # Count authentication verifications. if hasattr(request, 'authn_type'): client.count('authn_type.{}'.format(request.authn_type)) # Count view calls. service = request.current_service if service: client.count('view.{}.{}'.format(service.name, request.method)) config.add_subscriber(on_new_response, NewResponse) return client def install_middlewares(app, settings): 'Install a set of middlewares defined in the ini file on the given app.' # Setup new-relic. if settings.get('newrelic_config'): ini_file = settings['newrelic_config'] env = settings['newrelic_env'] newrelic.agent.initialize(ini_file, env) app = newrelic.agent.WSGIApplicationWrapper(app) # Adds the Werkzeug profiler. 
if asbool(settings.get('profiler_enabled')): profile_dir = settings['profiler_dir'] app = ProfilerMiddleware(app, profile_dir=profile_dir, restrictions=('*kinto.core*')) return app def setup_logging(config): """Setup structured logging, and emit `request.summary` event on each request, as recommanded by Mozilla Services standard: * https://mana.mozilla.org/wiki/display/CLOUDSERVICES/Logging+Standard * http://12factor.net/logs """ def on_new_request(event): request = event.request # Save the time the request was received by the server. event.request._received_at = utils.msec_time() try: # Pyramid fails if the URL contains invalid UTF-8 characters. request_path = event.request.path except UnicodeDecodeError: raise errors.http_error( HTTPBadRequest(), errno=errors.ERRORS.INVALID_PARAMETERS, message='Invalid URL path.') request.log_context(agent=request.headers.get('User-Agent'), path=request_path, method=request.method, querystring=dict(errors.request_GET(request)), lang=request.headers.get('Accept-Language'), uid=None, authn_type=None, errno=None) config.add_subscriber(on_new_request, NewRequest) def on_new_response(event): response = event.response request = event.request # Compute the request processing time in msec (-1 if unknown) current = utils.msec_time() duration = current - getattr(request, '_received_at', current - 1) isotimestamp = datetime.fromtimestamp(current/1000).isoformat() # Bind infos for request summary logger. request.log_context(time=isotimestamp, code=response.status_code, t=duration) try: # If error response, bind errno. request.log_context(errno=response.errno) except AttributeError: pass if not hasattr(request, 'parent'): # Ouput application request summary. summary_logger.info('', extra=request.log_context()) config.add_subscriber(on_new_response, NewResponse) class EventActionFilter: def __init__(self, actions, config): actions = ACTIONS.from_string_list(actions) self.actions = [action.value for action in actions] def phash(self): return 'for_actions = {}'.format(','.join(self.actions)) def __call__(self, event): action = event.payload.get('action') return not action or action in self.actions class EventResourceFilter: def __init__(self, resources, config): self.resources = resources def phash(self): return 'for_resources = {}'.format(','.join(self.resources)) def __call__(self, event): resource = event.payload.get('resource_name') return not resource or not self.resources or resource in self.resources def setup_listeners(config): # Register basic subscriber predicates, to filter events. config.add_subscriber_predicate('for_actions', EventActionFilter) config.add_subscriber_predicate('for_resources', EventResourceFilter) write_actions = (ACTIONS.CREATE, ACTIONS.UPDATE, ACTIONS.DELETE) settings = config.get_settings() project_name = settings.get('project_name', '') listeners = aslist(settings['event_listeners']) for name in listeners: logger.info("Setting up '{}' listener".format(name)) prefix = 'event_listeners.{}.'.format(name) try: listener_mod = config.maybe_dotted(name) prefix = 'event_listeners.{}.'.format(name.split('.')[-1]) listener = listener_mod.load_from_config(config, prefix) except (ImportError, AttributeError): module_setting = prefix + 'use' # Read from ENV or settings. module_value = utils.read_env('{}.{}'.format(project_name, module_setting), settings.get(module_setting)) listener_mod = config.maybe_dotted(module_value) listener = listener_mod.load_from_config(config, prefix) # If StatsD is enabled, monitor execution time of listeners. 
if getattr(config.registry, 'statsd', None): statsd_client = config.registry.statsd key = 'listeners.{}'.format(name) listener = statsd_client.timer(key)(listener.__call__) # Optional filter by event action. actions_setting = prefix + 'actions' # Read from ENV or settings. actions_value = utils.read_env('{}.{}'.format(project_name, actions_setting), settings.get(actions_setting, '')) actions = aslist(actions_value) if len(actions) > 0: actions = ACTIONS.from_string_list(actions) else: actions = write_actions # Optional filter by event resource name. resource_setting = prefix + 'resources' # Read from ENV or settings. resource_value = utils.read_env('{}.{}'.format(project_name, resource_setting), settings.get(resource_setting, '')) resource_names = aslist(resource_value) # Pyramid event predicates. options = dict(for_actions=actions, for_resources=resource_names) if ACTIONS.READ in actions: config.add_subscriber(listener, ResourceRead, **options) if len(actions) == 1: return config.add_subscriber(listener, ResourceChanged, **options) def load_default_settings(config, default_settings): """Read settings provided in Paste ini file, set default values and replace if defined as environment variable. """ settings = config.get_settings() project_name = settings['project_name'] def _prefixed_keys(key): unprefixed = key if key.startswith('kinto.') or key.startswith(project_name + '.'): unprefixed = key.split('.', 1)[1] project_prefix = '{}.{}'.format(project_name, unprefixed) kinto_prefix = 'kinto.{}'.format(unprefixed) return unprefixed, project_prefix, kinto_prefix # Fill settings with default values if not defined. for key, default_value in sorted(default_settings.items()): unprefixed, project_prefix, kinto_prefix = keys = _prefixed_keys(key) is_defined = len(set(settings.keys()).intersection(set(keys))) > 0 if not is_defined: settings[unprefixed] = default_value for key, value in sorted(settings.items()): unprefixed, project_prefix, kinto_prefix = keys = _prefixed_keys(key) # Fail if not only one is defined. defined = set(settings.keys()).intersection(set(keys)) distinct_values = set([str(settings[d]) for d in defined]) if len(defined) > 1 and len(distinct_values) > 1: names = "', '".join(defined) raise ValueError("Settings '{}' are in conflict.".format(names)) # Override settings from OS env values. # e.g. HTTP_PORT, READINGLIST_HTTP_PORT, KINTO_HTTP_PORT from_env = utils.read_env(unprefixed, value) from_env = utils.read_env(project_prefix, from_env) from_env = utils.read_env(kinto_prefix, from_env) settings[unprefixed] = from_env config.add_settings(settings) def initialize(config, version=None, project_name='', default_settings=None): """Initialize kinto.core with the given configuration, version and project name. This will basically include kinto.core in Pyramid and set route prefix based on the specified version. :param config: Pyramid configuration :type config: ~pyramid:pyramid.config.Configurator :param str version: Current project version (e.g. '0.0.1') if not defined in application settings. :param str project_name: Project name if not defined in application settings. :param dict default_settings: Override kinto.core default settings values. 
""" from kinto.core import DEFAULT_SETTINGS settings = config.get_settings() project_name = settings.pop('kinto.project_name', settings.get('project_name')) or project_name settings['project_name'] = project_name if not project_name: warnings.warn('No value specified for `project_name`') kinto_core_defaults = {**DEFAULT_SETTINGS} if default_settings: kinto_core_defaults.update(default_settings) load_default_settings(config, kinto_core_defaults) http_scheme = settings['http_scheme'] if http_scheme != 'https': warnings.warn('HTTPS is not enabled') # Override project version from settings. project_version = settings.get('project_version') or version if not project_version: error_msg = 'Invalid project version: {}'.format(project_version) raise ConfigurationError(error_msg) settings['project_version'] = project_version = str(project_version) # HTTP API version. http_api_version = settings.get('http_api_version') if http_api_version is None: # The API version is derivated from the module version if not provided. http_api_version = '.'.join(project_version.split('.')[0:2]) settings['http_api_version'] = http_api_version = str(http_api_version) api_version = 'v{}'.format(http_api_version.split('.')[0]) # Include kinto.core views with the correct api version prefix. config.include('kinto.core', route_prefix=api_version) config.route_prefix = api_version
1
11,159
You could shorten this to `qs or None`. But why not just build a dict of parameters we want to include and only add `querystring` if there's something here, similar to the way you do in the error view?
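A minimal sketch of the pattern this comment suggests, reusing only names already present in the on_new_request handler above (the local `context` variable is illustrative):

qs = dict(errors.request_GET(request))
context = dict(agent=request.headers.get('User-Agent'),
               path=request_path,
               method=request.method,
               lang=request.headers.get('Accept-Language'),
               uid=None,
               authn_type=None,
               errno=None)
# Only include `querystring` when the request actually carried one.
if qs:
    context['querystring'] = qs
request.log_context(**context)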
Kinto-kinto
py
@@ -31,15 +31,9 @@ func (c *client) acquireTopologyCacheLock(ctx context.Context) { } defer conn.Close() - go func() { - <-ctx.Done() - ticker.Stop() - c.unlockAdvisoryLock(context.Background(), conn, advisoryLockId) - }() - // Infinitely try to acquire the advisory lock // Once the lock is acquired we start caching, this is a blocking operation - for ; true; <-ticker.C { + for { c.log.Info("trying to acquire advisory lock") // TODO: We could in the future spread the load of the topology caching
1
package topology import ( "context" "crypto/sha256" "database/sql" "encoding/binary" "encoding/json" "time" "go.uber.org/zap" "google.golang.org/protobuf/encoding/protojson" topologyv1 "github.com/lyft/clutch/backend/api/topology/v1" "github.com/lyft/clutch/backend/service" ) const topologyCacheLockId = "topology:cache" // Performs leader election by acquiring a postgres advisory lock. // Once the lock is acquired topology caching is started. func (c *client) acquireTopologyCacheLock(ctx context.Context) { advisoryLockId := convertLockIdToAdvisoryLockId(topologyCacheLockId) ticker := time.NewTicker(time.Second * 10) // We create our own connection to use for acquiring the advisory lock // If the connection is severed for any reason the advisory lock will automatically unlock conn, err := c.db.Conn(ctx) if err != nil { c.log.Fatal("Unable to connect to the database", zap.Error(err)) } defer conn.Close() go func() { <-ctx.Done() ticker.Stop() c.unlockAdvisoryLock(context.Background(), conn, advisoryLockId) }() // Infinitely try to acquire the advisory lock // Once the lock is acquired we start caching, this is a blocking operation for ; true; <-ticker.C { c.log.Info("trying to acquire advisory lock") // TODO: We could in the future spread the load of the topology caching // across many clutch instances by having an a lock per service (e.g. AWS, k8s, etc) if c.tryAdvisoryLock(ctx, conn, advisoryLockId) { c.log.Info("acquired the advisory lock, starting to cache topology now...") go c.expireCache(ctx) c.startTopologyCache(ctx) } } } // TODO: The advisory locking logic can be decomposed into its own service (e.g. "global locking service"). // Which can be used generically for anything that needs distributed locking functionality. func (c *client) tryAdvisoryLock(ctx context.Context, conn *sql.Conn, lockId uint32) bool { var lock bool // Notably we do not use `pg_advisory_lock` as the behavior of this function stack locks. // For each lock invocation requires the same number of unlocks to release the advisory lock. // https://www.postgresql.org/docs/12/functions-admin.html#FUNCTIONS-ADVISORY-LOCKS-TABL if err := conn.QueryRowContext(ctx, "SELECT pg_try_advisory_lock($1);", lockId).Scan(&lock); err != nil { c.log.Error("Unable to query for a advisory lock", zap.Error(err)) } return lock } func (c *client) unlockAdvisoryLock(ctx context.Context, conn *sql.Conn, lockId uint32) bool { var unlock bool if err := conn.QueryRowContext(ctx, "SELECT pg_advisory_unlock($1)", lockId).Scan(&unlock); err != nil { c.log.Error("Unable to perform an advisory unlock", zap.Error(err)) } return unlock } func convertLockIdToAdvisoryLockId(lockID string) uint32 { x := sha256.New().Sum([]byte(lockID)) return binary.BigEndian.Uint32(x) } // This will check all services that are currently registered for the given clutch configuration // If any of the services implement the CacheableTopology interface we will start consuming // topology objects until the context has been cancelled. 
// func (c *client) startTopologyCache(ctx context.Context) { for n, s := range service.Registry { if svc, ok := s.(CacheableTopology); ok { if svc.CacheEnabled() { c.log.Info("Processing Topology Objects for service", zap.String("service", n)) topologyChannel, err := svc.StartTopologyCaching(ctx) if err != nil { c.log.Error("Unable to start topology caching", zap.String("service", n), zap.Error(err)) continue } go c.processTopologyObjectChannel(ctx, topologyChannel) } } } <-ctx.Done() } func (c *client) processTopologyObjectChannel(ctx context.Context, objs <-chan *topologyv1.UpdateCacheRequest) { for obj := range objs { switch obj.Action { case topologyv1.UpdateCacheRequest_CREATE_OR_UPDATE: if err := c.setCache(ctx, obj.Resource); err != nil { c.log.Error("Error setting cache", zap.Error(err)) } case topologyv1.UpdateCacheRequest_DELETE: if err := c.deleteCache(ctx, obj.Resource.Id); err != nil { c.log.Error("Error deleting cache", zap.Error(err)) } default: c.log.Warn("UpdateCacheRequest action is not implemented", zap.String("action", obj.Action.String())) } } } func (c *client) setCache(ctx context.Context, obj *topologyv1.Resource) error { const upsertQuery = ` INSERT INTO topology_cache (id, resolver_type_url, data, metadata) VALUES ($1, $2, $3, $4) ON CONFLICT (id) DO UPDATE SET resolver_type_url = EXCLUDED.resolver_type_url, data = EXCLUDED.data, metadata = EXCLUDED.metadata, updated_at = NOW() ` metadataJson, err := json.Marshal(obj.Metadata) if err != nil { c.scope.SubScope("cache").Counter("set.failure").Inc(1) return err } dataJson, err := protojson.Marshal(obj.Pb) if err != nil { c.scope.SubScope("cache").Counter("set.failure").Inc(1) return err } _, err = c.db.ExecContext( ctx, upsertQuery, obj.Id, obj.Pb.GetTypeUrl(), dataJson, metadataJson, ) if err != nil { c.scope.SubScope("cache").Counter("set.failure").Inc(1) return err } c.scope.SubScope("cache").Counter("set.success").Inc(1) return nil } func (c *client) deleteCache(ctx context.Context, id string) error { const deleteQuery = ` DELETE FROM topology_cache WHERE id = $1 ` _, err := c.db.ExecContext(ctx, deleteQuery, id) if err != nil { c.scope.SubScope("cache").Counter("delete.failure").Inc(1) return err } c.scope.SubScope("cache").Counter("delete.success").Inc(1) return nil } func (c *client) expireCache(ctx context.Context) { // Delete all entries that are older than two hours const expireQuery = ` DELETE FROM topology_cache WHERE updated_at <= NOW() - INTERVAL '120minutes'; ` ticker := time.NewTicker(time.Minute * 20) for { result, err := c.db.ExecContext(ctx, expireQuery) if err != nil { c.scope.SubScope("cache").Counter("expire.failure").Inc(1) c.log.Error("unable to expire cache", zap.Error(err)) continue } numOfItemsRemoved, err := result.RowsAffected() if err != nil { c.scope.SubScope("cache").Counter("expire.failure").Inc(1) c.log.Error("unable to get rows removed from cache expiry query", zap.Error(err)) } else { c.scope.SubScope("cache").Counter("expire.success").Inc(1) c.log.Info("successfully removed expired cache", zap.Int64("count", numOfItemsRemoved)) } select { case <-ticker.C: continue case <-ctx.Done(): ticker.Stop() return } } }
1
9,156
Sorry if I'm missing it somewhere else in the code, but do you still need to create a ticker with `ticker := time.NewTicker`?
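If the ticker really is unnecessary after this change, the retry loop could pace itself directly; a sketch under that assumption (the 10-second interval mirrors the original ticker):

for {
	c.log.Info("trying to acquire advisory lock")
	if c.tryAdvisoryLock(ctx, conn, advisoryLockId) {
		c.log.Info("acquired the advisory lock, starting to cache topology now...")
		go c.expireCache(ctx)
		c.startTopologyCache(ctx)
	}
	// Wait before the next attempt, or stop when the context is cancelled.
	select {
	case <-ctx.Done():
		return
	case <-time.After(10 * time.Second):
	}
}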
lyft-clutch
go
@@ -44,7 +44,11 @@ import ( func TestNew(t *testing.T) { ctx, _ := SetupFakeContext(t) - c := NewController(ctx, configmap.NewStaticWatcher( + dataresidencySs := &dataresidency.StoreSingleton{} + + ctor := NewConstructor(dataresidencySs) + + c := ctor(ctx, configmap.NewStaticWatcher( &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: logging.ConfigMapName(),
1
/* Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package trigger import ( "testing" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/configmap" "knative.dev/pkg/logging" "knative.dev/pkg/metrics" . "knative.dev/pkg/reconciler/testing" "knative.dev/pkg/system" tracingconfig "knative.dev/pkg/tracing/config" // Fake injection informers "github.com/google/knative-gcp/pkg/apis/configs/dataresidency" _ "github.com/google/knative-gcp/pkg/client/injection/informers/broker/v1beta1/broker/fake" _ "github.com/google/knative-gcp/pkg/client/injection/informers/broker/v1beta1/trigger/fake" _ "knative.dev/pkg/client/injection/ducks/duck/v1/addressable/fake" _ "knative.dev/pkg/client/injection/ducks/duck/v1/conditions/fake" _ "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment/fake" _ "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap/fake" _ "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints/fake" _ "knative.dev/pkg/client/injection/kube/informers/core/v1/pod/fake" _ "knative.dev/pkg/injection/clients/dynamicclient/fake" ) func TestNew(t *testing.T) { ctx, _ := SetupFakeContext(t) c := NewController(ctx, configmap.NewStaticWatcher( &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: logging.ConfigMapName(), Namespace: system.Namespace(), }, Data: map[string]string{}, }, &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: metrics.ConfigMapName(), Namespace: system.Namespace(), }, Data: map[string]string{}, }, &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: tracingconfig.ConfigName, Namespace: system.Namespace(), }, Data: map[string]string{}, }, &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: dataresidency.ConfigMapName(), Namespace: system.Namespace(), }, Data: map[string]string{}, }, )) if c == nil { t.Fatal("Expected NewController to return a non-nil value") } }
1
18,026
Inline this, as we don't use it again.
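Inlined, the setup from the diff could read roughly like this (the ConfigMap arguments are elided for brevity):

dataresidencySs := &dataresidency.StoreSingleton{}

// The constructor is only used once, so call it directly.
c := NewConstructor(dataresidencySs)(ctx, configmap.NewStaticWatcher(
	// ... same ConfigMaps as in the original test ...
))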
google-knative-gcp
go
@@ -39,7 +39,9 @@ exclude = ["ForwardFFTImageFilter", "templated_class", "HalfHermitianToRealInverseFFTImageFilter", "RealToHalfHermitianForwardFFTImageFilter", - "CustomColormapFunction"] + "CustomColormapFunction", + "ScanlineFilterCommon" # Segfault + ] wrongName = 0 totalName = 0
1
#========================================================================== # # Copyright Insight Software Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #==========================================================================*/ # a short program to check the value returned by the GetNameOfClass() methods from __future__ import print_function import itk import sys itk.auto_progress(2) # must force the load to return all the names with dir(itk) itk.force_load() # itk.ImageToImageFilter # a list of classes to exclude. Typically, the classes with a custom New() # method, which return a subclass of the current class exclude = ["ForwardFFTImageFilter", "InverseFFTImageFilter", "OutputWindow", "MultiThreaderBase", "FFTComplexToComplexImageFilter", "ComplexToComplexFFTImageFilter", "templated_class", "HalfHermitianToRealInverseFFTImageFilter", "RealToHalfHermitianForwardFFTImageFilter", "CustomColormapFunction"] wrongName = 0 totalName = 0 for t in dir(itk): if t not in exclude: T = itk.__dict__[t] # first case - that's a templated class if isinstance(T, itk.Vector.__class__) and len(T) > 0: # use only the first specialization - all of them return the same # name i = T.values()[0] # GetNameOfClass() is a virtual method of the LightObject class, # so we must instantiate an object with the New() method if 'New' in dir(i): I = i.New() # be sure that the type of the instantiated object is the same # than the one of the class. It can be different if the class # is an "abstract" one and don't provide any New() method. # In that case, the one of the superclass is used. if 'GetNameOfClass' in dir(I): # print("Checking", t) totalName += 1 n = I.GetNameOfClass() if n != t and itk.class_(I) == i: msg = "%s: wrong class name: %s" % (t, n) print(msg, file=sys.stderr) wrongName += 1 else: if 'New' in dir(T): I = T.New() if 'GetNameOfClass' in dir(I): # print("Checking", t) totalName += 1 n = I.GetNameOfClass() if n != t and itk.class_(I) == T: msg = "%s: wrong class name: %s" % (t, n) print(msg, file=sys.stderr) wrongName += 1 print("%s classes checked." % totalName) if wrongName: print( "%s classes are not providing the correct name." % wrongName, file=sys.stderr) sys.exit(1)
1
10,047
I think the instantiation of the new objects `i.New()` and `T.New()` needs to be done in a function so the objects are local variables, reducing the amount of memory used.
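A sketch of that refactoring; the helper name check_class_name is made up, and the loop would accumulate totalName and wrongName from the two returned counts:

def check_class_name(expected_name, cls):
    # Instantiating inside a function keeps the object local, so it can be
    # released as soon as the check finishes instead of living for the whole loop.
    if 'New' not in dir(cls):
        return 0, 0
    obj = cls.New()
    if 'GetNameOfClass' not in dir(obj):
        return 0, 0
    name = obj.GetNameOfClass()
    if name != expected_name and itk.class_(obj) == cls:
        print("%s: wrong class name: %s" % (expected_name, name),
              file=sys.stderr)
        return 1, 1
    return 1, 0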
InsightSoftwareConsortium-ITK
py
@@ -40,6 +40,8 @@ namespace pwiz.Skyline.Model.Hibernate public const string Mz = "0.####"; public const string SamplingTime = "0.00"; public const string OneOverK0 = "0.####"; + + public const string IonMobility = "0.#####"; // ReSharper restore LocalizableElement } }
1
/* * Original author: Nick Shulman <nicksh .at. u.washington.edu>, * MacCoss Lab, Department of Genome Sciences, UW * * Copyright 2009 University of Washington - Seattle, WA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System; namespace pwiz.Skyline.Model.Hibernate { public static class Formats { // ReSharper disable LocalizableElement public const String RETENTION_TIME = "0.##"; public const String PEAK_FOUND_RATIO = "0.##"; public const String STANDARD_RATIO = "0.####"; public const String GLOBAL_STANDARD_RATIO = "0.0000E+0"; public const String PEAK_AREA = "0"; public const String PEAK_AREA_NORMALIZED = "0.####%"; public const String OPT_PARAMETER = "0.#"; public const String MASS_ERROR = "0.#"; public const String CV = "0.#%"; public const string PValue = "0.0000"; public const string FoldChange = "0.####"; public const string CalibrationCurve = "0.0000E+0"; public const string Concentration = "0.####"; public const string RoundTrip = "R"; public const string Mz = "0.####"; public const string SamplingTime = "0.00"; public const string OneOverK0 = "0.####"; // ReSharper restore LocalizableElement } }
1
14,039
This is probably more digits than needed - perhaps rename OneOverK0 to IonMobility instead
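If the rename route is taken, the constant would simply replace OneOverK0 rather than adding a second, higher-precision entry; a sketch:

// Renamed from OneOverK0: same precision, more general name.
public const string IonMobility = "0.####";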
ProteoWizard-pwiz
.cs
@@ -25,5 +25,8 @@ public enum ApiMethodType { CallableMethod, // PHP OptionalArrayMethod, - PagedOptionalArrayMethod + PagedOptionalArrayMethod, + // C# + FlattenedMethodAsyncCallSettings, + FlattenedMethodAsyncCancellationToken, }
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.viewmodel; public enum ApiMethodType { // Java PagedFlattenedMethod, PagedRequestObjectMethod, PagedCallableMethod, UnpagedListCallableMethod, FlattenedMethod, RequestObjectMethod, CallableMethod, // PHP OptionalArrayMethod, PagedOptionalArrayMethod }
1
17,747
End with 'Method', so: - FlattenedAsyncCallSettingsMethod - FlattenedAsyncCancellationTokenMethod
googleapis-gapic-generator
java
@@ -22,6 +22,7 @@ import ( cloudkms "cloud.google.com/go/kms/apiv1" "gocloud.dev/gcp" + "gocloud.dev/internal/secrets" "google.golang.org/api/option" kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" )
1
// Copyright 2018 The Go Cloud Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limtations under the License. // Package gcpkms provides functionality to encrypt and decrypt secrets using // Google Cloud KMS. package gcpkms import ( "context" "fmt" cloudkms "cloud.google.com/go/kms/apiv1" "gocloud.dev/gcp" "google.golang.org/api/option" kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" ) // endPoint is the address to access Google Cloud KMS API. const endPoint = "cloudkms.googleapis.com:443" // Dial returns a client to use with Cloud KMS and a clean-up function to close // the client after used. func Dial(ctx context.Context, ts gcp.TokenSource) (*cloudkms.KeyManagementClient, func(), error) { c, err := cloudkms.NewKeyManagementClient(ctx, option.WithTokenSource(ts)) return c, func() { c.Close() }, err } // NewCrypter returns a new Crypter to to encryption and decryption. func NewCrypter(client *cloudkms.KeyManagementClient, ki *KeyID) *Crypter { return &Crypter{ keyID: ki, client: client, } } // KeyID includes related information to construct a key name that is managed // by Cloud KMS. // See https://cloud.google.com/kms/docs/object-hierarchy#key for more // information. type KeyID struct { ProjectID, Location, KeyRing, Key string } func (ki *KeyID) String() string { return fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", ki.ProjectID, ki.Location, ki.KeyRing, ki.Key) } // Crypter contains information to construct the pull path of a key. // TODO(#1066): make this unexported when there is a top-level portable API. type Crypter struct { keyID *KeyID client *cloudkms.KeyManagementClient } // Decrypt decrypts the ciphertext using the key constructed from ki. func (c *Crypter) Decrypt(ctx context.Context, ciphertext []byte) ([]byte, error) { req := &kmspb.DecryptRequest{ Name: c.keyID.String(), Ciphertext: ciphertext, } resp, err := c.client.Decrypt(ctx, req) if err != nil { return nil, err } return resp.GetPlaintext(), nil } // Encrypt encrypts the plaintext into a ciphertext. func (c *Crypter) Encrypt(ctx context.Context, plaintext []byte) ([]byte, error) { req := &kmspb.EncryptRequest{ Name: c.keyID.String(), Plaintext: plaintext, } resp, err := c.client.Encrypt(ctx, req) if err != nil { return nil, err } return resp.GetCiphertext(), nil }
1
13,542
`localsecrets` should be updated in the same way.
google-go-cloud
go
@@ -94,13 +94,18 @@ const ConsensusV17 = ConsensusVersion( "https://github.com/algorandfoundation/specs/tree/5615adc36bad610c7f165fa2967f4ecfa75125f0", ) +// ConsensusV18 adds the ability to issue a transaction that marks an account non-participating +const ConsensusV18 = ConsensusVersion( + "---->!!!TODO!!!<----", +) + // !!! ********************* !!! // !!! *** Please update ConsensusCurrentVersion when adding new protocol versions *** !!! // !!! ********************* !!! // ConsensusCurrentVersion is the latest version and should be used // when a specific version is not provided. -const ConsensusCurrentVersion = ConsensusV17 +const ConsensusCurrentVersion = ConsensusV18 // ConsensusTest0 is a version of ConsensusV0 used for testing // (it has different approved upgrade paths).
1
// Copyright (C) 2019 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package protocol // ConsensusVersion is a string that identifies a version of the // consensus protocol. type ConsensusVersion string // DEPRECATEDConsensusV0 is a baseline version of the Algorand consensus protocol. // at the time versioning was introduced. // It is now deprecated. const DEPRECATEDConsensusV0 = ConsensusVersion("v0") // DEPRECATEDConsensusV1 adds support for Genesis ID in transactions, but does not // require it (transactions missing a GenesisID value are still allowed). // It is now deprecated. const DEPRECATEDConsensusV1 = ConsensusVersion("v1") // DEPRECATEDConsensusV2 fixes a bug in the agreement protocol where proposalValues // fail to commit to the original period and sender of a block. const DEPRECATEDConsensusV2 = ConsensusVersion("v2") // DEPRECATEDConsensusV3 adds support for fine-grained ephemeral keys. const DEPRECATEDConsensusV3 = ConsensusVersion("v3") // DEPRECATEDConsensusV4 adds support for a min balance and a transaction that // closes out an account. const DEPRECATEDConsensusV4 = ConsensusVersion("v4") // DEPRECATEDConsensusV5 sets MinTxnFee to 1000 and fixes a blance lookback bug const DEPRECATEDConsensusV5 = ConsensusVersion("v5") // DEPRECATEDConsensusV6 adds support for explicit ephemeral-key parameters const DEPRECATEDConsensusV6 = ConsensusVersion("v6") // ConsensusV7 increases MaxBalLookback to 320 in preparation for // the twin seeds change. const ConsensusV7 = ConsensusVersion("v7") // ConsensusV8 uses the new parameters and seed derivation policy // from the agreement protocol's security analysis. const ConsensusV8 = ConsensusVersion("v8") // ConsensusV9 increases min balance to 100,000 microAlgos. const ConsensusV9 = ConsensusVersion("v9") // ConsensusV10 introduces fast partition recovery. const ConsensusV10 = ConsensusVersion("v10") // ConsensusV11 introduces efficient encoding of SignedTxn using SignedTxnInBlock. const ConsensusV11 = ConsensusVersion("v11") // ConsensusV12 increases the maximum length of a version string. const ConsensusV12 = ConsensusVersion("v12") // ConsensusV13 makes the consensus version a meaningful string. const ConsensusV13 = ConsensusVersion( // Points to version of the Algorand spec as of May 21, 2019. "https://github.com/algorand/spec/tree/0c8a9dc44d7368cc266d5407b79fb3311f4fc795", ) // ConsensusV14 adds tracking of closing amounts in ApplyData, // and enables genesis hash in transactions. const ConsensusV14 = ConsensusVersion( "https://github.com/algorand/spec/tree/2526b6ae062b4fe5e163e06e41e1d9b9219135a9", ) // ConsensusV15 adds tracking of reward distributions in ApplyData. const ConsensusV15 = ConsensusVersion( "https://github.com/algorand/spec/tree/a26ed78ed8f834e2b9ccb6eb7d3ee9f629a6e622", ) // ConsensusV16 fixes domain separation in Credentials and requires GenesisHash. 
const ConsensusV16 = ConsensusVersion( "https://github.com/algorand/spec/tree/22726c9dcd12d9cddce4a8bd7e8ccaa707f74101", ) // ConsensusV17 points to 'final' spec commit const ConsensusV17 = ConsensusVersion( "https://github.com/algorandfoundation/specs/tree/5615adc36bad610c7f165fa2967f4ecfa75125f0", ) // !!! ********************* !!! // !!! *** Please update ConsensusCurrentVersion when adding new protocol versions *** !!! // !!! ********************* !!! // ConsensusCurrentVersion is the latest version and should be used // when a specific version is not provided. const ConsensusCurrentVersion = ConsensusV17 // ConsensusTest0 is a version of ConsensusV0 used for testing // (it has different approved upgrade paths). const ConsensusTest0 = ConsensusVersion("test0") // ConsensusTest1 is an extension of ConsensusTest0 that // supports a sorted-list balance commitment. const ConsensusTest1 = ConsensusVersion("test1") // ConsensusTestBigBlocks is a version of ConsensusV0 used for testing // with big block size (large MaxTxnBytesPerBlock). // at the time versioning was introduced. const ConsensusTestBigBlocks = ConsensusVersion("test-big-blocks") // ConsensusTestRapidRewardRecalculation is a version of ConsensusCurrentVersion // that decreases the RewardRecalculationInterval greatly. const ConsensusTestRapidRewardRecalculation = ConsensusVersion("test-fast-reward-recalculation") // ConsensusTestFastUpgrade is meant for testing of protocol upgrades: // during testing, it is equivalent to another protocol with the exception // of the upgrade parameters, which allow for upgrades to take place after // only a few rounds. func ConsensusTestFastUpgrade(proto ConsensusVersion) ConsensusVersion { return "test-fast-upgrade-" + proto }
1
36,246
We'll want to PR a spec change into github.com/algorandfoundation/specs. (Side note: It might or might not make sense to combine this protocol upgrade with Tsachi's protocol upgrade for fixing the reward rate calculation.)
algorand-go-algorand
go
@@ -30,7 +30,7 @@ func (w *DefaultWorker) Generate( nullBlockCount abi.ChainEpoch, posts []block.PoStProof, drandEntries []*drand.Entry, -) Output { +) (*block.Block, []*types.SignedMessage, []*types.SignedMessage, error) { generateTimer := time.Now() defer func() {
1
package mining // Block generation is part of the logic of the DefaultWorker. // 'generate' is that function that actually creates a new block from a base // TipSet using the DefaultWorker's many utilities. import ( "context" "time" "github.com/filecoin-project/go-address" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/pkg/errors" bls "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-filecoin/internal/pkg/block" "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" "github.com/filecoin-project/go-filecoin/internal/pkg/drand" e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" "github.com/filecoin-project/go-filecoin/internal/pkg/types" ) // Generate returns a new block created from the messages in the pool. // The resulting output is not empty: it has either a block or an error. func (w *DefaultWorker) Generate( ctx context.Context, baseTipSet block.TipSet, ticket block.Ticket, electionProof crypto.VRFPi, nullBlockCount abi.ChainEpoch, posts []block.PoStProof, drandEntries []*drand.Entry, ) Output { generateTimer := time.Now() defer func() { log.Infof("[TIMER] DefaultWorker.Generate baseTipset: %s - elapsed time: %s", baseTipSet.String(), time.Since(generateTimer).Round(time.Millisecond)) }() weight, err := w.getWeight(ctx, baseTipSet) if err != nil { return NewOutputErr(errors.Wrap(err, "get weight")) } baseHeight, err := baseTipSet.Height() if err != nil { return NewOutputErr(errors.Wrap(err, "get base tip set height")) } blockHeight := baseHeight + nullBlockCount + 1 // Construct list of message candidates for inclusion. // These messages will be processed, and those that fail excluded from the block. pending := w.messageSource.Pending() mq := NewMessageQueue(pending) candidateMsgs := orderMessageCandidates(mq.Drain(block.BlockMessageLimit)) candidateMsgs = w.filterPenalizableMessages(ctx, candidateMsgs) if len(candidateMsgs) > block.BlockMessageLimit { return NewOutputErr(errors.Errorf("too many messages returned from mq.Drain: %d", len(candidateMsgs))) } var blsAccepted []*types.SignedMessage var secpAccepted []*types.SignedMessage // Align the results with the candidate signed messages to accumulate the messages lists // to include in the block, and handle failed messages. for _, msg := range candidateMsgs { if msg.Message.From.Protocol() == address.BLS { blsAccepted = append(blsAccepted, msg) } else { secpAccepted = append(secpAccepted, msg) } } // Create an aggregage signature for messages unwrappedBLSMessages, blsAggregateSig, err := aggregateBLS(blsAccepted) if err != nil { return NewOutputErr(errors.Wrap(err, "could not aggregate bls messages")) } // Persist messages to ipld storage txMetaCid, err := w.messageStore.StoreMessages(ctx, secpAccepted, unwrappedBLSMessages) if err != nil { return NewOutputErr(errors.Wrap(err, "error persisting messages")) } // get tipset state root and receipt root baseStateRoot, err := w.tsMetadata.GetTipSetStateRoot(baseTipSet.Key()) if err != nil { return NewOutputErr(errors.Wrapf(err, "error retrieving state root for tipset %s", baseTipSet.Key().String())) } baseReceiptRoot, err := w.tsMetadata.GetTipSetReceiptsRoot(baseTipSet.Key()) if err != nil { return NewOutputErr(errors.Wrapf(err, "error retrieving receipt root for tipset %s", baseTipSet.Key().String())) } // Set the block timestamp to be exactly the start of the target epoch, regardless of the current time. // The real time might actually be much later than this if catching up from a pause in chain progress. 
epochStartTime := w.clock.StartTimeOfEpoch(blockHeight) if drandEntries == nil { drandEntries = []*drand.Entry{} } if posts == nil { posts = []block.PoStProof{} } next := &block.Block{ Miner: w.minerAddr, Height: blockHeight, BeaconEntries: drandEntries, ElectionProof: &crypto.ElectionProof{VRFProof: electionProof}, Messages: e.NewCid(txMetaCid), MessageReceipts: e.NewCid(baseReceiptRoot), Parents: baseTipSet.Key(), ParentWeight: weight, PoStProofs: posts, StateRoot: e.NewCid(baseStateRoot), Ticket: ticket, Timestamp: uint64(epochStartTime.Unix()), BLSAggregateSig: &blsAggregateSig, } view, err := w.api.PowerStateView(baseTipSet.Key()) if err != nil { return NewOutputErr(errors.Wrapf(err, "failed to read state view")) } _, workerAddr, err := view.MinerControlAddresses(ctx, w.minerAddr) if err != nil { return NewOutputErr(errors.Wrap(err, "failed to read workerAddr during block generation")) } workerSigningAddr, err := view.AccountSignerAddress(ctx, workerAddr) if err != nil { return NewOutputErr(errors.Wrap(err, "failed to convert worker address to signing address")) } blockSig, err := w.workerSigner.SignBytes(ctx, next.SignatureData(), workerSigningAddr) if err != nil { return NewOutputErr(errors.Wrap(err, "failed to sign block")) } next.BlockSig = &blockSig return NewOutput(next, blsAccepted, secpAccepted) } func aggregateBLS(blsMessages []*types.SignedMessage) ([]*types.UnsignedMessage, crypto.Signature, error) { var sigs []bls.Signature var unwrappedMsgs []*types.UnsignedMessage for _, msg := range blsMessages { // unwrap messages unwrappedMsgs = append(unwrappedMsgs, &msg.Message) if msg.Signature.Type != crypto.SigTypeBLS { return []*types.UnsignedMessage{}, crypto.Signature{}, errors.New("non-BLS message signature") } // store message signature as bls signature blsSig := bls.Signature{} copy(blsSig[:], msg.Signature.Data) sigs = append(sigs, blsSig) } blsAggregateSig := bls.Aggregate(sigs) if blsAggregateSig == nil { return []*types.UnsignedMessage{}, crypto.Signature{}, errors.New("could not aggregate signatures") } return unwrappedMsgs, crypto.Signature{ Type: crypto.SigTypeBLS, Data: blsAggregateSig[:], }, nil } // When a block is validated, BLS messages are processed first, so for simplicity all BLS // messages are considered first here too. func orderMessageCandidates(messages []*types.SignedMessage) []*types.SignedMessage { blsMessages := []*types.SignedMessage{} secpMessages := []*types.SignedMessage{} for _, m := range messages { if m.Message.From.Protocol() == address.BLS { blsMessages = append(blsMessages, m) } else { secpMessages = append(secpMessages, m) } } return append(blsMessages, secpMessages...) } func (w *DefaultWorker) filterPenalizableMessages(ctx context.Context, messages []*types.SignedMessage) []*types.SignedMessage { var goodMessages []*types.SignedMessage for _, msg := range messages { err := w.penaltyChecker.PenaltyCheck(ctx, &msg.Message) if err != nil { mCid, _ := msg.Cid() log.Debugf("Msg: %s excluded in block because penalized with err %s", mCid, err) continue } goodMessages = append(goodMessages, msg) } return goodMessages }
1
23,827
Use `FullBlock`, it comes from the same package.
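A sketch of what that could look like at the signature level; the exact shape of block.FullBlock (which fields it bundles) is assumed here rather than taken from the diff:

// Return the header and both message lists as one value from the block
// package, instead of three separate results (signature only, body unchanged).
func (w *DefaultWorker) Generate(
	ctx context.Context,
	baseTipSet block.TipSet,
	ticket block.Ticket,
	electionProof crypto.VRFPi,
	nullBlockCount abi.ChainEpoch,
	posts []block.PoStProof,
	drandEntries []*drand.Entry,
) (*block.FullBlock, error)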
filecoin-project-venus
go
@@ -1,9 +1,13 @@ +import abc import json import logging import time from concurrent.futures import ThreadPoolExecutor +from typing import Collection, Dict, Optional import requests +from flask import Request +from readerwriterlock import rwlock from localstack import config from localstack.utils.bootstrap import canonicalize_api_names
1
import json import logging import time from concurrent.futures import ThreadPoolExecutor import requests from localstack import config from localstack.utils.bootstrap import canonicalize_api_names from localstack.utils.common import clone # set up logger LOG = logging.getLogger(__name__) # map of service plugins, mapping from service name to plugin details SERVICE_PLUGINS = {} # maps service names to health status STATUSES = {} # ----------------- # PLUGIN UTILITIES # ----------------- class Plugin(object): def __init__(self, name, start, check=None, listener=None, priority=0, active=False): self.plugin_name = name self.start_function = start self.listener = listener self.check_function = check self.priority = priority self.default_active = active def start(self, asynchronous): kwargs = {"asynchronous": asynchronous} if self.listener: kwargs["update_listener"] = self.listener return self.start_function(**kwargs) def check(self, expect_shutdown=False, print_error=False): if not self.check_function: return return self.check_function(expect_shutdown=expect_shutdown, print_error=print_error) def name(self): return self.plugin_name def is_enabled(self, api_names=None): if self.default_active: return True if api_names is None: api_names = canonicalize_api_names() return self.name() in api_names def register_plugin(plugin): existing = SERVICE_PLUGINS.get(plugin.name()) if existing: if existing.priority > plugin.priority: return SERVICE_PLUGINS[plugin.name()] = plugin # ------------------------- # HEALTH CHECK API METHODS # ------------------------- def get_services_health(reload=False): if reload: reload_services_health() result = clone(dict(STATUSES)) result.get("services", {}).pop("edge", None) return result def set_services_health(data): status = STATUSES["services"] = STATUSES.get("services", {}) for key, value in dict(data).items(): parent, _, child = key.partition(":") if child: STATUSES[parent] = STATUSES.get(parent, {}) STATUSES[parent][child] = value data.pop(key) status.update(data or {}) return get_services_health() # ----------------------------- # INFRASTRUCTURE HEALTH CHECKS # ----------------------------- def check_infra(retries=10, expect_shutdown=False, apis=None, additional_checks=[]): try: apis = apis or canonicalize_api_names() print_error = retries <= 0 # loop through plugins and check service status for name, plugin in SERVICE_PLUGINS.items(): if name in apis: check_service_health( api=name, print_error=print_error, expect_shutdown=expect_shutdown ) for additional in additional_checks: additional(expect_shutdown=expect_shutdown) except Exception as e: if retries <= 0: LOG.exception("Error checking state of local environment (after some retries)") raise e time.sleep(3) check_infra( retries - 1, expect_shutdown=expect_shutdown, apis=apis, additional_checks=additional_checks, ) def wait_for_infra_shutdown(apis=None): apis = apis or canonicalize_api_names() names = [name for name, plugin in SERVICE_PLUGINS.items() if name in apis] def check(name): check_service_health(api=name, expect_shutdown=True) LOG.debug("[shutdown] api %s has shut down", name) # no special significance to 10 workers, seems like a reasonable number given the number of services we have with ThreadPoolExecutor(max_workers=10) as executor: executor.map(check, names) def check_service_health(api, print_error=False, expect_shutdown=False): try: plugin = SERVICE_PLUGINS.get(api) plugin.check(expect_shutdown=expect_shutdown, print_error=print_error) record_service_health(api, "running") except Exception as e: if 
not expect_shutdown: LOG.warning('Service "%s" not yet available, retrying...' % api) else: LOG.warning('Service "%s" still shutting down, retrying...' % api) raise e def reload_services_health(): check_infra(retries=0) def record_service_health(api, status): # TODO: consider making in-memory calls here, to optimize performance data = {api: status} health_url = "%s/health" % config.get_edge_url() try: requests.put(health_url, data=json.dumps(data), verify=False) except Exception: # ignore for now, if the service is not running pass
1
12,997
Do we plan to use anything other than flask for making HTTP requests inside localstack? If so, it may make sense not to couple strongly to flask for now, and just leave the type of the `request` function parameters open.
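One way to keep a hook signature framework-agnostic, as suggested; the function and parameter names here are illustrative, not LocalStack's actual API:

from typing import Any, Optional

def on_request(request: Any, data: Optional[bytes] = None) -> Any:
    # `request` only needs to expose the attributes the hook actually uses
    # (method, path, headers, ...), so it is not tied to flask.Request.
    ...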
localstack-localstack
py
@@ -143,9 +143,17 @@ func (verifyContentLanguage) ErrorCheck(b *blob.Bucket, err error) error { } func (verifyContentLanguage) BeforeWrite(as func(interface{}) bool) error { + var objp **storage.ObjectHandle + if !as(&objp) { + return errors.New("Writer.As failed to get ObjectHandle") + } + // Replace the ObjectHandle with a new one that adds Conditions + // (this condition will always be true). + *objp = (*objp).If(storage.Conditions{DoesNotExist: true}) + var sw *storage.Writer if !as(&sw) { - return errors.New("Writer.As failed") + return errors.New("Writer.As failed to get Writer") } sw.ContentLanguage = language return nil
1
// Copyright 2018 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gcsblob import ( "context" "errors" "flag" "fmt" "io/ioutil" "net/http" "net/url" "os" "testing" "cloud.google.com/go/storage" "github.com/google/go-cmp/cmp" "gocloud.dev/blob" "gocloud.dev/blob/driver" "gocloud.dev/blob/drivertest" "gocloud.dev/gcp" "gocloud.dev/internal/testing/setup" "google.golang.org/api/googleapi" ) const ( // These constants capture values that were used during the last -record. // // If you want to use --record mode, // 1. Create a bucket in your GCP project: // https://console.cloud.google.com/storage/browser, then "Create Bucket". // 2. Update the bucketName constant to your bucket name. // 3. Create a service account in your GCP project and update the // serviceAccountID constant to it. // 4. Download a private key to a .pem file as described here: // https://godoc.org/cloud.google.com/go/storage#SignedURLOptions // and pass a path to it via the --privatekey flag. // TODO(issue #300): Use Terraform to provision a bucket, and get the bucket // name from the Terraform output instead (saving a copy of it for replay). bucketName = "go-cloud-blob-test-bucket" serviceAccountID = "[email protected]" ) var pathToPrivateKey = flag.String("privatekey", "", "path to .pem file containing private key (required for --record)") type harness struct { client *gcp.HTTPClient opts *Options rt http.RoundTripper closer func() } func newHarness(ctx context.Context, t *testing.T) (drivertest.Harness, error) { opts := &Options{GoogleAccessID: serviceAccountID} if *setup.Record { if *pathToPrivateKey == "" { t.Fatalf("--privatekey is required in --record mode.") } // Use a real private key for signing URLs during -record. pk, err := ioutil.ReadFile(*pathToPrivateKey) if err != nil { t.Fatalf("Couldn't find private key at %v: %v", *pathToPrivateKey, err) } opts.PrivateKey = pk } else { // Use a dummy signer in replay mode. opts.SignBytes = func(b []byte) ([]byte, error) { return []byte("signed!"), nil } } client, rt, done := setup.NewGCPClient(ctx, t) return &harness{client: client, opts: opts, rt: rt, closer: done}, nil } func (h *harness) HTTPClient() *http.Client { return &h.client.Client } func (h *harness) MakeDriver(ctx context.Context) (driver.Bucket, error) { return openBucket(ctx, h.client, bucketName, h.opts) } func (h *harness) Close() { h.closer() } func TestConformance(t *testing.T) { drivertest.RunConformanceTests(t, newHarness, []drivertest.AsTest{verifyContentLanguage{}}) } func BenchmarkGcsblob(b *testing.B) { ctx := context.Background() creds, err := gcp.DefaultCredentials(ctx) if err != nil { b.Fatal(err) } client, err := gcp.NewHTTPClient(gcp.DefaultTransport(), gcp.CredentialsTokenSource(creds)) if err != nil { b.Fatal(err) } bkt, err := OpenBucket(context.Background(), client, bucketName, nil) drivertest.RunBenchmarks(b, bkt) } const language = "nl" // verifyContentLanguage uses As to access the underlying GCS types and // read/write the ContentLanguage field. 
type verifyContentLanguage struct{} func (verifyContentLanguage) Name() string { return "verify ContentLanguage can be written and read through As" } func (verifyContentLanguage) BucketCheck(b *blob.Bucket) error { var client *storage.Client if !b.As(&client) { return errors.New("Bucket.As failed") } return nil } func (verifyContentLanguage) ErrorCheck(b *blob.Bucket, err error) error { // Can't really verify this one because the storage library returns // a sentinel error, storage.ErrObjectNotExist, for "not exists" // instead of the supported As type googleapi.Error. // Call ErrorAs anyway, and expect it to fail. var to *googleapi.Error if b.ErrorAs(err, &to) { return errors.New("expected ErrorAs to fail") } return nil } func (verifyContentLanguage) BeforeWrite(as func(interface{}) bool) error { var sw *storage.Writer if !as(&sw) { return errors.New("Writer.As failed") } sw.ContentLanguage = language return nil } func (verifyContentLanguage) BeforeCopy(as func(interface{}) bool) error { var copier *storage.Copier if !as(&copier) { return errors.New("BeforeCopy.As failed") } return nil } func (verifyContentLanguage) BeforeList(as func(interface{}) bool) error { var q *storage.Query if !as(&q) { return errors.New("List.As failed") } // Nothing to do. return nil } func (verifyContentLanguage) AttributesCheck(attrs *blob.Attributes) error { var oa storage.ObjectAttrs if !attrs.As(&oa) { return errors.New("Attributes.As returned false") } if got := oa.ContentLanguage; got != language { return fmt.Errorf("got %q want %q", got, language) } return nil } func (verifyContentLanguage) ReaderCheck(r *blob.Reader) error { var rr *storage.Reader if !r.As(&rr) { return errors.New("Reader.As returned false") } // GCS doesn't return Content-Language via storage.Reader. return nil } func (verifyContentLanguage) ListObjectCheck(o *blob.ListObject) error { var oa storage.ObjectAttrs if !o.As(&oa) { return errors.New("ListObject.As returned false") } if o.IsDir { return nil } if got := oa.ContentLanguage; got != language { return fmt.Errorf("got %q want %q", got, language) } return nil } // GCS-specific unit tests. func TestBufferSize(t *testing.T) { tests := []struct { size int want int }{ { size: 5 * 1024 * 1024, want: 5 * 1024 * 1024, }, { size: 0, want: googleapi.DefaultUploadChunkSize, }, { size: -1024, want: 0, }, } for i, test := range tests { got := bufferSize(test.size) if got != test.want { t.Errorf("%d) got buffer size %d, want %d", i, got, test.want) } } } func TestOpenBucket(t *testing.T) { tests := []struct { description string bucketName string nilClient bool want string wantErr bool }{ { description: "empty bucket name results in error", wantErr: true, }, { description: "nil client results in error", bucketName: "foo", nilClient: true, wantErr: true, }, { description: "success", bucketName: "foo", want: "foo", }, } ctx := context.Background() for _, test := range tests { t.Run(test.description, func(t *testing.T) { var client *gcp.HTTPClient if !test.nilClient { var done func() client, _, done = setup.NewGCPClient(ctx, t) defer done() } // Create driver impl. drv, err := openBucket(ctx, client, test.bucketName, nil) if (err != nil) != test.wantErr { t.Errorf("got err %v want error %v", err, test.wantErr) } if err == nil && drv != nil && drv.name != test.want { t.Errorf("got %q want %q", drv.name, test.want) } // Create portable type. 
_, err = OpenBucket(ctx, client, test.bucketName, nil) if (err != nil) != test.wantErr { t.Errorf("got err %v want error %v", err, test.wantErr) } }) } } func TestURLOpenerForParams(t *testing.T) { ctx := context.Background() // Create a file for use as a dummy private key file. privateKey := []byte("some content") pkFile, err := ioutil.TempFile("", "my-private-key") if err != nil { t.Fatal(err) } defer os.Remove(pkFile.Name()) if _, err := pkFile.Write(privateKey); err != nil { t.Fatal(err) } if err := pkFile.Close(); err != nil { t.Fatal(err) } tests := []struct { name string currOpts Options query url.Values wantOpts Options wantErr bool }{ { name: "InvalidParam", query: url.Values{ "foo": {"bar"}, }, wantErr: true, }, { name: "AccessID", query: url.Values{ "access_id": {"bar"}, }, wantOpts: Options{GoogleAccessID: "bar"}, }, { name: "AccessID override", currOpts: Options{GoogleAccessID: "foo"}, query: url.Values{ "access_id": {"bar"}, }, wantOpts: Options{GoogleAccessID: "bar"}, }, { name: "AccessID not overridden", currOpts: Options{GoogleAccessID: "bar"}, wantOpts: Options{GoogleAccessID: "bar"}, }, { name: "BadPrivateKeyPath", query: url.Values{ "private_key_path": {"/path/does/not/exist"}, }, wantErr: true, }, { name: "PrivateKeyPath", query: url.Values{ "private_key_path": {pkFile.Name()}, }, wantOpts: Options{PrivateKey: privateKey}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { o := &URLOpener{Options: test.currOpts} got, err := o.forParams(ctx, test.query) if (err != nil) != test.wantErr { t.Errorf("got err %v want error %v", err, test.wantErr) } if err != nil { return } if diff := cmp.Diff(got, &test.wantOpts); diff != "" { t.Errorf("opener.forParams(...) diff (-want +got):\n%s", diff) } }) } } func TestOpenBucketFromURL(t *testing.T) { cleanup := setup.FakeGCPDefaultCredentials(t) defer cleanup() pkFile, err := ioutil.TempFile("", "my-private-key") if err != nil { t.Fatal(err) } defer os.Remove(pkFile.Name()) if err := ioutil.WriteFile(pkFile.Name(), []byte("key"), 0666); err != nil { t.Fatal(err) } tests := []struct { URL string WantErr bool }{ // OK. {"gs://mybucket", false}, // OK, setting access_id. {"gs://mybucket?access_id=foo", false}, // OK, setting private_key_path. {"gs://mybucket?private_key_path=" + pkFile.Name(), false}, // Invalid private_key_path. {"gs://mybucket?private_key_path=invalid-path", true}, // Invalid parameter. {"gs://mybucket?param=value", true}, } ctx := context.Background() for _, test := range tests { _, err := blob.OpenBucket(ctx, test.URL) if (err != nil) != test.WantErr { t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr) } } }
1
17,074
I think we should make this a separate test case: it seems like making the precondition fail is the test case that would ensure that this escape hatch worked. Otherwise, if it's always true, then it would be the same as if the escape hatch didn't modify the outgoing request.
google-go-cloud
go
@@ -62,6 +62,13 @@ func (s *Server) Start(ctx context.Context) error { return errors.Wrap(err, "error when subscribe to block") } + // sync genesis block + genesisBlk, err := s.bc.GetBlockByHeight(0) + if err != nil { + return errors.Wrap(err, "error when get genesis block") + } + s.idx.BuildIndex(genesisBlk) + go func() { for { select {
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package indexservice import ( "github.com/pkg/errors" "golang.org/x/net/context" "encoding/hex" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/db/rds" ) // Server is the container of the index service type Server struct { cfg *config.Config idx *Indexer bc blockchain.Blockchain blockCh chan *blockchain.Block } // NewServer instantiates an index service func NewServer( cfg *config.Config, bc blockchain.Blockchain, ) *Server { return &Server{ cfg: cfg, idx: &Indexer{ cfg: cfg.Indexer, rds: nil, hexEncodedNodeAddr: "", }, bc: bc, } } // Start starts the explorer server func (s *Server) Start(ctx context.Context) error { addr := s.cfg.Indexer.NodeAddr if addr == "" { blockAddr, err := s.cfg.BlockchainAddress() if err != nil { return errors.Wrap(err, "error when get the blockchain address") } addr = hex.EncodeToString(blockAddr.Bytes()[:]) } s.idx.hexEncodedNodeAddr = addr s.idx.rds = rds.NewAwsRDS(&s.cfg.DB.RDS) if err := s.idx.rds.Start(ctx); err != nil { return errors.Wrap(err, "error when start rds store") } s.blockCh = make(chan *blockchain.Block) if err := s.bc.SubscribeBlockCreation(s.blockCh); err != nil { return errors.Wrap(err, "error when subscribe to block") } go func() { for { select { case blk := <-s.blockCh: s.idx.BuildIndex(blk) } } }() return nil } // Stop stops the explorer server func (s *Server) Stop(ctx context.Context) error { if err := s.idx.rds.Stop(ctx); err != nil { return errors.Wrap(err, "error when shutting down explorer http server") } if err := s.bc.UnSubscribeBlockCreation(s.blockCh); err != nil { return errors.Wrap(err, "error when un subscribe block creation") } close(s.blockCh) return nil } // Indexer return indexer interface func (s *Server) Indexer() *Indexer { return s.idx }
1
12,897
Error return value of `s.idx.BuildIndex` is not checked
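A sketch of the check the linter is asking for, using only names from the diff:

genesisBlk, err := s.bc.GetBlockByHeight(0)
if err != nil {
	return errors.Wrap(err, "error when get genesis block")
}
if err := s.idx.BuildIndex(genesisBlk); err != nil {
	return errors.Wrap(err, "error when building index for genesis block")
}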
iotexproject-iotex-core
go
@@ -36,6 +36,8 @@ def gen_classes(): pass elif member is configtypes.MappingType: pass + elif member in [configtypes.List, configtypes.LengthList]: + pass elif member is configtypes.FormatString: yield functools.partial(member, fields=['a', 'b']) elif issubclass(member, configtypes.BaseType):
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]> # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Hypothesis tests for qutebrowser.config.configtypes.""" import os import sys import inspect import functools import pytest import hypothesis from hypothesis import strategies from qutebrowser.config import configtypes, configexc def gen_classes(): for _name, member in inspect.getmembers(configtypes, inspect.isclass): if member is configtypes.BaseType: pass elif member is configtypes.MappingType: pass elif member is configtypes.FormatString: yield functools.partial(member, fields=['a', 'b']) elif issubclass(member, configtypes.BaseType): yield member @pytest.mark.usefixtures('qapp', 'config_tmpdir') @pytest.mark.parametrize('klass', gen_classes()) @hypothesis.given(strategies.text()) @hypothesis.example('\x00') def test_configtypes_hypothesis(klass, s): if (klass in [configtypes.File, configtypes.UserStyleSheet] and sys.platform == 'linux' and not os.environ.get('DISPLAY', '')): pytest.skip("No DISPLAY available") try: klass().validate(s) except configexc.ValidationError: pass else: klass().transform(s)
1
15,384
What's the reason for excluding those from the fuzzing here? You probably could just add some inner type via `functools.partial` like below with `FormatString`.
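A sketch of that idea, mirroring the FormatString branch; whether List and LengthList accept an inner type positionally (and which type to pass) is a guess that would need checking against configtypes:

elif member in [configtypes.List, configtypes.LengthList]:
    # Hypothetical: hand the list types an inner type so they can be fuzzed
    # like the others instead of being skipped.
    yield functools.partial(member, configtypes.BaseType())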
qutebrowser-qutebrowser
py
@@ -3,6 +3,6 @@ require 'spec_helper' describe Api::V1::UsersController, '#show' do it 'returns a 401 when users are not authenticated' do get :show - response.code.should eq '401' + expect(response.code).to eq '401' end end
1
require 'spec_helper' describe Api::V1::UsersController, '#show' do it 'returns a 401 when users are not authenticated' do get :show response.code.should eq '401' end end
1
9,667
Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
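Applied to the assertion in the diff, the preferred style is:

expect(response.code).to eq "401"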
thoughtbot-upcase
rb
@@ -14,7 +14,7 @@ import ( ) func killContainer(container libcontainer.Container) error { - container.Signal(syscall.SIGKILL, false) + _ = container.Signal(syscall.SIGKILL, false) for i := 0; i < 100; i++ { time.Sleep(100 * time.Millisecond) if err := container.Signal(syscall.Signal(0), false); err != nil {
1
// +build !solaris package main import ( "fmt" "os" "path/filepath" "syscall" "time" "github.com/opencontainers/runc/libcontainer" "github.com/urfave/cli" ) func killContainer(container libcontainer.Container) error { container.Signal(syscall.SIGKILL, false) for i := 0; i < 100; i++ { time.Sleep(100 * time.Millisecond) if err := container.Signal(syscall.Signal(0), false); err != nil { destroy(container) return nil } } return fmt.Errorf("container init still running") } var deleteCommand = cli.Command{ Name: "delete", Usage: "delete any resources held by one or more containers often used with detached containers", ArgsUsage: `<container-id> [container-id...] Where "<container-id>" is the name for the instance of the container. EXAMPLE: For example, if the container id is "ubuntu01" and runc list currently shows the status of "ubuntu01" as "stopped" the following will delete resources held for "ubuntu01" removing "ubuntu01" from the runc list of containers: # runc delete ubuntu01`, Flags: []cli.Flag{ cli.BoolFlag{ Name: "force, f", Usage: "Forcibly deletes the container if it is still running (uses SIGKILL)", }, }, Action: func(context *cli.Context) error { if err := checkArgs(context, 1, minArgs); err != nil { return err } hasError := false factory, err := loadFactory(context) if err != nil { return err } for _, id := range context.Args() { container, err := factory.Load(id) if err != nil { if lerr, ok := err.(libcontainer.Error); ok && lerr.Code() == libcontainer.ContainerNotExists { // if there was an aborted start or something of the sort then the container's directory could exist but // libcontainer does not see it because the state.json file inside that directory was never created. path := filepath.Join(context.GlobalString("root"), id) if err := os.RemoveAll(path); err != nil { fmt.Fprintf(os.Stderr, "remove %s: %v\n", path, err) } fmt.Fprintf(os.Stderr, "container %s does not exist\n", id) } hasError = true continue } s, err := container.Status() if err != nil { fmt.Fprintf(os.Stderr, "status for %s: %v\n", id, err) hasError = true continue } switch s { case libcontainer.Stopped: destroy(container) case libcontainer.Created: err := killContainer(container) if err != nil { fmt.Fprintf(os.Stderr, "kill container %s: %v\n", id, err) hasError = true } default: if context.Bool("force") { err := killContainer(container) if err != nil { fmt.Fprintf(os.Stderr, "kill container %s: %v\n", id, err) hasError = true } } else { fmt.Fprintf(os.Stderr, "cannot delete container %s that is not stopped: %s\n", id, s) hasError = true } } } if hasError { return fmt.Errorf("one or more of the container deletions failed") } return nil }, }
1
13,746
Is this golint or govet stuff?
opencontainers-runc
go
@@ -232,10 +232,9 @@ insert(hash_table_t table, app_pc addr, cbr_state_t state) static void at_taken(app_pc src, app_pc targ) { - dr_mcontext_t mcontext = { - sizeof(mcontext), - DR_MC_ALL, - }; + dr_mcontext_t mcontext; + mcontext.size = sizeof(mcontext); + mcontext.flags = DR_MC_ALL; void *drcontext = dr_get_current_drcontext(); /*
1
/* ********************************************************** * Copyright (c) 2014-2019 Google, Inc. All rights reserved. * Copyright (c) 2008 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Code Manipulation API Sample: * cbr.c * * This sample shows how to update or replace instrumented code after * it executes. We focus on cbr instructions, inserting * instrumentation to record the fallthrough and taken addresses when * they first execute. After a particular branch first executes, we * re-instrument the basic block to remove the instrumentation for the * direction taken. If and when we see the other direction, we remove * all instrumentation for that branch. We design this sample to * avoid the instrumentation overhead for a particular direction until * it is taken. Furthermore, we remove all overhead for that * direction after it triggers. * * This sample might form part of a dynamic CFG builder, where we want * to record each control-flow edge, but we don't want to pay the * execution overhead of the instrumentation after we've noted the * edge. * * We use the following replacement scheme: * 1) In the BB event, insert instrumentation for both the taken and * fallthrough edges. * 2) When the BB executes, note the direction taken and flush the * fragment from the code cache. * 3) When the BB event triggers again, insert new instrumentation. */ #include "dr_api.h" #include "drmgr.h" #define MINSERT instrlist_meta_preinsert #define ASSERT(x) \ do { \ if (!(x)) { \ dr_printf("ASSERT failed on line %d", __LINE__); \ dr_flush_file(STDOUT); \ dr_abort(); \ } \ } while (0) /* We need a table to store the state of each cbr (i.e., "seen taken * edge", "seen fallthrough edge", or "seen both"). We'll use a * simple hash table. */ #define HASH_TABLE_SIZE 7919 /* Possible cbr states */ typedef enum { CBR_NEITHER = 0x00, CBR_TAKEN = 0x01, CBR_NOT_TAKEN = 0x10 } cbr_state_t; /* Each bucket in the hash table is a list of the following elements. * For each cbr, we store its address and its state. 
*/ typedef struct _elem_t { struct _elem_t *next; cbr_state_t state; app_pc addr; } elem_t; typedef struct _list_t { elem_t *head; elem_t *tail; } list_t; /* We'll use one global hash table */ typedef list_t **hash_table_t; hash_table_t global_table = NULL; static elem_t * new_elem(app_pc addr, cbr_state_t state) { elem_t *elem = (elem_t *)dr_global_alloc(sizeof(elem_t)); ASSERT(elem != NULL); elem->next = NULL; elem->addr = addr; elem->state = state; return elem; } static void delete_elem(elem_t *elem) { dr_global_free(elem, sizeof(elem_t)); } static void append_elem(list_t *list, elem_t *elem) { if (list->head == NULL) { ASSERT(list->tail == NULL); list->head = elem; list->tail = elem; } else { list->tail->next = elem; list->tail = elem; } } static elem_t * find_elem(list_t *list, app_pc addr) { elem_t *elem = list->head; while (elem != NULL) { if (elem->addr == addr) return elem; elem = elem->next; } return NULL; } static list_t * new_list() { list_t *list = (list_t *)dr_global_alloc(sizeof(list_t)); list->head = NULL; list->tail = NULL; return list; } static void delete_list(list_t *list) { elem_t *iter = list->head; while (iter != NULL) { elem_t *next = iter->next; delete_elem(iter); iter = next; } dr_global_free(list, sizeof(list_t)); } hash_table_t new_table() { int i; hash_table_t table = (hash_table_t)dr_global_alloc(sizeof(list_t *) * HASH_TABLE_SIZE); for (i = 0; i < HASH_TABLE_SIZE; i++) { table[i] = NULL; } return table; } void delete_table(hash_table_t table) { int i; for (i = 0; i < HASH_TABLE_SIZE; i++) { if (table[i] != NULL) { delete_list(table[i]); } } dr_global_free(table, sizeof(list_t *) * HASH_TABLE_SIZE); } static uint hash_func(app_pc addr) { return ((uint)(((ptr_uint_t)addr) % HASH_TABLE_SIZE)); } elem_t * lookup(hash_table_t table, app_pc addr) { list_t *list = table[hash_func(addr)]; if (list != NULL) return find_elem(list, addr); return NULL; } void insert(hash_table_t table, app_pc addr, cbr_state_t state) { elem_t *elem = new_elem(addr, state); uint index = hash_func(addr); list_t *list = table[index]; if (list == NULL) { list = new_list(); table[index] = list; } append_elem(list, elem); } /* * End hash table implementation */ /* Clean call for the 'taken' case */ static void at_taken(app_pc src, app_pc targ) { dr_mcontext_t mcontext = { sizeof(mcontext), DR_MC_ALL, }; void *drcontext = dr_get_current_drcontext(); /* * Record the fact that we've seen the taken case. */ elem_t *elem = lookup(global_table, src); ASSERT(elem != NULL); elem->state |= CBR_TAKEN; /* Remove the bb from the cache so it will be re-built the next * time it executes. */ /* Since the flush will remove the fragment we're already in, * redirect execution to the target address. */ dr_flush_region(src, 1); dr_get_mcontext(drcontext, &mcontext); mcontext.pc = targ; dr_redirect_execution(&mcontext); } /* Clean call for the 'not taken' case */ static void at_not_taken(app_pc src, app_pc fall) { dr_mcontext_t mcontext = { sizeof(mcontext), DR_MC_ALL, }; void *drcontext = dr_get_current_drcontext(); /* * Record the fact that we've seen the not_taken case. */ elem_t *elem = lookup(global_table, src); ASSERT(elem != NULL); elem->state |= CBR_NOT_TAKEN; /* Remove the bb from the cache so it will be re-built the next * time it executes. */ /* Since the flush will remove the fragment we're already in, * redirect execution to the fallthrough address. 
*/ dr_flush_region(src, 1); dr_get_mcontext(drcontext, &mcontext); mcontext.pc = fall; dr_redirect_execution(&mcontext); } static dr_emit_flags_t event_app_instruction(void *drcontext, void *tag, instrlist_t *bb, instr_t *instr, bool for_trace, bool translating, void *user_data) { cbr_state_t state; bool insert_taken, insert_not_taken; app_pc src; elem_t *elem; /* conditional branch only */ if (!instr_is_cbr(instr)) return DR_EMIT_DEFAULT; /* We can determine the target and fallthrough addresses here, but we * want to note the edge if and when it actually executes at runtime. * Instead of using dr_insert_cbr_instrumentation(), we'll insert * separate instrumentation for the taken and not taken cases and * remove the instrumentation for an edge after it executes. */ /* First look up the state of this branch so we * know what instrumentation to insert, if any. */ src = instr_get_app_pc(instr); elem = lookup(global_table, src); if (elem == NULL) { state = CBR_NEITHER; insert(global_table, src, CBR_NEITHER); } else { state = elem->state; } insert_taken = (state & CBR_TAKEN) == 0; insert_not_taken = (state & CBR_NOT_TAKEN) == 0; if (insert_taken || insert_not_taken) { app_pc fall = (app_pc)decode_next_pc(drcontext, (byte *)src); app_pc targ = instr_get_branch_target_pc(instr); /* Redirect the existing cbr to jump to a callout for * the 'taken' case. We'll insert a 'not-taken' * callout at the fallthrough address. */ instr_t *label = INSTR_CREATE_label(drcontext); /* should be meta, and meta-instrs shouldn't have translations */ instr_set_meta_no_translation(instr); /* it may not reach (in particular for x64) w/ our added clean call */ if (instr_is_cti_short(instr)) { /* if jecxz/loop we want to set the target of the long-taken * so set instr to the return value */ instr = instr_convert_short_meta_jmp_to_long(drcontext, bb, instr); } instr_set_target(instr, opnd_create_instr(label)); if (insert_not_taken) { /* Callout for the not-taken case. Insert after * the cbr (i.e., 3rd argument is NULL). */ dr_insert_clean_call(drcontext, bb, NULL, (void *)at_not_taken, false /* don't save fp state */, 2 /* 2 args for at_not_taken */, OPND_CREATE_INTPTR(src), OPND_CREATE_INTPTR(fall)); } /* After the callout, jump to the original fallthrough * address. Note that this is an exit cti, and should * not be a meta-instruction. Therefore, we use * preinsert instead of meta_preinsert, and we must * set the translation field. On Windows, this jump * and the final jump below never execute since the * at_taken and at_not_taken callouts redirect * execution and never return. However, since the API * expects clients to produced well-formed code, we * insert explicit exits from the block for Windows as * well as Linux. */ instrlist_preinsert( bb, NULL, INSTR_XL8(INSTR_CREATE_jmp(drcontext, opnd_create_pc(fall)), fall)); /* label goes before the 'taken' callout */ MINSERT(bb, NULL, label); if (insert_taken) { /* Callout for the taken case */ dr_insert_clean_call(drcontext, bb, NULL, (void *)at_taken, false /* don't save fp state */, 2 /* 2 args for at_taken */, OPND_CREATE_INTPTR(src), OPND_CREATE_INTPTR(targ)); } /* After the callout, jump to the original target * block (this should not be a meta-instruction). 
*/ instrlist_preinsert( bb, NULL, INSTR_XL8(INSTR_CREATE_jmp(drcontext, opnd_create_pc(targ)), targ)); } /* since our added instrumentation is not constant, we ask to store * translations now */ return DR_EMIT_STORE_TRANSLATIONS; } void dr_exit(void) { #ifdef SHOW_RESULTS /* Print all the cbr's seen over the life of the process, and * whether we saw taken, not taken, or both. */ int i; for (i = 0; i < HASH_TABLE_SIZE; i++) { if (global_table[i] != NULL) { elem_t *iter; for (iter = global_table[i]->head; iter != NULL; iter = iter->next) { cbr_state_t state = iter->state; if (state == CBR_TAKEN) { dr_printf("" PFX ": taken\n", iter->addr); } else if (state == CBR_NOT_TAKEN) { dr_printf("" PFX ": not taken\n", iter->addr); } else { ASSERT(state == (CBR_TAKEN | CBR_NOT_TAKEN)); dr_printf("" PFX ": both\n", iter->addr); } } } } #endif delete_table(global_table); drmgr_exit(); } DR_EXPORT void dr_client_main(client_id_t id, int argc, const char *argv[]) { dr_set_client_name("DynamoRIO Sample Client 'cbr'", "http://dynamorio.org/issues"); if (!drmgr_init()) DR_ASSERT_MSG(false, "drmgr_init failed!"); global_table = new_table(); if (!drmgr_register_bb_instrumentation_event(NULL, event_app_instruction, NULL)) DR_ASSERT_MSG(false, "fail to register event_app_instruction!"); dr_register_exit_event(dr_exit); }
1
21,933
These are good cleanups: I didn't realize there were so many like this in the samples and tests. But given that there are quite a few of these mcontext changes, I would separate them into their own PR (no need for an issue: iX branch) since they are logically distinct. Cleaner history, simpler revert paths, etc.
DynamoRIO-dynamorio
c
@@ -0,0 +1,18 @@ +// Copyright (c) Six Labors. +// Licensed under the Apache License, Version 2.0. + +using BenchmarkDotNet.Attributes; + +using SixLabors.ImageSharp.PixelFormats; + +namespace SixLabors.ImageSharp.Benchmarks +{ + public class ColorEquality + { + [Benchmark(Description = "ImageSharp Color Equals")] + public bool ColorEqual() + { + return new Rgba32(128, 128, 128, 128).Equals(new Rgba32(128, 128, 128, 128)); + } + } +}
1
1
12,802
This is just a random struct equality check; no value here.
dotnet-performance
.cs
@@ -180,6 +180,7 @@ type Config struct { FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"` KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"` + NatPortRange numorstring.Port `config:"portrange;;local"` UsageReportingEnabled bool `config:"bool;true"` UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"`
1
// Copyright (c) 2016-2018 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package config import ( "errors" "fmt" "net" "os" "reflect" "regexp" "strconv" "strings" "time" log "github.com/sirupsen/logrus" "github.com/projectcalico/libcalico-go/lib/apiconfig" "github.com/projectcalico/libcalico-go/lib/names" "github.com/projectcalico/libcalico-go/lib/numorstring" ) var ( IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`) AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`) HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`) StringRegexp = regexp.MustCompile(`^.*$`) ) const ( maxUint = ^uint(0) maxInt = int(maxUint >> 1) minInt = -maxInt - 1 ) // Source of a config value. Values from higher-numbered sources override // those from lower-numbered sources. Note: some parameters (such as those // needed to connect to the datastore) can only be set from a local source. type Source uint8 const ( Default = iota DatastoreGlobal DatastorePerHost ConfigFile EnvironmentVariable ) var SourcesInDescendingOrder = []Source{EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal} func (source Source) String() string { switch source { case Default: return "<default>" case DatastoreGlobal: return "datastore (global)" case DatastorePerHost: return "datastore (per-host)" case ConfigFile: return "config file" case EnvironmentVariable: return "environment variable" } return fmt.Sprintf("<unknown(%v)>", uint8(source)) } func (source Source) Local() bool { switch source { case Default, ConfigFile, EnvironmentVariable: return true default: return false } } // Config contains the best, parsed config values loaded from the various sources. // We use tags to control the parsing and validation. type Config struct { // Configuration parameters. UseInternalDataplaneDriver bool `config:"bool;true"` DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"` DatastoreType string `config:"oneof(kubernetes,etcdv3);etcdv3;non-zero,die-on-fail,local"` FelixHostname string `config:"hostname;;local,non-zero"` EtcdAddr string `config:"authority;127.0.0.1:2379;local"` EtcdScheme string `config:"oneof(http,https);http;local"` EtcdKeyFile string `config:"file(must-exist);;local"` EtcdCertFile string `config:"file(must-exist);;local"` EtcdCaFile string `config:"file(must-exist);;local"` EtcdEndpoints []string `config:"endpoint-list;;local"` TyphaAddr string `config:"authority;;local"` TyphaK8sServiceName string `config:"string;;local"` TyphaK8sNamespace string `config:"string;kube-system;non-zero,local"` TyphaReadTimeout time.Duration `config:"seconds;30;local"` TyphaWriteTimeout time.Duration `config:"seconds;10;local"` // Client-side TLS config for Felix's communication with Typha. If any of these are // specified, they _all_ must be - except that either TyphaCN or TyphaURISAN may be left // unset. Felix will then initiate a secure (TLS) connection to Typha. 
Typha must present // a certificate signed by a CA in TyphaCAFile, and with CN matching TyphaCN or URI SAN // matching TyphaURISAN. TyphaKeyFile string `config:"file(must-exist);;local"` TyphaCertFile string `config:"file(must-exist);;local"` TyphaCAFile string `config:"file(must-exist);;local"` TyphaCN string `config:"string;;local"` TyphaURISAN string `config:"string;;local"` Ipv6Support bool `config:"bool;true"` IgnoreLooseRPF bool `config:"bool;false"` RouteRefreshInterval time.Duration `config:"seconds;90"` IptablesRefreshInterval time.Duration `config:"seconds;90"` IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"` IptablesLockFilePath string `config:"file;/run/xtables.lock"` IptablesLockTimeoutSecs time.Duration `config:"seconds;0"` IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"` IpsetsRefreshInterval time.Duration `config:"seconds;10"` MaxIpsetSize int `config:"int;1048576;non-zero"` PolicySyncPathPrefix string `config:"file;;"` NetlinkTimeoutSecs time.Duration `config:"seconds;10"` MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"` MetadataPort int `config:"int(0,65535);8775;die-on-fail"` InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"` InterfaceExclude string `config:"iface-list;kube-ipvs0"` ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"` DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"` IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"` IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"` LogPrefix string `config:"string;calico-packet"` LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"` LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"` LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"` LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"` IpInIpEnabled bool `config:"bool;false"` IpInIpMtu int `config:"int;1440;non-zero"` IpInIpTunnelAddr net.IP `config:"ipv4;"` ReportingIntervalSecs time.Duration `config:"seconds;30"` ReportingTTLSecs time.Duration `config:"seconds;90"` EndpointReportingEnabled bool `config:"bool;false"` EndpointReportingDelaySecs time.Duration `config:"seconds;1"` IptablesMarkMask uint32 `config:"mark-bitmask;0xffff0000;non-zero,die-on-fail"` DisableConntrackInvalidCheck bool `config:"bool;false"` HealthEnabled bool `config:"bool;false"` HealthPort int `config:"int(0,65535);9099"` HealthHost string `config:"string;localhost"` PrometheusMetricsEnabled bool `config:"bool;false"` PrometheusMetricsPort int `config:"int(0,65535);9091"` PrometheusGoMetricsEnabled bool `config:"bool;true"` PrometheusProcessMetricsEnabled bool `config:"bool;true"` FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"` FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"` KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"` UsageReportingEnabled bool `config:"bool;true"` UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"` UsageReportingIntervalSecs time.Duration `config:"seconds;86400"` ClusterGUID string `config:"string;baddecaf"` ClusterType string `config:"string;"` CalicoVersion string `config:"string;"` DebugMemoryProfilePath string `config:"file;;"` DebugCPUProfilePath 
string `config:"file;/tmp/felix-cpu-<timestamp>.pprof;"` DebugDisableLogDropping bool `config:"bool;false"` DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"` DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"` // State tracking. // nameToSource tracks where we loaded each config param from. sourceToRawConfig map[Source]map[string]string rawValues map[string]string Err error } type ProtoPort struct { Protocol string Port uint16 } // Load parses and merges the rawData from one particular source into this config object. // If there is a config value already loaded from a higher-priority source, then // the new value will be ignored (after validation). func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) { log.Infof("Merging in config from %v: %v", source, rawData) // Defensively take a copy of the raw data, in case we've been handed // a mutable map by mistake. rawDataCopy := make(map[string]string) for k, v := range rawData { if v == "" { log.WithFields(log.Fields{ "name": k, "source": source, }).Info("Ignoring empty configuration parameter. Use value 'none' if " + "your intention is to explicitly disable the default value.") continue } rawDataCopy[k] = v } config.sourceToRawConfig[source] = rawDataCopy changed, err = config.resolve() return } func (c *Config) InterfacePrefixes() []string { return strings.Split(c.InterfacePrefix, ",") } func (c *Config) InterfaceExcludes() []string { return strings.Split(c.InterfaceExclude, ",") } func (config *Config) OpenstackActive() bool { if strings.Contains(strings.ToLower(config.ClusterType), "openstack") { // OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin // set this flag. log.Debug("Cluster type contains OpenStack") return true } // If we get here, either OpenStack isn't present or we're running against an old version // of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the // presence of the OpenStack-related parameters. if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" { log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active") return true } if config.MetadataPort != 0 && config.MetadataPort != 8775 { log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active") return true } for _, prefix := range config.InterfacePrefixes() { if prefix == "tap" { log.Debug("Interface prefix list contains 'tap', assuming OpenStack") return true } } log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases") return false } func (config *Config) resolve() (changed bool, err error) { newRawValues := make(map[string]string) nameToSource := make(map[string]Source) for _, source := range SourcesInDescendingOrder { valueLoop: for rawName, rawValue := range config.sourceToRawConfig[source] { currentSource := nameToSource[rawName] param, ok := knownParams[strings.ToLower(rawName)] if !ok { if source >= currentSource { // Stash the raw value in case it's useful for // a plugin. Since we don't know the canonical // name, use the raw name. 
newRawValues[rawName] = rawValue nameToSource[rawName] = source } log.WithField("raw name", rawName).Info( "Ignoring unknown config param.") continue valueLoop } metadata := param.GetMetadata() name := metadata.Name if metadata.Local && !source.Local() { log.Warningf("Ignoring local-only configuration for %v from %v", name, source) continue valueLoop } log.Infof("Parsing value for %v: %v (from %v)", name, rawValue, source) var value interface{} if strings.ToLower(rawValue) == "none" { // Special case: we allow a value of "none" to force the value to // the zero value for a field. The zero value often differs from // the default value. Typically, the zero value means "turn off // the feature". if metadata.NonZero { err = errors.New("Non-zero field cannot be set to none") log.Errorf( "Failed to parse value for %v: %v from source %v. %v", name, rawValue, source, err) config.Err = err return } value = metadata.ZeroValue log.Infof("Value set to 'none', replacing with zero-value: %#v.", value) } else { value, err = param.Parse(rawValue) if err != nil { logCxt := log.WithError(err).WithField("source", source) if metadata.DieOnParseFailure { logCxt.Error("Invalid (required) config value.") config.Err = err return } else { logCxt.WithField("default", metadata.Default).Warn( "Replacing invalid value with default") value = metadata.Default err = nil } } } log.Infof("Parsed value for %v: %v (from %v)", name, value, source) if source < currentSource { log.Infof("Skipping config value for %v from %v; "+ "already have a value from %v", name, source, currentSource) continue } field := reflect.ValueOf(config).Elem().FieldByName(name) field.Set(reflect.ValueOf(value)) newRawValues[name] = rawValue nameToSource[name] = source } } changed = !reflect.DeepEqual(newRawValues, config.rawValues) config.rawValues = newRawValues return } func (config *Config) setBy(name string, source Source) bool { _, set := config.sourceToRawConfig[source][name] return set } func (config *Config) setByConfigFileOrEnvironment(name string) bool { return config.setBy(name, ConfigFile) || config.setBy(name, EnvironmentVariable) } func (config *Config) DatastoreConfig() apiconfig.CalicoAPIConfig { // We want Felix's datastore connection to be fully configurable using the same // CALICO_XXX_YYY (or just XXX_YYY) environment variables that work for any libcalico-go // client - for both the etcdv3 and KDD cases. However, for the etcd case, Felix has for a // long time supported FELIX_XXXYYY environment variables, and we want those to keep working // too. // To achieve that, first build a CalicoAPIConfig using libcalico-go's // LoadClientConfigFromEnvironment - which means incorporating defaults and CALICO_XXX_YYY // and XXX_YYY variables. cfg, err := apiconfig.LoadClientConfigFromEnvironment() if err != nil { log.WithError(err).Panic("Failed to create datastore config") } // Now allow FELIX_XXXYYY variables or XxxYyy config file settings to override that, in the // etcd case. if config.setByConfigFileOrEnvironment("DatastoreType") && config.DatastoreType == "etcdv3" { cfg.Spec.DatastoreType = apiconfig.EtcdV3 // Endpoints. if config.setByConfigFileOrEnvironment("EtcdEndpoints") && len(config.EtcdEndpoints) > 0 { cfg.Spec.EtcdEndpoints = strings.Join(config.EtcdEndpoints, ",") } else if config.setByConfigFileOrEnvironment("EtcdAddr") { cfg.Spec.EtcdEndpoints = config.EtcdScheme + "://" + config.EtcdAddr } // TLS. 
if config.setByConfigFileOrEnvironment("EtcdKeyFile") { cfg.Spec.EtcdKeyFile = config.EtcdKeyFile } if config.setByConfigFileOrEnvironment("EtcdCertFile") { cfg.Spec.EtcdCertFile = config.EtcdCertFile } if config.setByConfigFileOrEnvironment("EtcdCaFile") { cfg.Spec.EtcdCACertFile = config.EtcdCaFile } } if !config.IpInIpEnabled { // Polling k8s for node updates is expensive (because we get many superfluous // updates) so disable if we don't need it. log.Info("IPIP disabled, disabling node poll (if KDD is in use).") cfg.Spec.K8sDisableNodePoll = true } return *cfg } // Validate() performs cross-field validation. func (config *Config) Validate() (err error) { if config.FelixHostname == "" { err = errors.New("Failed to determine hostname") } if config.DatastoreType == "etcdv3" && len(config.EtcdEndpoints) == 0 { if config.EtcdScheme == "" { err = errors.New("EtcdEndpoints and EtcdScheme both missing") } if config.EtcdAddr == "" { err = errors.New("EtcdEndpoints and EtcdAddr both missing") } } // If any client-side TLS config parameters are specified, they _all_ must be - except that // either TyphaCN or TyphaURISAN may be left unset. if config.TyphaCAFile != "" || config.TyphaCertFile != "" || config.TyphaKeyFile != "" || config.TyphaCN != "" || config.TyphaURISAN != "" { // Some TLS config specified. if config.TyphaKeyFile == "" || config.TyphaCertFile == "" || config.TyphaCAFile == "" || (config.TyphaCN == "" && config.TyphaURISAN == "") { err = errors.New("If any Felix-Typha TLS config parameters are specified," + " they _all_ must be" + " - except that either TyphaCN or TyphaURISAN may be left unset.") } } if err != nil { config.Err = err } return } var knownParams map[string]param func loadParams() { knownParams = make(map[string]param) config := Config{} kind := reflect.TypeOf(config) metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` + `([^;]*)(?:;` + `([^;]*))?$`) for ii := 0; ii < kind.NumField(); ii++ { field := kind.Field(ii) tag := field.Tag.Get("config") if tag == "" { continue } captures := metaRegexp.FindStringSubmatch(tag) if len(captures) == 0 { log.Panicf("Failed to parse metadata for config param %v", field.Name) } log.Debugf("%v: metadata captures: %#v", field.Name, captures) kind := captures[1] // Type: "int|oneof|bool|port-list|..." kindParams := captures[2] // Parameters for the type: e.g. 
for oneof "http,https" defaultStr := captures[3] // Default value e.g "1.0" flags := captures[4] var param param var err error switch kind { case "bool": param = &BoolParam{} case "int": min := minInt max := maxInt if kindParams != "" { minAndMax := strings.Split(kindParams, ",") min, err = strconv.Atoi(minAndMax[0]) if err != nil { log.Panicf("Failed to parse min value for %v", field.Name) } max, err = strconv.Atoi(minAndMax[1]) if err != nil { log.Panicf("Failed to parse max value for %v", field.Name) } } param = &IntParam{Min: min, Max: max} case "int32": param = &Int32Param{} case "mark-bitmask": param = &MarkBitmaskParam{} case "float": param = &FloatParam{} case "seconds": param = &SecondsParam{} case "millis": param = &MillisParam{} case "iface-list": param = &RegexpParam{Regexp: IfaceListRegexp, Msg: "invalid Linux interface name"} case "file": param = &FileParam{ MustExist: strings.Contains(kindParams, "must-exist"), Executable: strings.Contains(kindParams, "executable"), } case "authority": param = &RegexpParam{Regexp: AuthorityRegexp, Msg: "invalid URL authority"} case "ipv4": param = &Ipv4Param{} case "endpoint-list": param = &EndpointListParam{} case "port-list": param = &PortListParam{} case "portrange-list": param = &PortRangeListParam{} case "hostname": param = &RegexpParam{Regexp: HostnameRegexp, Msg: "invalid hostname"} case "oneof": options := strings.Split(kindParams, ",") lowerCaseToCanon := make(map[string]string) for _, option := range options { lowerCaseToCanon[strings.ToLower(option)] = option } param = &OneofListParam{ lowerCaseOptionsToCanonical: lowerCaseToCanon} case "string": param = &RegexpParam{Regexp: StringRegexp, Msg: "invalid string"} default: log.Panicf("Unknown type of parameter: %v", kind) } metadata := param.GetMetadata() metadata.Name = field.Name metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface() if strings.Index(flags, "non-zero") > -1 { metadata.NonZero = true } if strings.Index(flags, "die-on-fail") > -1 { metadata.DieOnParseFailure = true } if strings.Index(flags, "local") > -1 { metadata.Local = true } if defaultStr != "" { if strings.Index(flags, "skip-default-validation") > -1 { metadata.Default = defaultStr } else { // Parse the default value and save it in the metadata. Doing // that here ensures that we syntax-check the defaults now. defaultVal, err := param.Parse(defaultStr) if err != nil { log.Panicf("Invalid default value: %v", err) } metadata.Default = defaultVal } } else { metadata.Default = metadata.ZeroValue } knownParams[strings.ToLower(field.Name)] = param } } func (config *Config) RawValues() map[string]string { return config.rawValues } func New() *Config { if knownParams == nil { loadParams() } p := &Config{ rawValues: make(map[string]string), sourceToRawConfig: make(map[Source]map[string]string), } for _, param := range knownParams { param.setDefault(p) } hostname, err := names.Hostname() if err != nil { log.Warningf("Failed to get hostname from kernel, "+ "trying HOSTNAME variable: %v", err) hostname = strings.ToLower(os.Getenv("HOSTNAME")) } p.FelixHostname = hostname return p } type param interface { GetMetadata() *Metadata Parse(raw string) (result interface{}, err error) setDefault(*Config) }
1
16,561
Just spotted the `local` on here; that shouldn't be needed - no reason to limit this config to env vars only
projectcalico-felix
go
@@ -8,8 +8,12 @@ except ImportError: import numpy as np import param -from ..dimension import redim -from ..util import unique_iterator +from ..dimension import redim, Dimension, process_dimensions +from ..element import Element +from ..ndmapping import OrderedDict +from ..spaces import HoloMap, DynamicMap +from ..util import (basestring, dimension_range as d_range, get_param_values, + isfinite, process_ellipses, unique_iterator, wrap_tuple) from .interface import Interface, iloc, ndloc from .array import ArrayInterface from .dictionary import DictInterface
1
from __future__ import absolute_import try: import itertools.izip as zip except ImportError: pass import numpy as np import param from ..dimension import redim from ..util import unique_iterator from .interface import Interface, iloc, ndloc from .array import ArrayInterface from .dictionary import DictInterface from .grid import GridInterface from .multipath import MultiInterface # noqa (API import) from .image import ImageInterface # noqa (API import) default_datatype = 'dictionary' datatypes = ['dictionary', 'grid'] try: import pandas as pd # noqa (Availability import) from .pandas import PandasInterface default_datatype = 'dataframe' datatypes = ['dataframe', 'dictionary', 'grid'] DFColumns = PandasInterface except ImportError: pd = None except Exception as e: pd = None param.main.warning('Pandas interface failed to import with ' 'following error: %s' % e) try: import xarray # noqa (Availability import) from .xarray import XArrayInterface # noqa (Conditional API import) datatypes.append('xarray') except ImportError: pass try: from .dask import DaskInterface # noqa (Conditional API import) datatypes.append('dask') except ImportError: pass if 'array' not in datatypes: datatypes.append('array') from ..dimension import Dimension, process_dimensions from ..element import Element from ..ndmapping import OrderedDict from ..spaces import HoloMap, DynamicMap from .. import util def concat(datasets, datatype=None): """ Concatenates multiple datasets wrapped in an NdMapping type along all of its dimensions. Before concatenation all datasets are cast to the same datatype, which may be explicitly defined or implicitly derived from the first datatype that is encountered. For columnar data concatenation adds the columns for the dimensions being concatenated along and then concatenates all the old and new columns. For gridded data a new axis is created for each dimension being concatenated along and then hierarchically concatenates along each dimension. Signature --------- datasets: NdMapping of Datasets defining dimensions to concatenate on datatype: Datatype to cast data to before concatenation Returns: Dataset """ return Interface.concatenate(datasets, datatype) class DataConversion(object): """ DataConversion is a very simple container object which can be given an existing Dataset Element and provides methods to convert the Dataset into most other Element types. """ def __init__(self, element): self._element = element def __call__(self, new_type, kdims=None, vdims=None, groupby=None, sort=False, **kwargs): """ Generic conversion method for Dataset based Element types. Supply the Dataset Element type to convert to and optionally the key dimensions (kdims), value dimensions (vdims) and the dimensions. to group over. Converted Columns can be automatically sorted via the sort option and kwargs can be passed through. 
""" if 'mdims' in kwargs: if groupby: raise ValueError('Cannot supply both mdims and groupby') else: self._element.warning("'mdims' keyword has been renamed " "to 'groupby'; the name mdims is " "deprecated and will be removed " "after version 1.7.") groupby = kwargs.pop('mdims') element_params = new_type.params() kdim_param = element_params['kdims'] vdim_param = element_params['vdims'] if isinstance(kdim_param.bounds[1], int): ndim = min([kdim_param.bounds[1], len(kdim_param.default)]) else: ndim = None nvdim = vdim_param.bounds[1] if isinstance(vdim_param.bounds[1], int) else None if kdims is None: kd_filter = groupby or [] if not isinstance(kd_filter, list): kd_filter = [groupby] kdims = [kd for kd in self._element.kdims if kd not in kd_filter][:ndim] elif kdims and not isinstance(kdims, list): kdims = [kdims] if vdims is None: vdims = [d for d in self._element.vdims if d not in kdims][:nvdim] if vdims and not isinstance(vdims, list): vdims = [vdims] # Checks Element type supports dimensionality type_name = new_type.__name__ for dim_type, dims in (('kdims', kdims), ('vdims', vdims)): min_d, max_d = new_type.params(dim_type).bounds if ((min_d is not None and len(dims) < min_d) or (max_d is not None and len(dims) > max_d)): raise ValueError("%s %s must be between length %s and %s." % (type_name, dim_type, min_d, max_d)) if groupby is None: groupby = [d for d in self._element.kdims if d not in kdims+vdims] elif groupby and not isinstance(groupby, list): groupby = [groupby] if self._element.interface.gridded: dropped_kdims = [kd for kd in self._element.kdims if kd not in groupby+kdims] if dropped_kdims: selected = self._element.reindex(groupby+kdims, vdims) else: selected = self._element else: if pd and issubclass(self._element.interface, PandasInterface): ds_dims = self._element.dimensions() ds_kdims = [self._element.get_dimension(d) if d in ds_dims else d for d in groupby+kdims] ds_vdims = [self._element.get_dimension(d) if d in ds_dims else d for d in vdims] selected = self._element.clone(kdims=ds_kdims, vdims=ds_vdims) else: selected = self._element.reindex(groupby+kdims, vdims) params = {'kdims': [selected.get_dimension(kd, strict=True) for kd in kdims], 'vdims': [selected.get_dimension(vd, strict=True) for vd in vdims], 'label': selected.label} if selected.group != selected.params()['group'].default: params['group'] = selected.group params.update(kwargs) if len(kdims) == selected.ndims or not groupby: element = new_type(selected, **params) return element.sort() if sort else element group = selected.groupby(groupby, container_type=HoloMap, group_type=new_type, **params) if sort: return group.map(lambda x: x.sort(), [new_type]) else: return group class Dataset(Element): """ Dataset provides a general baseclass for Element types that contain structured data and supports a range of data formats. The Dataset class supports various methods offering a consistent way of working with the stored data regardless of the storage format used. These operations include indexing, selection and various ways of aggregating or collapsing the data with a supplied function. """ datatype = param.List(datatypes, doc=""" A priority list of the data types to be used for storage on the .data attribute. 
If the input supplied to the element constructor cannot be put into the requested format, the next format listed will be used until a suitable format is found (or the data fails to be understood).""") group = param.String(default='Dataset', constant=True) # In the 1D case the interfaces should not automatically add x-values # to supplied data _auto_indexable_1d = False # Define a class used to transform Datasets into other Element types _conversion_interface = DataConversion # Whether the key dimensions are specified as bins _binned = False _vdim_reductions = {} _kdim_reductions = {} def __init__(self, data, kdims=None, vdims=None, **kwargs): if isinstance(data, Element): pvals = util.get_param_values(data) kwargs.update([(l, pvals[l]) for l in ['group', 'label'] if l in pvals and l not in kwargs]) kwargs.update(process_dimensions(kdims, vdims)) kdims, vdims = kwargs.get('kdims'), kwargs.get('vdims') validate_vdims = kwargs.pop('_validate_vdims', True) initialized = Interface.initialize(type(self), data, kdims, vdims, datatype=kwargs.get('datatype')) (data, self.interface, dims, extra_kws) = initialized super(Dataset, self).__init__(data, **dict(kwargs, **dict(dims, **extra_kws))) self.interface.validate(self, validate_vdims) self.redim = redim(self, mode='dataset') def closest(self, coords=[], **kwargs): """ Given a single coordinate or multiple coordinates as a tuple or list of tuples or keyword arguments matching the dimension closest will find the closest actual x/y coordinates. Different Element types should implement this appropriately depending on the space they represent, if the Element does not support snapping raise NotImplementedError. """ if self.ndims > 1: raise NotImplementedError("Closest method currently only " "implemented for 1D Elements") if kwargs: if len(kwargs) > 1: raise NotImplementedError("Closest method currently only " "supports 1D indexes") samples = list(kwargs.values())[0] coords = samples if isinstance(samples, list) else [samples] xs = self.dimension_values(0) if xs.dtype.kind in 'SO': raise NotImplementedError("Closest only supported for numeric types") idxs = [np.argmin(np.abs(xs-coord)) for coord in coords] return [xs[idx] for idx in idxs] def sort(self, by=[], reverse=False): """ Sorts the data by the values along the supplied dimensions. """ if not by: by = self.kdims if not isinstance(by, list): by = [by] sorted_columns = self.interface.sort(self, by, reverse) return self.clone(sorted_columns) def range(self, dim, data_range=True, dimension_range=True): """ Returns the range of values along the specified dimension. dimension: str/int/Dimension The dimension to compute the range on. data_range: bool Whether the range should include the data range or only the dimension ranges dimension_range: Whether to compute the range including the Dimension range and soft_range """ dim = self.get_dimension(dim) if dim is None or (not data_range and not dimension_range): return (None, None) elif all(util.isfinite(v) for v in dim.range) and dimension_range: return dim.range elif dim in self.dimensions() and data_range and len(self): lower, upper = self.interface.range(self, dim) else: lower, upper = (np.NaN, np.NaN) if not dimension_range: return lower, upper return util.dimension_range(lower, upper, dim.range, dim.soft_range) def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs): """ Create a new object with an additional key dimensions. 
Requires the dimension name or object, the desired position in the key dimensions and a key value scalar or sequence of the same length as the existing keys. """ if isinstance(dimension, (util.basestring, tuple)): dimension = Dimension(dimension) if dimension.name in self.kdims: raise Exception('{dim} dimension already defined'.format(dim=dimension.name)) if vdim: dims = self.vdims[:] dims.insert(dim_pos, dimension) dimensions = dict(vdims=dims) dim_pos += self.ndims else: dims = self.kdims[:] dims.insert(dim_pos, dimension) dimensions = dict(kdims=dims) if issubclass(self.interface, ArrayInterface) and np.asarray(dim_val).dtype != self.data.dtype: element = self.clone(datatype=[default_datatype]) data = element.interface.add_dimension(element, dimension, dim_pos, dim_val, vdim) else: data = self.interface.add_dimension(self, dimension, dim_pos, dim_val, vdim) return self.clone(data, **dimensions) def select(self, selection_specs=None, **selection): """ Allows selecting data by the slices, sets and scalar values along a particular dimension. The indices should be supplied as keywords mapping between the selected dimension and value. Additionally selection_specs (taking the form of a list of type.group.label strings, types or functions) may be supplied, which will ensure the selection is only applied if the specs match the selected object. """ selection = {dim: sel for dim, sel in selection.items() if dim in self.dimensions()+['selection_mask']} if (selection_specs and not any(self.matches(sp) for sp in selection_specs) or not selection): return self data = self.interface.select(self, **selection) if np.isscalar(data): return data else: return self.clone(data) def reindex(self, kdims=None, vdims=None): """ Create a new object with a re-ordered set of dimensions. Allows converting key dimensions to value dimensions and vice versa. """ gridded = self.interface.gridded scalars = [] if gridded: coords = [(d, self.interface.coords(self, d.name)) for d in self.kdims] scalars = [d for d, vs in coords if len(vs) == 1] if kdims is None: # If no key dimensions are defined and interface is gridded # drop all scalar key dimensions key_dims = [d for d in self.kdims if (not vdims or d not in vdims) and not d in scalars] else: key_dims = [self.get_dimension(k, strict=True) for k in kdims] dropped = [d for d in self.kdims if not d in key_dims and not d in scalars] new_type = None if vdims is None: val_dims = [d for d in self.vdims if not kdims or d not in kdims] else: val_dims = [self.get_dimension(v, strict=True) for v in vdims] new_type = self._vdim_reductions.get(len(val_dims), type(self)) data = self.interface.reindex(self, key_dims, val_dims) datatype = self.datatype if gridded and dropped: datatype = [dt for dt in datatype if not self.interface.interfaces[dt].gridded] return self.clone(data, kdims=key_dims, vdims=val_dims, new_type=new_type, datatype=datatype) def __getitem__(self, slices): """ Allows slicing and selecting values in the Dataset object. Supports multiple indexing modes: (1) Slicing and indexing along the values of each dimension in the columns object using either scalars, slices or sets of values. (2) Supplying the name of a dimension as the first argument will return the values along that dimension as a numpy array. (3) Slicing of all key dimensions and selecting a single value dimension by name. (4) A boolean array index matching the length of the Dataset object. 
""" slices = util.process_ellipses(self, slices, vdim_selection=True) if isinstance(slices, np.ndarray) and slices.dtype.kind == 'b': if not len(slices) == len(self): raise IndexError("Boolean index must match length of sliced object") return self.clone(self.select(selection_mask=slices)) elif slices in [(), Ellipsis]: return self if not isinstance(slices, tuple): slices = (slices,) value_select = None if len(slices) == 1 and slices[0] in self.dimensions(): return self.dimension_values(slices[0]) elif len(slices) == self.ndims+1 and slices[self.ndims] in self.dimensions(): selection = dict(zip(self.dimensions('key', label=True), slices)) value_select = slices[self.ndims] elif len(slices) == self.ndims+1 and isinstance(slices[self.ndims], (Dimension,str)): raise IndexError("%r is not an available value dimension" % slices[self.ndims]) else: selection = dict(zip(self.dimensions(label=True), slices)) data = self.select(**selection) if value_select: if data.shape[0] == 1: return data[value_select][0] else: return data.reindex(vdims=[value_select]) return data def sample(self, samples=[], closest=True, **kwargs): """ Allows sampling of Dataset as an iterator of coordinates matching the key dimensions, returning a new object containing just the selected samples. Alternatively may supply kwargs to sample a coordinate on an object. By default it will attempt to snap to the nearest coordinate if the Element supports it, snapping may be disabled with the closest argument. """ if kwargs and samples: raise Exception('Supply explicit list of samples or kwargs, not both.') elif kwargs: sample = [slice(None) for _ in range(self.ndims)] for dim, val in kwargs.items(): sample[self.get_dimension_index(dim)] = val samples = [tuple(sample)] # Note: Special handling sampling of gridded 2D data as Curve # may be replaced with more general handling # see https://github.com/ioam/holoviews/issues/1173 from ...element import Table, Curve if len(samples) == 1: sel = {kd.name: s for kd, s in zip(self.kdims, samples[0])} dims = [kd for kd, v in sel.items() if not np.isscalar(v)] selection = self.select(**sel) # If a 1D cross-section of 2D space return Curve if self.interface.gridded and self.ndims == 2 and len(dims) == 1: new_type = Curve kdims = [self.get_dimension(kd) for kd in dims] else: new_type = Table kdims = self.kdims if np.isscalar(selection): selection = [samples[0]+(selection,)] else: reindexed = selection.clone(new_type=Dataset).reindex(kdims) selection = tuple(reindexed.columns(kdims+self.vdims).values()) datatype = list(util.unique_iterator(self.datatype+['dataframe', 'dict'])) return self.clone(selection, kdims=kdims, new_type=new_type, datatype=datatype) lens = set(len(util.wrap_tuple(s)) for s in samples) if len(lens) > 1: raise IndexError('Sample coordinates must all be of the same length.') if closest: try: samples = self.closest(samples) except NotImplementedError: pass samples = [util.wrap_tuple(s) for s in samples] return self.clone(self.interface.sample(self, samples), new_type=Table) def reduce(self, dimensions=[], function=None, spreadfn=None, **reduce_map): """ Allows reducing the values along one or more key dimension with the supplied function. The dimensions may be supplied as a list and a function to apply or a mapping between the dimensions and functions to apply along each dimension. 
""" if any(dim in self.vdims for dim in dimensions): raise Exception("Reduce cannot be applied to value dimensions") function, dims = self._reduce_map(dimensions, function, reduce_map) dims = [d for d in self.kdims if d not in dims] return self.aggregate(dims, function, spreadfn) def aggregate(self, dimensions=None, function=None, spreadfn=None, **kwargs): """ Aggregates over the supplied key dimensions with the defined function. """ if function is None: raise ValueError("The aggregate method requires a function to be specified") if dimensions is None: dimensions = self.kdims elif not isinstance(dimensions, list): dimensions = [dimensions] kdims = [self.get_dimension(d, strict=True) for d in dimensions] if not len(self): if spreadfn: spread_name = spreadfn.__name__ vdims = [d for vd in self.vdims for d in [vd, vd('_'.join([vd.name, spread_name]))]] else: vdims = self.vdims return self.clone([], kdims=kdims, vdims=vdims) aggregated = self.interface.aggregate(self, kdims, function, **kwargs) aggregated = self.interface.unpack_scalar(self, aggregated) ndims = len(dimensions) min_d, max_d = self.params('kdims').bounds generic_type = (min_d is not None and ndims < min_d) or (max_d is not None and ndims > max_d) vdims = self.vdims if spreadfn: error = self.interface.aggregate(self, dimensions, spreadfn) spread_name = spreadfn.__name__ ndims = len(vdims) error = self.clone(error, kdims=kdims, new_type=Dataset) combined = self.clone(aggregated, kdims=kdims, new_type=Dataset) for i, d in enumerate(vdims): dim = d('_'.join([d.name, spread_name])) dvals = error.dimension_values(d, flat=False) combined = combined.add_dimension(dim, ndims+i, dvals, True) return combined.clone(new_type=Dataset if generic_type else type(self)) if np.isscalar(aggregated): return aggregated else: try: # Should be checking the dimensions declared on the element are compatible return self.clone(aggregated, kdims=kdims, vdims=vdims) except: datatype = self.params('datatype').default return self.clone(aggregated, kdims=kdims, vdims=vdims, new_type=Dataset if generic_type else None, datatype=datatype) def groupby(self, dimensions=[], container_type=HoloMap, group_type=None, dynamic=False, **kwargs): """Return the results of a groupby operation over the specified dimensions as an object of type container_type (expected to be dictionary-like). Keys vary over the columns (dimensions) and the corresponding values are collections of group_type (e.g an Element, list, tuple) constructed with kwargs (if supplied). If dynamic is requested container_type is automatically set to a DynamicMap, allowing dynamic exploration of large datasets. If the data does not represent a full cartesian grid of the requested dimensions some Elements will be empty. 
""" if not isinstance(dimensions, list): dimensions = [dimensions] if not len(dimensions): dimensions = self.dimensions('key', True) if group_type is None: group_type = type(self) dimensions = [self.get_dimension(d, strict=True) for d in dimensions] dim_names = [d.name for d in dimensions] if dynamic: group_dims = [kd for kd in self.kdims if kd not in dimensions] kdims = [self.get_dimension(d) for d in kwargs.pop('kdims', group_dims)] drop_dim = len(group_dims) != len(kdims) group_kwargs = dict(util.get_param_values(self), kdims=kdims) group_kwargs.update(kwargs) def load_subset(*args): constraint = dict(zip(dim_names, args)) group = self.select(**constraint) if np.isscalar(group): return group_type(([group],), group=self.group, label=self.label, vdims=self.vdims) data = group.reindex(kdims) if drop_dim and self.interface.gridded: data = data.columns() return group_type(data, **group_kwargs) dynamic_dims = [d(values=list(self.interface.values(self, d.name, False))) for d in dimensions] return DynamicMap(load_subset, kdims=dynamic_dims) return self.interface.groupby(self, dim_names, container_type, group_type, **kwargs) def __len__(self): """ Returns the number of rows in the Dataset object. """ return self.interface.length(self) def __nonzero__(self): return self.interface.nonzero(self) __bool__ = __nonzero__ @property def shape(self): "Returns the shape of the data." return self.interface.shape(self) def dimension_values(self, dim, expanded=True, flat=True): """ Returns the values along a particular dimension. If unique values are requested will return only unique values. """ dim = self.get_dimension(dim, strict=True) return self.interface.values(self, dim, expanded, flat) def get_dimension_type(self, dim): """ Returns the specified Dimension type if specified or if the dimension_values types are consistent otherwise None is returned. """ dim_obj = self.get_dimension(dim) if dim_obj and dim_obj.type is not None: return dim_obj.type return self.interface.dimension_type(self, dim_obj) def dframe(self, dimensions=None): """ Returns the data in the form of a DataFrame. Supplying a list of dimensions filters the dataframe. If the data is already a DataFrame a copy is returned. """ if dimensions: dimensions = [self.get_dimension(d, strict=True).name for d in dimensions] return self.interface.dframe(self, dimensions) def columns(self, dimensions=None): if dimensions is None: dimensions = self.dimensions() else: dimensions = [self.get_dimension(d, strict=True) for d in dimensions] return OrderedDict([(d.name, self.dimension_values(d)) for d in dimensions]) @property def to(self): """ Property to create a conversion interface with methods to convert to other Element types. """ return self._conversion_interface(self) def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides): """ Returns a clone of the object with matching parameter values containing the specified args and kwargs. If shared_data is set to True and no data explicitly supplied, the clone will share data with the original. May also supply a new_type, which will inherit all shared parameters. """ if 'datatype' not in overrides: datatypes = [self.interface.datatype] + self.datatype overrides['datatype'] = list(unique_iterator(datatypes)) return super(Dataset, self).clone(data, shared_data, new_type, *args, **overrides) @property def iloc(self): """ Returns an iloc object providing a convenient interface to slice and index into the Dataset using row and column indices. 
Allow selection by integer index, slice and list of integer indices and boolean arrays. Examples: * Index the first row and column: dataset.iloc[0, 0] * Select rows 1 and 2 with a slice: dataset.iloc[1:3, :] * Select with a list of integer coordinates: dataset.iloc[[0, 2, 3]] """ return iloc(self) @property def ndloc(self): """ Returns an ndloc object providing nd-array like indexing for gridded datasets. Follows NumPy array indexing conventions, allowing for indexing, slicing and selecting a list of indices on multi-dimensional arrays using integer indices. The order of array indices is inverted relative to the Dataset key dimensions, e.g. an Image with key dimensions 'x' and 'y' can be indexed with ``image.ndloc[iy, ix]``, where ``iy`` and ``ix`` are integer indices along the y and x dimensions. Examples: * Index value in 2D array: dataset.ndloc[3, 1] * Slice along y-axis of 2D array: dataset.ndloc[2:5, :] * Vectorized (non-orthogonal) indexing along x- and y-axes: dataset.ndloc[[1, 2, 3], [0, 2, 3]] """ return ndloc(self) # Aliases for pickle backward compatibility Columns = Dataset ArrayColumns = ArrayInterface DictColumns = DictInterface GridColumns = GridInterface
1
21,632
I had some weird issues where importing ``from .. import util`` pulled in the wrong utilities, hence I did this.
holoviz-holoviews
py
@@ -1,8 +1,8 @@ -#NVDAObjects/WinConsole.py -#A part of NonVisual Desktop Access (NVDA) -#This file is covered by the GNU General Public License. -#See the file COPYING for more details. -#Copyright (C) 2007-2019 NV Access Limited, Bill Dengler +# NVDAObjects/WinConsole.py +# A part of NonVisual Desktop Access (NVDA) +# This file is covered by the GNU General Public License. +# See the file COPYING for more details. +# Copyright (C) 2007-2020 NV Access Limited, Bill Dengler import winConsoleHandler from . import Window
1
#NVDAObjects/WinConsole.py #A part of NonVisual Desktop Access (NVDA) #This file is covered by the GNU General Public License. #See the file COPYING for more details. #Copyright (C) 2007-2019 NV Access Limited, Bill Dengler import winConsoleHandler from . import Window from ..behaviors import Terminal, EditableTextWithoutAutoSelectDetection, KeyboardHandlerBasedTypedCharSupport import api import core from scriptHandler import script import speech class WinConsole(Terminal, EditableTextWithoutAutoSelectDetection, Window): """ NVDA's legacy Windows Console support. This is used in situations where UIA isn't available. Please consider using NVDAObjects.UIA.winConsoleUIA instead. """ STABILIZE_DELAY = 0.03 def initOverlayClass(self): # Legacy consoles take quite a while to send textChange events. # This significantly impacts typing performance, so don't queue chars. if isinstance(self, KeyboardHandlerBasedTypedCharSupport): self._supportsTextChange = False def _get_windowThreadID(self): # #10113: Windows forces the thread of console windows to match the thread of the first attached process. # However, To correctly handle speaking of typed characters, # NVDA really requires the real thread the window was created in, # I.e. a thread inside conhost. from IAccessibleHandler.internalWinEventHandler import consoleWindowsToThreadIDs threadID = consoleWindowsToThreadIDs.get(self.windowHandle, 0) if not threadID: threadID = super().windowThreadID return threadID def _get_TextInfo(self): consoleObject=winConsoleHandler.consoleObject if consoleObject and self.windowHandle == consoleObject.windowHandle: return winConsoleHandler.WinConsoleTextInfo return super(WinConsole,self).TextInfo def event_becomeNavigatorObject(self, isFocus=False): if winConsoleHandler.consoleObject is not self: if winConsoleHandler.consoleObject: winConsoleHandler.disconnectConsole() winConsoleHandler.connectConsole(self) if self == api.getFocusObject(): # The user is returning to the focus object with object navigation. # The focused console should always be monitored if possible. self.startMonitoring() super(WinConsole,self).event_becomeNavigatorObject(isFocus=isFocus) def event_gainFocus(self): if winConsoleHandler.consoleObject is not self: if winConsoleHandler.consoleObject: winConsoleHandler.disconnectConsole() winConsoleHandler.connectConsole(self) super(WinConsole, self).event_gainFocus() def event_loseFocus(self): super(WinConsole, self).event_loseFocus() if winConsoleHandler.consoleObject is self: winConsoleHandler.disconnectConsole() def event_nameChange(self): pass def _getTextLines(self): return winConsoleHandler.getConsoleVisibleLines() def script_caret_backspaceCharacter(self, gesture): super(WinConsole, self).script_caret_backspaceCharacter(gesture) # #2586: We use console update events for typed characters, # so the typedCharacter event is never fired for the backspace key. # Call it here so that speak typed words works as expected. self.event_typedCharacter(u"\b") def script_close(self,gesture): # #5343: New consoles in Windows 10 close with alt+f4 and take any processes attached with it (including NVDA). # Therefore detach from the console temporarily while sending the gesture. 
winConsoleHandler.disconnectConsole() gesture.send() def reconnect(): if api.getFocusObject()==self: winConsoleHandler.connectConsole(self) self.startMonitoring() core.callLater(200,reconnect) @script(gestures=[ "kb:enter", "kb:numpadEnter", "kb:tab", "kb:control+c", "kb:control+d", "kb:control+pause" ]) def script_flush_queuedChars(self, gesture): """ Flushes the typed word buffer if present. Since these gestures clear the current word/line, we should flush the current words buffer to avoid erroneously reporting words that already have been processed. """ gesture.send() speech.clearTypedWordBuffer() __gestures={ "kb:alt+f4":"close", }
1
30,282
Please remove this line.
nvaccess-nvda
py
@@ -324,7 +324,8 @@ module Beaker # exit codes at the host level and then raising... # is it necessary to break execution?? if options[:accept_all_exit_codes] && options[:acceptable_exit_codes] - @logger.warn ":accept_all_exit_codes & :acceptable_exit_codes set. :accept_all_exit_codes overrides, but they shouldn't both be set at once" + @logger.warn ":accept_all_exit_codes & :acceptable_exit_codes set. :acceptable_exit_codes overrides, but they shouldn't both be set at once" + options[:accept_all_exit_codes] = false end if !options[:accept_all_exit_codes] && !result.exit_code_in?(Array(options[:acceptable_exit_codes] || [0, nil])) raise CommandFailure, "Host '#{self}' exited with #{result.exit_code} running:\n #{cmdline}\nLast #{@options[:trace_limit]} lines of output were:\n#{result.formatted_output(@options[:trace_limit])}"
1
require 'socket' require 'timeout' require 'benchmark' require 'rsync' require 'beaker/dsl/helpers' require 'beaker/dsl/patterns' [ 'command', 'ssh_connection'].each do |lib| require "beaker/#{lib}" end module Beaker class Host SELECT_TIMEOUT = 30 include Beaker::DSL::Helpers include Beaker::DSL::Patterns class CommandFailure < StandardError; end # This class provides array syntax for using puppet --configprint on a host class PuppetConfigReader def initialize(host, command) @host = host @command = command end def has_key?(k) cmd = PuppetCommand.new(@command, '--configprint all') keys = @host.exec(cmd).stdout.split("\n").collect do |x| x[/^[^\s]+/] end keys.include?(k) end def [](k) cmd = PuppetCommand.new(@command, "--configprint #{k.to_s}") @host.exec(cmd).stdout.strip end end def self.create name, host_hash, options case host_hash['platform'] when /windows/ cygwin = host_hash['is_cygwin'] if cygwin.nil? or cygwin == true Windows::Host.new name, host_hash, options else PSWindows::Host.new name, host_hash, options end when /aix/ Aix::Host.new name, host_hash, options when /osx/ Mac::Host.new name, host_hash, options when /freebsd/ FreeBSD::Host.new name, host_hash, options when /eos/ Eos::Host.new name, host_hash, options when /cisco/ Cisco::Host.new name, host_hash, options else Unix::Host.new name, host_hash, options end end attr_accessor :logger attr_reader :name, :host_hash, :options def initialize name, host_hash, options @logger = host_hash[:logger] || options[:logger] @name, @host_hash, @options = name.to_s, host_hash.dup, options.dup @host_hash = self.platform_defaults.merge(@host_hash) pkg_initialize end def pkg_initialize # This method should be overridden by platform-specific code to # handle whatever packaging-related initialization is necessary. end def node_name # TODO: might want to consider caching here; not doing it for now because # I haven't thought through all of the possible scenarios that could # cause the value to change after it had been cached. result = puppet_configprint['node_name_value'].strip end def port_open? port begin Timeout.timeout SELECT_TIMEOUT do TCPSocket.new(reachable_name, port).close return true end rescue Errno::ECONNREFUSED, Timeout::Error, Errno::ETIMEDOUT return false end end def up? begin Socket.getaddrinfo( reachable_name, nil ) return true rescue SocketError return false end end # Return the preferred method to reach the host, will use IP is available and then default to {#hostname}. def reachable_name self['ip'] || hostname end # Returning our PuppetConfigReader here allows users of the Host # class to do things like `host.puppet['vardir']` to query the # 'main' section or, if they want the configuration for a # particular run type, `host.puppet('agent')['vardir']` def puppet_configprint(command='agent') PuppetConfigReader.new(self, command) end alias_method :puppet, :puppet_configprint def []= k, v host_hash[k] = v end # Does this host have this key? Either as defined in the host itself, or globally? def [] k host_hash[k] || options[k] end # Does this host have this key? Either as defined in the host itself, or globally? def has_key? k host_hash.has_key?(k) || options.has_key?(k) end def delete k host_hash.delete(k) end # The {#hostname} of this host. def to_str hostname end # The {#hostname} of this host. def to_s hostname end # Return the public name of the particular host, which may be different then the name of the host provided in # the configuration file as some provisioners create random, unique hostnames. 
def hostname host_hash['vmhostname'] || @name end def + other @name + other end def is_pe? self['type'] && self['type'].to_s =~ /pe/ end def is_cygwin? self.class == Windows::Host end def is_powershell? self.class == PSWindows::Host end def platform self['platform'] end # True if this is a pe run, or if the host has had a 'use-service' property set. def use_service_scripts? is_pe? || self['use-service'] end # Mirrors the true/false value of the host's 'graceful-restarts' property, # or falls back to the value of +is_using_passenger?+ if # 'graceful-restarts' is nil, but only if this is not a PE run (foss only). def graceful_restarts? graceful = if !self['graceful-restarts'].nil? self['graceful-restarts'] else !is_pe? && is_using_passenger? end graceful end # Modifies the host settings to indicate that it will be using passenger service scripts, # (apache2) by default. Does nothing if this is a PE host, since it is already using # passenger. # @param [String] puppetservice Name of the service script that should be # called to stop/startPuppet on this host. Defaults to 'apache2'. def uses_passenger!(puppetservice = 'apache2') if !is_pe? self['passenger'] = true self['puppetservice'] = puppetservice self['use-service'] = true end return true end # True if this is a PE run, or if the host's 'passenger' property has been set. def is_using_passenger? is_pe? || self['passenger'] end def log_prefix if host_hash['vmhostname'] "#{self} (#{@name})" else self.to_s end end #Determine the ip address of this host def get_ip @logger.warn("Uh oh, this should be handled by sub-classes but hasn't been") end #Return the ip address of this host #Always pull fresh, because this can sometimes change def ip self['ip'] = get_ip end #@return [Boolean] true if x86_64, false otherwise def is_x86_64? @x86_64 ||= determine_if_x86_64 end def connection # create new connection object if necessary @connection ||= SshConnection.connect( { :ip => self['ip'], :vmhostname => self['vmhostname'], :hostname => @name }, self['user'], self['ssh'], { :logger => @logger } ) # update connection information if self['ip'] && (@connection.ip != self['ip']) @connection.ip = self['ip'] end if self['vmhostname'] && (@connection.vmhostname != self['vmhostname']) @connection.vmhostname = self['vmhostname'] end if @name && (@connection.hostname != @name) @connection.hostname = @name end @connection end def close if @connection @connection.close # update connection information @connection.ip = self['ip'] if self['ip'] @connection.vmhostname = self['vmhostname'] if self['vmhostname'] @connection.hostname = @name end @connection = nil end def exec command, options={} result = nil # I've always found this confusing cmdline = command.cmd_line(self) # use the value of :dry_run passed to the method unless # undefined, then use parsed @options hash. options[:dry_run] ||= @options[:dry_run] if options[:dry_run] @logger.debug "\n Running in :dry_run mode. Command #{cmdline} not executed." result = Beaker::NullResult.new(self, command) return result end if options[:silent] output_callback = nil else @logger.debug "\n#{log_prefix} #{Time.new.strftime('%H:%M:%S')}$ #{cmdline}" if @options[:color_host_output] output_callback = logger.method(:color_host_output) else output_callback = logger.method(:host_output) end end unless options[:dry_run] # is this returning a result object? 
# the options should come at the end of the method signature (rubyism) # and they shouldn't be ssh specific @logger.step_in() seconds = Benchmark.realtime { result = connection.execute(cmdline, options, output_callback) } @logger.step_out() if not options[:silent] @logger.debug "\n#{log_prefix} executed in %0.2f seconds" % seconds end unless options[:silent] # What? result.log(@logger) if !options[:expect_connection_failure] && !result.exit_code # no exit code was collected, so the stream failed raise CommandFailure, "Host '#{self}' connection failure running:\n #{cmdline}\nLast #{@options[:trace_limit]} lines of output were:\n#{result.formatted_output(@options[:trace_limit])}" end if options[:expect_connection_failure] && result.exit_code # should have had a connection failure, but didn't # wait to see if the connection failure will be generation, otherwise raise error if not connection.wait_for_connection_failure(options, output_callback) raise CommandFailure, "Host '#{self}' should have resulted in a connection failure running:\n #{cmdline}\nLast #{@options[:trace_limit]} lines of output were:\n#{result.formatted_output(@options[:trace_limit])}" end end # No, TestCase has the knowledge about whether its failed, checking acceptable # exit codes at the host level and then raising... # is it necessary to break execution?? if options[:accept_all_exit_codes] && options[:acceptable_exit_codes] @logger.warn ":accept_all_exit_codes & :acceptable_exit_codes set. :accept_all_exit_codes overrides, but they shouldn't both be set at once" end if !options[:accept_all_exit_codes] && !result.exit_code_in?(Array(options[:acceptable_exit_codes] || [0, nil])) raise CommandFailure, "Host '#{self}' exited with #{result.exit_code} running:\n #{cmdline}\nLast #{@options[:trace_limit]} lines of output were:\n#{result.formatted_output(@options[:trace_limit])}" end end end result end # scp files from the localhost to this test host, if a directory is provided it is recursively copied. # If the provided source is a directory both the contents of the directory and the directory # itself will be copied to the host, if you only want to copy directory contents you will either need to specify # the contents file by file or do a separate 'mv' command post scp_to to create the directory structure as desired. # To determine if a file/dir is 'ignored' we compare to any contents of the source dir and NOT any part of the path # to that source dir. # # @param source [String] The path to the file/dir to upload # @param target_path [String] The destination path on the host # @param options [Hash{Symbol=>String}] Options to alter execution # @option options [Array<String>] :ignore An array of file/dir paths that will not be copied to the host # @example # do_scp_to('source/dir1/dir2/dir3', 'target') # -> will result in creation of target/source/dir1/dir2/dir3 on host # # do_scp_to('source/file.rb', 'target', { :ignore => 'file.rb' } # -> will result in not files copyed to the host, all are ignored def do_scp_to source, target_path, options target = self.scp_path( target_path ) # use the value of :dry_run passed to the method unless # undefined, then use parsed @options hash. options[:dry_run] ||= @options[:dry_run] if options[:dry_run] scp_cmd = "scp #{source} #{@name}:#{target}" @logger.debug "\n Running in :dry_run mode. localhost $ #{scp_cmd} not executed." 
return NullResult.new(self, scp_cmd) end @logger.notify "localhost $ scp #{source} #{@name}:#{target} {:ignore => #{options[:ignore]}}" result = Result.new(@name, [source, target]) has_ignore = options[:ignore] and not options[:ignore].empty? # construct the regex for matching ignored files/dirs ignore_re = nil if has_ignore ignore_arr = Array(options[:ignore]).map do |entry| "((\/|\\A)#{Regexp.escape(entry)}(\/|\\z))" end ignore_re = Regexp.new(ignore_arr.join('|')) @logger.debug("going to ignore #{ignore_re}") end # either a single file, or a directory with no ignores if not File.file?(source) and not File.directory?(source) raise IOError, "No such file or directory - #{source}" end if File.file?(source) or (File.directory?(source) and not has_ignore) source_file = source if has_ignore and (source =~ ignore_re) @logger.trace "After rejecting ignored files/dirs, there is no file to copy" source_file = nil result.stdout = "No files to copy" result.exit_code = 1 end if source_file result = connection.scp_to(source_file, target, options) @logger.trace result.stdout end else # a directory with ignores dir_source = Dir.glob("#{source}/**/*").reject do |f| f.gsub(/\A#{Regexp.escape(source)}/, '') =~ ignore_re #only match against subdirs, not full path end @logger.trace "After rejecting ignored files/dirs, going to scp [#{dir_source.join(", ")}]" # create necessary directory structure on host # run this quietly (no STDOUT) @logger.quiet(true) required_dirs = (dir_source.map{ | dir | File.dirname(dir) }).uniq require 'pathname' required_dirs.each do |dir| dir_path = Pathname.new(dir) if dir_path.absolute? mkdir_p(File.join(target, dir.gsub(/#{Regexp.escape(File.dirname(File.absolute_path(source)))}/, ''))) else mkdir_p( File.join(target, dir) ) end end @logger.quiet(false) # copy each file to the host dir_source.each do |s| # Copy files, not directories (as they are copied recursively) next if File.directory?(s) s_path = Pathname.new(s) if s_path.absolute? file_path = File.join(target, File.dirname(s).gsub(/#{Regexp.escape(File.dirname(File.absolute_path(source)))}/,'')) else file_path = File.join(target, File.dirname(s)) end result = connection.scp_to(s, file_path, options) @logger.trace result.stdout end end self.scp_post_operations( target, target_path ) return result end def do_scp_from source, target, options # use the value of :dry_run passed to the method unless # undefined, then use parsed @options hash. options[:dry_run] ||= @options[:dry_run] if options[:dry_run] scp_cmd = "scp #{@name}:#{source} #{target}" @logger.debug "\n Running in :dry_run mode. localhost $ #{scp_cmd} not executed." 
return NullResult.new(self, scp_cmd) end @logger.debug "localhost $ scp #{@name}:#{source} #{target}" result = connection.scp_from(source, target, options) @logger.debug result.stdout return result end # rsync a file or directory from the localhost to this test host # @param from_path [String] The path to the file/dir to upload # @param to_path [String] The destination path on the host # @param opts [Hash{Symbol=>String}] Options to alter execution # @option opts [Array<String>] :ignore An array of file/dir paths that will not be copied to the host def do_rsync_to from_path, to_path, opts = {} ssh_opts = self['ssh'] rsync_args = [] ssh_args = [] if not File.file?(from_path) and not File.directory?(from_path) raise IOError, "No such file or directory - #{from_path}" end # We enable achieve mode and compression rsync_args << "-az" if not self['user'] user = "root" else user = self['user'] end hostname_with_user = "#{user}@#{reachable_name}" Rsync.host = hostname_with_user # vagrant uses temporary ssh configs in order to use dynamic keys # without this config option using ssh may prompt for password if ssh_opts[:config] and File.exists?(ssh_opts[:config]) ssh_args << "-F #{ssh_opts[:config]}" else if ssh_opts.has_key?('keys') and ssh_opts.has_key?('auth_methods') and ssh_opts['auth_methods'].include?('publickey') key = ssh_opts['keys'] # If an array was set, then we use the first value if key.is_a? Array key = key.first end # We need to expand tilde manually as rsync can be # funny sometimes key = File.expand_path(key) ssh_args << "-i #{key}" end end if ssh_opts.has_key?(:port) ssh_args << "-p #{ssh_opts[:port]}" end # We disable prompt when host isn't known ssh_args << "-o 'StrictHostKeyChecking no'" if not ssh_args.empty? rsync_args << "-e \"ssh #{ssh_args.join(' ')}\"" end if opts.has_key?(:ignore) and not opts[:ignore].empty? opts[:ignore].map! do |value| "--exclude '#{value}'" end rsync_args << opts[:ignore].join(' ') end # We assume that the *contents* of the directory 'from_path' needs to be # copied into the directory 'to_path' if File.directory?(from_path) and not from_path.end_with?('/') from_path += '/' end @logger.notify "rsync: localhost:#{from_path} to #{hostname_with_user}:#{to_path} {:ignore => #{opts[:ignore]}}" result = Rsync.run(from_path, to_path, rsync_args) @logger.debug("rsync returned #{result.inspect}") result end end [ 'unix', 'aix', 'mac', 'freebsd', 'windows', 'pswindows', 'eos', 'cisco', ].each do |lib| require "beaker/host/#{lib}" end end
1
12,594
Should this state that we're falling back to `:acceptable_exit_codes`?
voxpupuli-beaker
rb
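The review comment in the voxpupuli-beaker example above questions the warning text: the patch makes `:acceptable_exit_codes` win when both options are set, so the message arguably should say so. A minimal sketch of that precedence with a warning that names the fallback, written in Python purely for illustration (Beaker itself is Ruby; the option names mirror the Ruby symbols):

def resolve_exit_code_options(options, warn):
    # If both flags are set, prefer acceptable_exit_codes and say so explicitly,
    # which is what the reviewer suggests the warning should state.
    if options.get("accept_all_exit_codes") and options.get("acceptable_exit_codes"):
        warn(":accept_all_exit_codes & :acceptable_exit_codes set; "
             "falling back to :acceptable_exit_codes")
        options["accept_all_exit_codes"] = False
    return options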
@@ -87,6 +87,16 @@ def df_type_check(_, value): success=True, metadata_entries=[ EventMetadataEntry.text(str(len(value)), 'row_count', 'Number of rows in DataFrame'), + EventMetadataEntry.text(str(value.shape), "shape", "The shape of the DataFrame."), + EventMetadataEntry.md( + value.dtypes.to_markdown(), "dtypes", "The dtypes within the DataFrame." + ), + EventMetadataEntry.md( + dataframe.head().to_markdown(), "head", "A preview of the first 5 rows." + ), + EventMetadataEntry.md( + dataframe.tail().to_markdown(), "tail", "A preview of the last 5 rows." + ), # string cast columns since they may be things like datetime EventMetadataEntry.json({'columns': list(map(str, value.columns))}, 'metadata'), ],
1
import pandas as pd from dagster_pandas.constraints import ( ColumnExistsConstraint, ColumnTypeConstraint, ConstraintViolationException, ) from dagster_pandas.validation import PandasColumn, validate_constraints from dagster import ( DagsterInvariantViolationError, DagsterType, EventMetadataEntry, Field, Materialization, Path, String, TypeCheck, check, ) from dagster.config.field_utils import Selector from dagster.core.types.config_schema import input_selector_schema, output_selector_schema CONSTRAINT_BLACKLIST = {ColumnExistsConstraint, ColumnTypeConstraint} def dict_without_keys(ddict, *keys): return {key: value for key, value in ddict.items() if key not in set(keys)} @output_selector_schema( Selector( { 'csv': {'path': Path, 'sep': Field(String, is_required=False, default_value=','),}, 'parquet': {'path': Path}, 'table': {'path': Path}, }, ) ) def dataframe_output_schema(_context, file_type, file_options, pandas_df): check.str_param(file_type, 'file_type') check.dict_param(file_options, 'file_options') check.inst_param(pandas_df, 'pandas_df', pd.DataFrame) if file_type == 'csv': path = file_options['path'] pandas_df.to_csv(path, index=False, **dict_without_keys(file_options, 'path')) elif file_type == 'parquet': pandas_df.to_parquet(file_options['path']) elif file_type == 'table': pandas_df.to_csv(file_options['path'], sep='\t', index=False) else: check.failed('Unsupported file_type {file_type}'.format(file_type=file_type)) return Materialization.file(file_options['path']) @input_selector_schema( Selector( { 'csv': {'path': Path, 'sep': Field(String, is_required=False, default_value=','),}, 'parquet': {'path': Path}, 'table': {'path': Path}, }, ) ) def dataframe_input_schema(_context, file_type, file_options): check.str_param(file_type, 'file_type') check.dict_param(file_options, 'file_options') if file_type == 'csv': path = file_options['path'] return pd.read_csv(path, **dict_without_keys(file_options, 'path')) elif file_type == 'parquet': return pd.read_parquet(file_options['path']) elif file_type == 'table': return pd.read_csv(file_options['path'], sep='\t') else: raise DagsterInvariantViolationError( 'Unsupported file_type {file_type}'.format(file_type=file_type) ) def df_type_check(_, value): if not isinstance(value, pd.DataFrame): return TypeCheck(success=False) return TypeCheck( success=True, metadata_entries=[ EventMetadataEntry.text(str(len(value)), 'row_count', 'Number of rows in DataFrame'), # string cast columns since they may be things like datetime EventMetadataEntry.json({'columns': list(map(str, value.columns))}, 'metadata'), ], ) DataFrame = DagsterType( name='PandasDataFrame', description='''Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). 
See http://pandas.pydata.org/''', input_hydration_config=dataframe_input_schema, output_materialization_config=dataframe_output_schema, type_check_fn=df_type_check, ) def _construct_constraint_list(constraints): def add_bullet(constraint_list, constraint_description): return constraint_list + "+ {constraint_description}\n".format( constraint_description=constraint_description ) constraint_list = "" for constraint in constraints: if constraint.__class__ not in CONSTRAINT_BLACKLIST: constraint_list = add_bullet(constraint_list, constraint.markdown_description) return constraint_list def _build_column_header(column_name, constraints): expected_column_types = None column_type_constraint = [ constraint for constraint in constraints if isinstance(constraint, ColumnTypeConstraint) ] if column_type_constraint: expected_types = tuple(column_type_constraint[0].expected_pandas_dtypes) if expected_types: expected_column_types = ( expected_types[0] if len(expected_types) == 1 else tuple(expected_types) ) column_header = '**{column_name}**'.format(column_name=column_name) if expected_column_types: column_header += ": `{expected_dtypes}`".format(expected_dtypes=expected_column_types) return column_header def create_dagster_pandas_dataframe_description(description, columns): title = "\n".join([description, '### Columns', '']) buildme = title for column in columns: buildme += "{}\n{}\n".format( _build_column_header(column.name, column.constraints), _construct_constraint_list(column.constraints), ) return buildme def create_dagster_pandas_dataframe_type( name, description=None, columns=None, event_metadata_fn=None, dataframe_constraints=None, input_hydration_config=None, output_materialization_config=None, ): """ Constructs a custom pandas dataframe dagster type. Args: name (str): Name of the dagster pandas type. description (Optional[str]): A markdown-formatted string, displayed in tooling. columns (Optional[List[PandasColumn]]): A list of :py:class:`~dagster.PandasColumn` objects which express dataframe column schemas and constraints. event_metadata_fn (Optional[func]): A callable which takes your dataframe and returns a list of EventMetadata which allow you to express things like summary statistics during runtime. dataframe_constraints (Optional[List[DataFrameConstraint]]): A list of objects that inherit from :py:class:`~dagster.DataFrameConstraint`. This allows you to express dataframe-level constraints. input_hydration_config (Optional[InputHydrationConfig]): An instance of a class that inherits from :py:class:`~dagster.InputHydrationConfig`. If None, we will default to using the `dataframe_input_schema` input_hydration_config. output_materialization_config (Optional[OutputMaterializationConfig]): An instance of a class that inherits from :py:class:`~dagster.OutputMaterializationConfig`. If None, we will default to using the `dataframe_output_schema` output_materialization_config. """ # We allow for the plugging in of input_hydration_config/output_materialization_configs so that # Users can hydrate and persist their custom dataframes via configuration their own way if the default # configs don't suffice. This is purely optional. 
check.str_param(name, 'name') event_metadata_fn = check.opt_callable_param(event_metadata_fn, 'event_metadata_fn') description = create_dagster_pandas_dataframe_description( check.opt_str_param(description, 'description', default=''), check.opt_list_param(columns, 'columns', of_type=PandasColumn), ) def _dagster_type_check(_, value): if not isinstance(value, pd.DataFrame): return TypeCheck( success=False, description='Must be a pandas.DataFrame. Got value of type. {type_name}'.format( type_name=type(value).__name__ ), ) try: validate_constraints( value, pandas_columns=columns, dataframe_constraints=dataframe_constraints ) except ConstraintViolationException as e: return TypeCheck(success=False, description=str(e)) return TypeCheck( success=True, metadata_entries=_execute_summary_stats(name, value, event_metadata_fn) if event_metadata_fn else None, ) return DagsterType( name=name, type_check_fn=_dagster_type_check, input_hydration_config=input_hydration_config if input_hydration_config else dataframe_input_schema, output_materialization_config=output_materialization_config if output_materialization_config else dataframe_output_schema, description=description, ) def _execute_summary_stats(type_name, value, event_metadata_fn): if not event_metadata_fn: return [] metadata_entries = event_metadata_fn(value) if not ( isinstance(metadata_entries, list) and all(isinstance(item, EventMetadataEntry) for item in metadata_entries) ): raise DagsterInvariantViolationError( ( 'The return value of the user-defined summary_statistics function ' 'for pandas data frame type {type_name} returned {value}. ' 'This function must return List[EventMetadataEntry]' ).format(type_name=type_name, value=repr(metadata_entries)) ) return metadata_entries
1
13,629
I don't think this works. The dataframe object is encapsulated in the value parameter. This might be the root of the failing checks.
dagster-io-dagster
py
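The review comment in the dagster-io-dagster example above points out that inside `df_type_check(_, value)` the DataFrame is the `value` argument, while the patch's new `head`/`tail` entries reference an out-of-scope `dataframe` name. A minimal sketch of the type check with the previews taken from `value`, assuming the same dagster imports used in the file above:

import pandas as pd
from dagster import EventMetadataEntry, TypeCheck

def df_type_check(_, value):
    if not isinstance(value, pd.DataFrame):
        return TypeCheck(success=False)
    return TypeCheck(
        success=True,
        metadata_entries=[
            EventMetadataEntry.text(str(len(value)), 'row_count', 'Number of rows in DataFrame'),
            EventMetadataEntry.text(str(value.shape), 'shape', 'The shape of the DataFrame.'),
            EventMetadataEntry.md(value.dtypes.to_markdown(), 'dtypes', 'The dtypes within the DataFrame.'),
            # Previews built from `value`, the DataFrame actually passed to the check.
            EventMetadataEntry.md(value.head().to_markdown(), 'head', 'A preview of the first 5 rows.'),
            EventMetadataEntry.md(value.tail().to_markdown(), 'tail', 'A preview of the last 5 rows.'),
            # string cast columns since they may be things like datetime
            EventMetadataEntry.json({'columns': list(map(str, value.columns))}, 'metadata'),
        ],
    )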
@@ -163,7 +163,7 @@ class TopologyDescription { } if (topologyType === TopologyType.Unknown) { - if (serverType === ServerType.Standalone) { + if (serverType === ServerType.Standalone && this.servers.size !== 1) { serverDescriptions.delete(address); } else { topologyType = topologyTypeForServerType(serverType);
1
'use strict'; const { ServerDescription } = require('./server_description'); const WIRE_CONSTANTS = require('../cmap/wire_protocol/constants'); const { TopologyType, ServerType } = require('./common'); // contstants related to compatability checks const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION; const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION; const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION; const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION; // Representation of a deployment of servers class TopologyDescription { /** * Create a TopologyDescription * * @param {string} topologyType * @param {Map<string, ServerDescription>} serverDescriptions the a map of address to ServerDescription * @param {string} setName * @param {number} maxSetVersion * @param {ObjectId} maxElectionId * @param {any} commonWireVersion * @param {any} options */ constructor( topologyType, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, options ) { options = options || {}; // TODO: consider assigning all these values to a temporary value `s` which // we use `Object.freeze` on, ensuring the internal state of this type // is immutable. this.type = topologyType || TopologyType.Unknown; this.setName = setName || null; this.maxSetVersion = maxSetVersion || null; this.maxElectionId = maxElectionId || null; this.servers = serverDescriptions || new Map(); this.stale = false; this.compatible = true; this.compatibilityError = null; this.logicalSessionTimeoutMinutes = null; this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 0; this.localThresholdMS = options.localThresholdMS || 0; this.commonWireVersion = commonWireVersion || null; // save this locally, but don't display when printing the instance out Object.defineProperty(this, 'options', { value: options, enumberable: false }); // determine server compatibility for (const serverDescription of this.servers.values()) { if (serverDescription.type === ServerType.Unknown) continue; if (serverDescription.minWireVersion > MAX_SUPPORTED_WIRE_VERSION) { this.compatible = false; this.compatibilityError = `Server at ${serverDescription.address} requires wire version ${serverDescription.minWireVersion}, but this version of the driver only supports up to ${MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${MAX_SUPPORTED_SERVER_VERSION})`; } if (serverDescription.maxWireVersion < MIN_SUPPORTED_WIRE_VERSION) { this.compatible = false; this.compatibilityError = `Server at ${serverDescription.address} reports wire version ${serverDescription.maxWireVersion}, but this version of the driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION}).`; break; } } // Whenever a client updates the TopologyDescription from an ismaster response, it MUST set // TopologyDescription.logicalSessionTimeoutMinutes to the smallest logicalSessionTimeoutMinutes // value among ServerDescriptions of all data-bearing server types. If any have a null // logicalSessionTimeoutMinutes, then TopologyDescription.logicalSessionTimeoutMinutes MUST be // set to null. 
const readableServers = Array.from(this.servers.values()).filter(s => s.isReadable); this.logicalSessionTimeoutMinutes = readableServers.reduce((result, server) => { if (server.logicalSessionTimeoutMinutes == null) return null; if (result == null) return server.logicalSessionTimeoutMinutes; return Math.min(result, server.logicalSessionTimeoutMinutes); }, null); } /** * Returns a new TopologyDescription based on the SrvPollingEvent * * @param {SrvPollingEvent} ev The event */ updateFromSrvPollingEvent(ev) { const newAddresses = ev.addresses(); const serverDescriptions = new Map(this.servers); for (const server of this.servers) { if (newAddresses.has(server[0])) { newAddresses.delete(server[0]); } else { serverDescriptions.delete(server[0]); } } if (serverDescriptions.size === this.servers.size && newAddresses.size === 0) { return this; } for (const address of newAddresses) { serverDescriptions.set(address, new ServerDescription(address)); } return new TopologyDescription( this.type, serverDescriptions, this.setName, this.maxSetVersion, this.maxElectionId, this.commonWireVersion, this.options, null ); } /** * Returns a copy of this description updated with a given ServerDescription * * @param {ServerDescription} serverDescription */ update(serverDescription) { const address = serverDescription.address; // NOTE: there are a number of prime targets for refactoring here // once we support destructuring assignments // potentially mutated values let topologyType = this.type; let setName = this.setName; let maxSetVersion = this.maxSetVersion; let maxElectionId = this.maxElectionId; let commonWireVersion = this.commonWireVersion; const serverType = serverDescription.type; let serverDescriptions = new Map(this.servers); // update common wire version if (serverDescription.maxWireVersion !== 0) { if (commonWireVersion == null) { commonWireVersion = serverDescription.maxWireVersion; } else { commonWireVersion = Math.min(commonWireVersion, serverDescription.maxWireVersion); } } // update the actual server description serverDescriptions.set(address, serverDescription); if (topologyType === TopologyType.Single) { // once we are defined as single, that never changes return new TopologyDescription( TopologyType.Single, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, this.options ); } if (topologyType === TopologyType.Unknown) { if (serverType === ServerType.Standalone) { serverDescriptions.delete(address); } else { topologyType = topologyTypeForServerType(serverType); } } if (topologyType === TopologyType.Sharded) { if ([ServerType.Mongos, ServerType.Unknown].indexOf(serverType) === -1) { serverDescriptions.delete(address); } } if (topologyType === TopologyType.ReplicaSetNoPrimary) { if ([ServerType.Standalone, ServerType.Mongos].indexOf(serverType) >= 0) { serverDescriptions.delete(address); } if (serverType === ServerType.RSPrimary) { const result = updateRsFromPrimary( serverDescriptions, setName, serverDescription, maxSetVersion, maxElectionId ); (topologyType = result[0]), (setName = result[1]), (maxSetVersion = result[2]), (maxElectionId = result[3]); } else if ( [ServerType.RSSecondary, ServerType.RSArbiter, ServerType.RSOther].indexOf(serverType) >= 0 ) { const result = updateRsNoPrimaryFromMember(serverDescriptions, setName, serverDescription); (topologyType = result[0]), (setName = result[1]); } } if (topologyType === TopologyType.ReplicaSetWithPrimary) { if ([ServerType.Standalone, ServerType.Mongos].indexOf(serverType) >= 0) { serverDescriptions.delete(address); 
topologyType = checkHasPrimary(serverDescriptions); } else if (serverType === ServerType.RSPrimary) { const result = updateRsFromPrimary( serverDescriptions, setName, serverDescription, maxSetVersion, maxElectionId ); (topologyType = result[0]), (setName = result[1]), (maxSetVersion = result[2]), (maxElectionId = result[3]); } else if ( [ServerType.RSSecondary, ServerType.RSArbiter, ServerType.RSOther].indexOf(serverType) >= 0 ) { topologyType = updateRsWithPrimaryFromMember( serverDescriptions, setName, serverDescription ); } else { topologyType = checkHasPrimary(serverDescriptions); } } return new TopologyDescription( topologyType, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, this.options ); } get error() { const descriptionsWithError = Array.from(this.servers.values()).filter(sd => sd.error); if (descriptionsWithError.length > 0) { return descriptionsWithError[0].error; } return undefined; } /** * Determines if the topology description has any known servers */ get hasKnownServers() { return Array.from(this.servers.values()).some(sd => sd.type !== ServerType.Unknown); } /** * Determines if this topology description has a data-bearing server available. */ get hasDataBearingServers() { return Array.from(this.servers.values()).some(sd => sd.isDataBearing); } /** * Determines if the topology has a definition for the provided address * * @param {string} address * @returns {boolean} Whether the topology knows about this server */ hasServer(address) { return this.servers.has(address); } } function topologyTypeForServerType(serverType) { if (serverType === ServerType.Mongos) return TopologyType.Sharded; if (serverType === ServerType.RSPrimary) return TopologyType.ReplicaSetWithPrimary; return TopologyType.ReplicaSetNoPrimary; } function compareObjectId(oid1, oid2) { if (oid1 == null) { return -1; } if (oid2 == null) { return 1; } if (oid1.id instanceof Buffer && oid2.id instanceof Buffer) { const oid1Buffer = oid1.id; const oid2Buffer = oid2.id; return oid1Buffer.compare(oid2Buffer); } const oid1String = oid1.toString(); const oid2String = oid2.toString(); return oid1String.localeCompare(oid2String); } function updateRsFromPrimary( serverDescriptions, setName, serverDescription, maxSetVersion, maxElectionId ) { setName = setName || serverDescription.setName; if (setName !== serverDescription.setName) { serverDescriptions.delete(serverDescription.address); return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId]; } const electionId = serverDescription.electionId ? serverDescription.electionId : null; if (serverDescription.setVersion && electionId) { if (maxSetVersion && maxElectionId) { if ( maxSetVersion > serverDescription.setVersion || compareObjectId(maxElectionId, electionId) > 0 ) { // this primary is stale, we must remove it serverDescriptions.set( serverDescription.address, new ServerDescription(serverDescription.address) ); return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId]; } } maxElectionId = serverDescription.electionId; } if ( serverDescription.setVersion != null && (maxSetVersion == null || serverDescription.setVersion > maxSetVersion) ) { maxSetVersion = serverDescription.setVersion; } // We've heard from the primary. Is it the same primary as before? for (const address of serverDescriptions.keys()) { const server = serverDescriptions.get(address); if (server.type === ServerType.RSPrimary && server.address !== serverDescription.address) { // Reset old primary's type to Unknown. 
serverDescriptions.set(address, new ServerDescription(server.address)); // There can only be one primary break; } } // Discover new hosts from this primary's response. serverDescription.allHosts.forEach(address => { if (!serverDescriptions.has(address)) { serverDescriptions.set(address, new ServerDescription(address)); } }); // Remove hosts not in the response. const currentAddresses = Array.from(serverDescriptions.keys()); const responseAddresses = serverDescription.allHosts; currentAddresses .filter(addr => responseAddresses.indexOf(addr) === -1) .forEach(address => { serverDescriptions.delete(address); }); return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId]; } function updateRsWithPrimaryFromMember(serverDescriptions, setName, serverDescription) { if (setName == null) { throw new TypeError('setName is required'); } if ( setName !== serverDescription.setName || (serverDescription.me && serverDescription.address !== serverDescription.me) ) { serverDescriptions.delete(serverDescription.address); } return checkHasPrimary(serverDescriptions); } function updateRsNoPrimaryFromMember(serverDescriptions, setName, serverDescription) { let topologyType = TopologyType.ReplicaSetNoPrimary; setName = setName || serverDescription.setName; if (setName !== serverDescription.setName) { serverDescriptions.delete(serverDescription.address); return [topologyType, setName]; } serverDescription.allHosts.forEach(address => { if (!serverDescriptions.has(address)) { serverDescriptions.set(address, new ServerDescription(address)); } }); if (serverDescription.me && serverDescription.address !== serverDescription.me) { serverDescriptions.delete(serverDescription.address); } return [topologyType, setName]; } function checkHasPrimary(serverDescriptions) { for (const addr of serverDescriptions.keys()) { if (serverDescriptions.get(addr).type === ServerType.RSPrimary) { return TopologyType.ReplicaSetWithPrimary; } } return TopologyType.ReplicaSetNoPrimary; } module.exports = { TopologyDescription };
1
17,593
Why was it necessary to add this check against `this.servers.size`?
mongodb-node-mongodb-native
js
@@ -61,6 +61,11 @@ std::string getWrongOptionHelp(const engine::api::TableParameters &parameters) help = "fallback_speed must be > 0"; } + if (parameters.scale_factor < 0) + { + help = "scale_factor must be > 0"; + } + return help; } } // anon. ns
1
#include "server/service/table_service.hpp" #include "server/api/parameters_parser.hpp" #include "engine/api/table_parameters.hpp" #include "util/json_container.hpp" #include <boost/format.hpp> namespace osrm { namespace server { namespace service { namespace { const constexpr char PARAMETER_SIZE_MISMATCH_MSG[] = "Number of elements in %1% size %2% does not match coordinate size %3%"; template <typename ParamT> bool constrainParamSize(const char *msg_template, const char *name, const ParamT &param, const std::size_t target_size, std::string &help) { if (param.size() > 0 && param.size() != target_size) { help = (boost::format(msg_template) % name % param.size() % target_size).str(); return true; } return false; } std::string getWrongOptionHelp(const engine::api::TableParameters &parameters) { std::string help; const auto coord_size = parameters.coordinates.size(); const bool param_size_mismatch = constrainParamSize( PARAMETER_SIZE_MISMATCH_MSG, "hints", parameters.hints, coord_size, help) || constrainParamSize( PARAMETER_SIZE_MISMATCH_MSG, "bearings", parameters.bearings, coord_size, help) || constrainParamSize( PARAMETER_SIZE_MISMATCH_MSG, "radiuses", parameters.radiuses, coord_size, help) || constrainParamSize( PARAMETER_SIZE_MISMATCH_MSG, "approaches", parameters.approaches, coord_size, help); if (!param_size_mismatch && parameters.coordinates.size() < 2) { help = "Number of coordinates needs to be at least two."; } if (parameters.fallback_speed < 0) { help = "fallback_speed must be > 0"; } return help; } } // anon. ns engine::Status TableService::RunQuery(std::size_t prefix_length, std::string &query, ResultT &result) { result = util::json::Object(); auto &json_result = result.get<util::json::Object>(); auto query_iterator = query.begin(); auto parameters = api::parseParameters<engine::api::TableParameters>(query_iterator, query.end()); if (!parameters || query_iterator != query.end()) { const auto position = std::distance(query.begin(), query_iterator); json_result.values["code"] = "InvalidQuery"; json_result.values["message"] = "Query string malformed close to position " + std::to_string(prefix_length + position); return engine::Status::Error; } BOOST_ASSERT(parameters); if (!parameters->IsValid()) { json_result.values["code"] = "InvalidOptions"; json_result.values["message"] = getWrongOptionHelp(*parameters); return engine::Status::Error; } BOOST_ASSERT(parameters->IsValid()); return BaseService::routing_machine.Table(*parameters, json_result); } } } }
1
23,943
This tests for < 0 but the error message says it must be > 0. The `if` should probably be `<=` to match the message.
Project-OSRM-osrm-backend
cpp
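The review comment in the Project-OSRM example above notes that the added guard rejects only negative values while its message says the value must be greater than zero. A minimal sketch of the boundary the reviewer describes, written in Python purely for illustration (the project code is C++):

def scale_factor_help(scale_factor):
    # Zero fails as well, so the comparison matches the "must be > 0" message.
    if scale_factor <= 0:
        return "scale_factor must be > 0"
    return ""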
@@ -25,12 +25,12 @@ namespace Datadog.Trace.ClrProfiler.Integrations { var request = (WebRequest)webRequest; - if (!IsTracingEnabled(request)) + if (!(request is HttpWebRequest) || !IsTracingEnabled(request)) { return request.GetResponse(); } - string httpMethod = request.Method.ToUpperInvariant(); + string httpMethod = request.Method?.ToUpperInvariant() ?? "UNKNOWN"; string integrationName = typeof(WebRequestIntegration).Name.TrimEnd("Integration", StringComparison.OrdinalIgnoreCase); using (var scope = ScopeFactory.CreateOutboundHttpScope(Tracer.Instance, httpMethod, request.RequestUri, integrationName))
1
using System; using System.Net; using System.Threading.Tasks; using Datadog.Trace.ExtensionMethods; namespace Datadog.Trace.ClrProfiler.Integrations { /// <summary> /// Tracer integration for WebRequest. /// </summary> public static class WebRequestIntegration { /// <summary> /// Instrumentation wrapper for <see cref="WebRequest.GetResponse"/>. /// </summary> /// <param name="webRequest">The <see cref="WebRequest"/> instance to instrument.</param> /// <returns>Returns the value returned by the inner method call.</returns> [InterceptMethod( TargetAssembly = "System", // .NET Framework TargetType = "System.Net.WebRequest")] [InterceptMethod( TargetAssembly = "System.Net.Requests", // .NET Core TargetType = "System.Net.WebRequest")] public static object GetResponse(object webRequest) { var request = (WebRequest)webRequest; if (!IsTracingEnabled(request)) { return request.GetResponse(); } string httpMethod = request.Method.ToUpperInvariant(); string integrationName = typeof(WebRequestIntegration).Name.TrimEnd("Integration", StringComparison.OrdinalIgnoreCase); using (var scope = ScopeFactory.CreateOutboundHttpScope(Tracer.Instance, httpMethod, request.RequestUri, integrationName)) { try { if (scope != null) { // add distributed tracing headers to the HTTP request SpanContextPropagator.Instance.Inject(scope.Span.Context, request.Headers.Wrap()); } WebResponse response = request.GetResponse(); if (response is HttpWebResponse webResponse) { scope?.Span.SetTag(Tags.HttpStatusCode, ((int)webResponse.StatusCode).ToString()); } return response; } catch (Exception ex) when (scope?.Span.SetExceptionForFilter(ex) ?? false) { // unreachable code throw; } } } /// <summary> /// Instrumentation wrapper for <see cref="WebRequest.GetResponseAsync"/>. /// </summary> /// <param name="request">The <see cref="WebRequest"/> instance to instrument.</param> /// <returns>Returns the value returned by the inner method call.</returns> [InterceptMethod( TargetAssembly = "System.Net", TargetType = "System.Net.WebRequest")] public static object GetResponseAsync(object request) { return GetResponseAsyncInternal((WebRequest)request); } private static async Task<WebResponse> GetResponseAsyncInternal(WebRequest request) { if (!IsTracingEnabled(request)) { return await request.GetResponseAsync().ConfigureAwait(false); } string httpMethod = request.Method.ToUpperInvariant(); string integrationName = typeof(WebRequestIntegration).Name.TrimEnd("Integration", StringComparison.OrdinalIgnoreCase); using (var scope = ScopeFactory.CreateOutboundHttpScope(Tracer.Instance, httpMethod, request.RequestUri, integrationName)) { try { if (scope != null) { // add distributed tracing headers to the HTTP request SpanContextPropagator.Instance.Inject(scope.Span.Context, request.Headers.Wrap()); } WebResponse response = await request.GetResponseAsync().ConfigureAwait(false); if (response is HttpWebResponse webResponse) { scope?.Span.SetTag(Tags.HttpStatusCode, ((int)webResponse.StatusCode).ToString()); } return response; } catch (Exception ex) when (scope?.Span.SetExceptionForFilter(ex) ?? false) { // unreachable code throw; } } } private static bool IsTracingEnabled(WebRequest request) { // check if tracing is disabled for this request via http header string value = request.Headers[HttpHeaderNames.TracingEnabled]; return !string.Equals(value, "false", StringComparison.OrdinalIgnoreCase); } } }
1
14,885
This short-circuits when it's something like a WebPackRequest, which we should instrument and test specifically.
DataDog-dd-trace-dotnet
.cs
@@ -23,7 +23,7 @@ var ( func init() { flag.BoolVar(&fProd, "prod", false, "disable development mode") - flag.BoolVar(&fDiskCertCache, "use-disk-cert-cache-dev", false, "cache cert on disk") + flag.BoolVar(&fDiskCertCache, "use-disk-cert-cache", false, "cache cert on disk") } func main() {
1
// Copyright 2017 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package main import ( "context" "flag" "os" "github.com/keybase/kbfs/env" "github.com/keybase/kbfs/libkbfs" "github.com/keybase/kbfs/libpages" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) var ( fProd bool fDiskCertCache bool ) func init() { flag.BoolVar(&fProd, "prod", false, "disable development mode") flag.BoolVar(&fDiskCertCache, "use-disk-cert-cache-dev", false, "cache cert on disk") } func main() { flag.Parse() ctx, cancel := context.WithCancel(context.Background()) var logger *zap.Logger var err error if fProd { logger, err = zap.NewProduction() } else { loggerConfig := zap.NewDevelopmentConfig() loggerConfig.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder loggerConfig.DisableStacktrace = true logger, err = loggerConfig.Build() } if err != nil { panic(err) } if fDiskCertCache && fProd { logger.Panic("disk cert cache should be used in development only") } // Hack to make libkbfs.Init connect to prod {md,b}server all the time. os.Setenv("KEYBASE_RUN_MODE", "prod") kbCtx := env.NewContext() params := libkbfs.DefaultInitParams(kbCtx) params.EnableJournal = false params.Debug = true kbfsLog, err := libkbfs.InitLog(params, kbCtx) if err != nil { logger.Panic("libkbfs.InitLog", zap.Error(err)) } kbConfig, err := libkbfs.Init( ctx, kbCtx, params, nil, cancel, kbfsLog) if err != nil { logger.Panic("libkbfs.Init", zap.Error(err)) } serverConfig := libpages.ServerConfig{ // Connect to staging Let's Encrypt server while we are testing since // the rate-limit is way higher. UseStaging: true, Logger: logger, UseDiskCacheForDev: fDiskCertCache, } libpages.ListenAndServe(ctx, serverConfig, kbConfig) }
1
18,808
Should this be true now by default?
keybase-kbfs
go
@@ -131,6 +131,7 @@ public class AzkabanExecutorServer { root.addServlet(new ServletHolder(new ExecutorServlet()), "/executor"); root.addServlet(new ServletHolder(new JMXHttpServlet()), "/jmx"); root.addServlet(new ServletHolder(new StatsServlet()), "/stats"); + root.addServlet(new ServletHolder(new StatisticsServlet()), "/stastics"); root.setAttribute(ServerConstants.AZKABAN_SERVLET_CONTEXT_KEY, this);
1
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.execapp; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.lang.management.ManagementFactory; import java.lang.reflect.Constructor; import java.util.ArrayList; import java.util.List; import java.util.TimeZone; import javax.management.MBeanInfo; import javax.management.MBeanServer; import javax.management.ObjectName; import org.apache.log4j.Logger; import org.joda.time.DateTimeZone; import org.mortbay.jetty.Connector; import org.mortbay.jetty.Server; import org.mortbay.jetty.servlet.Context; import org.mortbay.jetty.servlet.ServletHolder; import org.mortbay.thread.QueuedThreadPool; import azkaban.execapp.event.JobCallbackManager; import azkaban.execapp.jmx.JmxFlowRunnerManager; import azkaban.execapp.jmx.JmxJobMBeanManager; import azkaban.execapp.metric.NumFailedFlowMetric; import azkaban.execapp.metric.NumFailedJobMetric; import azkaban.execapp.metric.NumQueuedFlowMetric; import azkaban.execapp.metric.NumRunningFlowMetric; import azkaban.execapp.metric.NumRunningJobMetric; import azkaban.executor.ExecutorLoader; import azkaban.executor.JdbcExecutorLoader; import azkaban.jmx.JmxJettyServer; import azkaban.metric.IMetricEmitter; import azkaban.metric.MetricException; import azkaban.metric.MetricReportManager; import azkaban.metric.inmemoryemitter.InMemoryMetricEmitter; import azkaban.project.JdbcProjectLoader; import azkaban.project.ProjectLoader; import azkaban.server.AzkabanServer; import azkaban.server.ServerConstants; import azkaban.utils.Props; import azkaban.utils.SystemMemoryInfo; import azkaban.utils.Utils; public class AzkabanExecutorServer { private static final String CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY = "jmx.attribute.processor.class"; private static final Logger logger = Logger .getLogger(AzkabanExecutorServer.class); private static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024; public static final String AZKABAN_HOME = "AZKABAN_HOME"; public static final String DEFAULT_CONF_PATH = "conf"; public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties"; public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties"; public static final String JOBTYPE_PLUGIN_DIR = "azkaban.jobtype.plugin.dir"; public static final String METRIC_INTERVAL = "executor.metric.milisecinterval."; public static final int DEFAULT_PORT_NUMBER = 12321; public static final int DEFAULT_HEADER_BUFFER_SIZE = 4096; private static final String DEFAULT_TIMEZONE_ID = "default.timezone.id"; private static final int DEFAULT_THREAD_NUMBER = 50; private static AzkabanExecutorServer app; private ExecutorLoader executionLoader; private ProjectLoader projectLoader; private FlowRunnerManager runnerManager; private Props props; private Props executorGlobalProps; private Server server; private ArrayList<ObjectName> registeredMBeans = new ArrayList<ObjectName>(); private MBeanServer 
mbeanServer; /** * Constructor * * @throws Exception */ public AzkabanExecutorServer(Props props) throws Exception { this.props = props; int portNumber = props.getInt("executor.port", DEFAULT_PORT_NUMBER); int maxThreads = props.getInt("executor.maxThreads", DEFAULT_THREAD_NUMBER); server = new Server(portNumber); QueuedThreadPool httpThreadPool = new QueuedThreadPool(maxThreads); server.setThreadPool(httpThreadPool); boolean isStatsOn = props.getBoolean("executor.connector.stats", true); logger.info("Setting up connector with stats on: " + isStatsOn); for (Connector connector : server.getConnectors()) { connector.setStatsOn(isStatsOn); logger.info(String.format( "Jetty connector name: %s, default header buffer size: %d", connector.getName(), connector.getHeaderBufferSize())); connector.setHeaderBufferSize(props.getInt("jetty.headerBufferSize", DEFAULT_HEADER_BUFFER_SIZE)); logger.info(String.format( "Jetty connector name: %s, (if) new header buffer size: %d", connector.getName(), connector.getHeaderBufferSize())); } Context root = new Context(server, "/", Context.SESSIONS); root.setMaxFormContentSize(MAX_FORM_CONTENT_SIZE); root.addServlet(new ServletHolder(new ExecutorServlet()), "/executor"); root.addServlet(new ServletHolder(new JMXHttpServlet()), "/jmx"); root.addServlet(new ServletHolder(new StatsServlet()), "/stats"); root.setAttribute(ServerConstants.AZKABAN_SERVLET_CONTEXT_KEY, this); executionLoader = createExecLoader(props); projectLoader = createProjectLoader(props); runnerManager = new FlowRunnerManager(props, executionLoader, projectLoader, this .getClass().getClassLoader()); JmxJobMBeanManager.getInstance().initialize(props); // make sure this happens before configureJobCallback(props); configureMBeanServer(); configureMetricReports(); SystemMemoryInfo.init(props.getInt("executor.memCheck.interval", 30)); loadCustomJMXAttributeProcessor(props); try { server.start(); } catch (Exception e) { logger.warn(e); Utils.croak(e.getMessage(), 1); } logger.info("Azkaban Executor Server started on port " + portNumber); } private void configureJobCallback(Props props) { boolean jobCallbackEnabled = props.getBoolean("azkaban.executor.jobcallback.enabled", true); logger.info("Job callback enabled? 
" + jobCallbackEnabled); if (jobCallbackEnabled) { JobCallbackManager.initialize(props); } } /** * Configure Metric Reporting as per azkaban.properties settings * * @throws MetricException */ private void configureMetricReports() throws MetricException { Props props = getAzkabanProps(); if (props != null && props.getBoolean("executor.metric.reports", false)) { logger.info("Starting to configure Metric Reports"); MetricReportManager metricManager = MetricReportManager.getInstance(); IMetricEmitter metricEmitter = new InMemoryMetricEmitter(props); metricManager.addMetricEmitter(metricEmitter); logger.info("Adding number of failed flow metric"); metricManager.addMetric(new NumFailedFlowMetric(metricManager, props .getInt(METRIC_INTERVAL + NumFailedFlowMetric.NUM_FAILED_FLOW_METRIC_NAME, props.getInt(METRIC_INTERVAL + "default")))); logger.info("Adding number of failed jobs metric"); metricManager.addMetric(new NumFailedJobMetric(metricManager, props .getInt(METRIC_INTERVAL + NumFailedJobMetric.NUM_FAILED_JOB_METRIC_NAME, props.getInt(METRIC_INTERVAL + "default")))); logger.info("Adding number of running Jobs metric"); metricManager.addMetric(new NumRunningJobMetric(metricManager, props .getInt(METRIC_INTERVAL + NumRunningJobMetric.NUM_RUNNING_JOB_METRIC_NAME, props.getInt(METRIC_INTERVAL + "default")))); logger.info("Adding number of running flows metric"); metricManager.addMetric(new NumRunningFlowMetric(runnerManager, metricManager, props.getInt(METRIC_INTERVAL + NumRunningFlowMetric.NUM_RUNNING_FLOW_METRIC_NAME, props.getInt(METRIC_INTERVAL + "default")))); logger.info("Adding number of queued flows metric"); metricManager.addMetric(new NumQueuedFlowMetric(runnerManager, metricManager, props.getInt(METRIC_INTERVAL + NumQueuedFlowMetric.NUM_QUEUED_FLOW_METRIC_NAME, props.getInt(METRIC_INTERVAL + "default")))); logger.info("Completed configuring Metric Reports"); } } /** * Load a custom class, which is provided by a configuration * CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY. * * This method will try to instantiate an instance of this custom class and * with given properties as the argument in the constructor. * * Basically the custom class must have a constructor that takes an argument * with type Properties. 
* * @param props */ private void loadCustomJMXAttributeProcessor(Props props) { String jmxAttributeEmitter = props.get(CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY); if (jmxAttributeEmitter != null) { try { logger.info("jmxAttributeEmitter: " + jmxAttributeEmitter); Constructor<Props>[] constructors = (Constructor<Props>[]) Class.forName(jmxAttributeEmitter) .getConstructors(); constructors[0].newInstance(props.toProperties()); } catch (Exception e) { logger.error("Encountered error while loading and instantiating " + jmxAttributeEmitter, e); throw new IllegalStateException( "Encountered error while loading and instantiating " + jmxAttributeEmitter, e); } } else { logger.info("No value for property: " + CUSTOM_JMX_ATTRIBUTE_PROCESSOR_PROPERTY + " was found"); } } private ExecutorLoader createExecLoader(Props props) { return new JdbcExecutorLoader(props); } private ProjectLoader createProjectLoader(Props props) { return new JdbcProjectLoader(props); } public void stopServer() throws Exception { server.stop(); server.destroy(); } public ProjectLoader getProjectLoader() { return projectLoader; } public ExecutorLoader getExecutorLoader() { return executionLoader; } /** * Returns the global azkaban properties * * @return */ public Props getAzkabanProps() { return props; } public Props getExecutorGlobalProps() { return executorGlobalProps; } /** * Returns the currently executing executor server, if one exists. * * @return */ public static AzkabanExecutorServer getApp() { return app; } /** * Azkaban using Jetty * * @param args * @throws IOException */ public static void main(String[] args) throws Exception { logger.info("Starting Jetty Azkaban Executor..."); Props azkabanSettings = AzkabanServer.loadProps(args); if (azkabanSettings == null) { logger.error("Azkaban Properties not loaded."); logger.error("Exiting Azkaban Executor Server..."); return; } // Setup time zone if (azkabanSettings.containsKey(DEFAULT_TIMEZONE_ID)) { String timezone = azkabanSettings.getString(DEFAULT_TIMEZONE_ID); System.setProperty("user.timezone", timezone); TimeZone.setDefault(TimeZone.getTimeZone(timezone)); DateTimeZone.setDefault(DateTimeZone.forID(timezone)); logger.info("Setting timezone to " + timezone); } app = new AzkabanExecutorServer(azkabanSettings); Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { try { logTopMemoryConsumers(); } catch (Exception e) { logger.info(("Exception when logging top memory consumers"), e); } logger.info("Shutting down http server..."); try { app.stopServer(); } catch (Exception e) { logger.error("Error while shutting down http server.", e); } logger.info("kk thx bye."); } public void logTopMemoryConsumers() throws Exception, IOException { if (new File("/bin/bash").exists() && new File("/bin/ps").exists() && new File("/usr/bin/head").exists()) { logger.info("logging top memeory consumer"); java.lang.ProcessBuilder processBuilder = new java.lang.ProcessBuilder("/bin/bash", "-c", "/bin/ps aux --sort -rss | /usr/bin/head"); Process p = processBuilder.start(); p.waitFor(); InputStream is = p.getInputStream(); java.io.BufferedReader reader = new java.io.BufferedReader(new InputStreamReader(is)); String line = null; while ((line = reader.readLine()) != null) { logger.info(line); } is.close(); } } }); } /** * Loads the Azkaban property file from the AZKABAN_HOME conf directory * * @return */ /* package */static Props loadConfigurationFromAzkabanHome() { String azkabanHome = System.getenv("AZKABAN_HOME"); if (azkabanHome == null) { logger.error("AZKABAN_HOME not 
set. Will try default."); return null; } if (!new File(azkabanHome).isDirectory() || !new File(azkabanHome).canRead()) { logger.error(azkabanHome + " is not a readable directory."); return null; } File confPath = new File(azkabanHome, DEFAULT_CONF_PATH); if (!confPath.exists() || !confPath.isDirectory() || !confPath.canRead()) { logger .error(azkabanHome + " does not contain a readable conf directory."); return null; } return loadAzkabanConfigurationFromDirectory(confPath); } public FlowRunnerManager getFlowRunnerManager() { return runnerManager; } /** * Loads the Azkaban conf file int a Props object * * @param path * @return */ private static Props loadAzkabanConfigurationFromDirectory(File dir) { File azkabanPrivatePropsFile = new File(dir, AZKABAN_PRIVATE_PROPERTIES_FILE); File azkabanPropsFile = new File(dir, AZKABAN_PROPERTIES_FILE); Props props = null; try { // This is purely optional if (azkabanPrivatePropsFile.exists() && azkabanPrivatePropsFile.isFile()) { logger.info("Loading azkaban private properties file"); props = new Props(null, azkabanPrivatePropsFile); } if (azkabanPropsFile.exists() && azkabanPropsFile.isFile()) { logger.info("Loading azkaban properties file"); props = new Props(props, azkabanPropsFile); } } catch (FileNotFoundException e) { logger.error("File not found. Could not load azkaban config file", e); } catch (IOException e) { logger.error( "File found, but error reading. Could not load azkaban config file", e); } return props; } private void configureMBeanServer() { logger.info("Registering MBeans..."); mbeanServer = ManagementFactory.getPlatformMBeanServer(); registerMbean("executorJetty", new JmxJettyServer(server)); registerMbean("flowRunnerManager", new JmxFlowRunnerManager(runnerManager)); registerMbean("jobJMXMBean", JmxJobMBeanManager.getInstance()); if (JobCallbackManager.isInitialized()) { JobCallbackManager jobCallbackMgr = JobCallbackManager.getInstance(); registerMbean("jobCallbackJMXMBean", jobCallbackMgr.getJmxJobCallbackMBean()); } } public void close() { try { for (ObjectName name : registeredMBeans) { mbeanServer.unregisterMBean(name); logger.info("Jmx MBean " + name.getCanonicalName() + " unregistered."); } } catch (Exception e) { logger.error("Failed to cleanup MBeanServer", e); } } private void registerMbean(String name, Object mbean) { Class<?> mbeanClass = mbean.getClass(); ObjectName mbeanName; try { mbeanName = new ObjectName(mbeanClass.getName() + ":name=" + name); mbeanServer.registerMBean(mbean, mbeanName); logger.info("Bean " + mbeanClass.getCanonicalName() + " registered."); registeredMBeans.add(mbeanName); } catch (Exception e) { logger.error("Error registering mbean " + mbeanClass.getCanonicalName(), e); } } public List<ObjectName> getMbeanNames() { return registeredMBeans; } public MBeanInfo getMBeanInfo(ObjectName name) { try { return mbeanServer.getMBeanInfo(name); } catch (Exception e) { logger.error(e); return null; } } public Object getMBeanAttribute(ObjectName name, String attribute) { try { return mbeanServer.getAttribute(name, attribute); } catch (Exception e) { logger.error(e); return null; } } }
1
10,512
/DispatcherStatistics? /statistics is a bit confusing with /stats
azkaban-azkaban
java
@@ -605,6 +605,9 @@ def get_lambda_log_events(
             or "START" in raw_message
             or "END" in raw_message
             or "REPORT" in raw_message
+            # necessary until tail is updated in docker images. See this PR:
+            # http://git.savannah.gnu.org/gitweb/?p=coreutils.git;a=commitdiff;h=v8.24-111-g1118f32
+            or "tail: unrecognized file system type 0x794c7630" in raw_message
             or regex_filter
             and not re.search(regex_filter, raw_message)
         ):
1
import glob import importlib import io import json import os import re import shutil import tempfile import time import zipfile from contextlib import contextmanager from typing import Any, Callable, Dict, List, Optional, Tuple import requests from six import iteritems from localstack.constants import ( ENV_INTERNAL_TEST_RUN, LAMBDA_TEST_ROLE, LOCALSTACK_ROOT_FOLDER, LOCALSTACK_VENV_FOLDER, TEST_AWS_ACCOUNT_ID, ) from localstack.services.awslambda.lambda_utils import ( LAMBDA_DEFAULT_HANDLER, LAMBDA_DEFAULT_RUNTIME, LAMBDA_DEFAULT_STARTING_POSITION, get_handler_file_from_name, ) from localstack.utils.aws import aws_stack from localstack.utils.common import ( TMP_FILES, chmod_r, ensure_list, get_free_tcp_port, is_debian, is_empty_dir, is_port_open, load_file, mkdir, poll_condition, rm_rf, run, save_file, short_uid, to_str, ) from localstack.utils.run import FuncThread ARCHIVE_DIR_PREFIX = "lambda.archive." DEFAULT_GET_LOG_EVENTS_DELAY = 3 LAMBDA_TIMEOUT_SEC = 30 LAMBDA_ASSETS_BUCKET_NAME = "ls-test-lambda-assets-bucket" MAX_LAMBDA_ARCHIVE_UPLOAD_SIZE = 50_000_000 def is_local_test_mode(): """Whether we are running in the context of our local integration tests.""" return bool(os.environ.get(ENV_INTERNAL_TEST_RUN)) def copy_dir(source, target): if is_debian(): # Using the native command can be an order of magnitude faster on Travis-CI return run("cp -r %s %s" % (source, target)) shutil.copytree(source, target) def rm_dir(dir): if is_debian(): # Using the native command can be an order of magnitude faster on Travis-CI return run("rm -r %s" % dir) shutil.rmtree(dir) def create_lambda_archive( script: str, get_content: bool = False, libs: List[str] = [], runtime: str = None, file_name: str = None, exclude_func: Callable[[str], bool] = None, ): """Utility method to create a Lambda function archive""" runtime = runtime or LAMBDA_DEFAULT_RUNTIME with tempfile.TemporaryDirectory(prefix=ARCHIVE_DIR_PREFIX) as tmp_dir: file_name = file_name or get_handler_file_from_name(LAMBDA_DEFAULT_HANDLER, runtime=runtime) script_file = os.path.join(tmp_dir, file_name) if os.path.sep in script_file: mkdir(os.path.dirname(script_file)) # create __init__.py files along the path to allow Python imports path = file_name.split(os.path.sep) for i in range(1, len(path)): save_file(os.path.join(tmp_dir, *(path[:i] + ["__init__.py"])), "") save_file(script_file, script) chmod_r(script_file, 0o777) # copy libs for lib in libs: paths = [lib, "%s.py" % lib] try: module = importlib.import_module(lib) paths.append(module.__file__) except Exception: pass target_dir = tmp_dir root_folder = os.path.join(LOCALSTACK_VENV_FOLDER, "lib/python*/site-packages") if lib == "localstack": paths = ["localstack/*.py", "localstack/utils"] root_folder = LOCALSTACK_ROOT_FOLDER target_dir = os.path.join(tmp_dir, lib) mkdir(target_dir) for path in paths: file_to_copy = path if path.startswith("/") else os.path.join(root_folder, path) for file_path in glob.glob(file_to_copy): name = os.path.join(target_dir, file_path.split(os.path.sep)[-1]) if os.path.isdir(file_path): copy_dir(file_path, name) else: shutil.copyfile(file_path, name) if exclude_func: for dirpath, folders, files in os.walk(tmp_dir): for name in list(folders) + list(files): full_name = os.path.join(dirpath, name) relative = os.path.relpath(full_name, start=tmp_dir) if exclude_func(relative): rm_rf(full_name) # create zip file result = create_zip_file(tmp_dir, get_content=get_content) return result def delete_lambda_function(name): client = aws_stack.connect_to_service("lambda") 
client.delete_function(FunctionName=name) def create_zip_file_cli(source_path, base_dir, zip_file): # Using the native zip command can be an order of magnitude faster on Travis-CI source = "*" if source_path == base_dir else os.path.basename(source_path) command = "cd %s; zip -r %s %s" % (base_dir, zip_file, source) run(command) def create_zip_file_python(source_path, base_dir, zip_file, mode="w", content_root=None): with zipfile.ZipFile(zip_file, mode) as zip_file: for root, dirs, files in os.walk(base_dir): for name in files: full_name = os.path.join(root, name) relative = os.path.relpath(root, start=base_dir) if content_root: dest = os.path.join(content_root, relative, name) else: dest = os.path.join(relative, name) zip_file.write(full_name, dest) def create_zip_file(file_path, zip_file=None, get_content=False, content_root=None, mode="w"): """ Creates a zipfile to the designated file_path. By default, a new zip file is created but the mode parameter can be used to append to an existing zip file """ base_dir = file_path if not os.path.isdir(file_path): base_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX) shutil.copy(file_path, base_dir) TMP_FILES.append(base_dir) tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX) full_zip_file = zip_file if not full_zip_file: zip_file_name = "archive.zip" full_zip_file = os.path.join(tmp_dir, zip_file_name) # special case where target folder is empty -> create empty zip file if is_empty_dir(base_dir): # see https://stackoverflow.com/questions/25195495/how-to-create-an-empty-zip-file#25195628 content = ( b"PK\x05\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" ) if get_content: return content save_file(full_zip_file, content) return full_zip_file # create zip file if is_debian(): # todo: extend CLI with the new parameters create_zip_file_cli(source_path=file_path, base_dir=base_dir, zip_file=full_zip_file) else: create_zip_file_python( source_path=file_path, base_dir=base_dir, zip_file=full_zip_file, content_root=content_root, mode=mode, ) if not get_content: TMP_FILES.append(tmp_dir) return full_zip_file with open(full_zip_file, "rb") as file_obj: zip_file_content = file_obj.read() rm_dir(tmp_dir) return zip_file_content def create_lambda_function( func_name, zip_file=None, event_source_arn=None, handler_file=None, handler=None, starting_position=None, runtime=None, envvars={}, tags={}, libs=[], delete=False, layers=None, **kwargs, ): """Utility method to create a new function via the Lambda API""" starting_position = starting_position or LAMBDA_DEFAULT_STARTING_POSITION runtime = runtime or LAMBDA_DEFAULT_RUNTIME client = aws_stack.connect_to_service("lambda") # load zip file content if handler_file is specified if not zip_file and handler_file: file_content = load_file(handler_file) if os.path.exists(handler_file) else handler_file if libs or not handler: zip_file = create_lambda_archive( file_content, libs=libs, get_content=True, runtime=runtime or LAMBDA_DEFAULT_RUNTIME, ) else: zip_file = create_zip_file(handler_file, get_content=True) handler = handler or LAMBDA_DEFAULT_HANDLER if delete: try: # Delete function if one already exists client.delete_function(FunctionName=func_name) except Exception: pass lambda_code = {"ZipFile": zip_file} if len(zip_file) > MAX_LAMBDA_ARCHIVE_UPLOAD_SIZE: s3 = aws_stack.connect_to_service("s3") aws_stack.get_or_create_bucket(LAMBDA_ASSETS_BUCKET_NAME) asset_key = f"{short_uid()}.zip" s3.upload_fileobj( Fileobj=io.BytesIO(zip_file), Bucket=LAMBDA_ASSETS_BUCKET_NAME, Key=asset_key ) 
lambda_code = {"S3Bucket": LAMBDA_ASSETS_BUCKET_NAME, "S3Key": asset_key} # create function additional_kwargs = kwargs kwargs = { "FunctionName": func_name, "Runtime": runtime, "Handler": handler, "Role": LAMBDA_TEST_ROLE, "Code": lambda_code, "Timeout": LAMBDA_TIMEOUT_SEC, "Environment": dict(Variables=envvars), "Tags": tags, } kwargs.update(additional_kwargs) if layers: kwargs["Layers"] = layers create_func_resp = client.create_function(**kwargs) resp = { "CreateFunctionResponse": create_func_resp, "CreateEventSourceMappingResponse": None, } # create event source mapping if event_source_arn: resp["CreateEventSourceMappingResponse"] = client.create_event_source_mapping( FunctionName=func_name, EventSourceArn=event_source_arn, StartingPosition=starting_position, ) return resp def connect_api_gateway_to_http_with_lambda_proxy( gateway_name, target_uri, stage_name=None, methods=[], path=None, auth_type=None, auth_creator_func=None, http_method=None, ): if not methods: methods = ["GET", "POST", "DELETE"] if not path: path = "/" stage_name = stage_name or "test" resources = {} resource_path = path.lstrip("/") resources[resource_path] = [] for method in methods: int_meth = http_method or method resources[resource_path].append( { "httpMethod": method, "authorizationType": auth_type, "authorizerId": None, "integrations": [{"type": "AWS_PROXY", "uri": target_uri, "httpMethod": int_meth}], } ) return aws_stack.create_api_gateway( name=gateway_name, resources=resources, stage_name=stage_name, auth_creator_func=auth_creator_func, ) def create_lambda_api_gateway_integration( gateway_name, func_name, handler_file, methods=[], path=None, runtime=None, stage_name=None, auth_type=None, auth_creator_func=None, ): path = path or "/test" auth_type = auth_type or "REQUEST" stage_name = stage_name or "test" # create Lambda zip_file = create_lambda_archive(handler_file, get_content=True, runtime=runtime) create_lambda_function(func_name=func_name, zip_file=zip_file, runtime=runtime) func_arn = aws_stack.lambda_function_arn(func_name) target_arn = aws_stack.apigateway_invocations_arn(func_arn) # connect API GW to Lambda result = connect_api_gateway_to_http_with_lambda_proxy( gateway_name, target_arn, stage_name=stage_name, path=path, methods=methods, auth_type=auth_type, auth_creator_func=auth_creator_func, ) return result def assert_objects(asserts, all_objects): if type(asserts) is not list: asserts = [asserts] for obj in asserts: assert_object(obj, all_objects) def assert_object(expected_object, all_objects): # for Python 3 compatibility dict_values = type({}.values()) if isinstance(all_objects, dict_values): all_objects = list(all_objects) # wrap single item in an array if type(all_objects) is not list: all_objects = [all_objects] found = find_object(expected_object, all_objects) if not found: raise Exception("Expected object not found: %s in list %s" % (expected_object, all_objects)) def find_object(expected_object, object_list): for obj in object_list: if isinstance(obj, list): found = find_object(expected_object, obj) if found: return found all_ok = True if obj != expected_object: if not isinstance(expected_object, dict): all_ok = False else: for k, v in iteritems(expected_object): if not find_recursive(k, v, obj): all_ok = False break if all_ok: return obj return None def find_recursive(key, value, obj): if isinstance(obj, dict): for k, v in iteritems(obj): if k == key and v == value: return True if find_recursive(key, value, v): return True elif isinstance(obj, list): for o in obj: if 
find_recursive(key, value, o): return True else: return False def start_http_server( test_port: int = None, invocations: List = None, invocation_handler: Callable = None ) -> Tuple[int, List, FuncThread]: # Note: leave imports here to avoid import errors (e.g., "flask") for CLI commands from localstack.services.generic_proxy import ProxyListener from localstack.services.infra import start_proxy class TestListener(ProxyListener): def forward_request(self, **kwargs): if invocation_handler: kwargs = invocation_handler(**kwargs) invocations.append(kwargs) return 200 test_port = test_port or get_free_tcp_port() invocations = invocations or [] proxy = start_proxy(test_port, update_listener=TestListener()) return test_port, invocations, proxy def list_all_s3_objects(): return map_all_s3_objects().values() def delete_all_s3_objects(buckets): s3_client = aws_stack.connect_to_service("s3") buckets = buckets if isinstance(buckets, list) else [buckets] for bucket in buckets: keys = all_s3_object_keys(bucket) deletes = [{"Key": key} for key in keys] if deletes: s3_client.delete_objects(Bucket=bucket, Delete={"Objects": deletes}) def download_s3_object(s3, bucket, path): with tempfile.SpooledTemporaryFile() as tmpfile: s3.Bucket(bucket).download_fileobj(path, tmpfile) tmpfile.seek(0) result = tmpfile.read() try: result = to_str(result) except Exception: pass return result def all_s3_object_keys(bucket: str) -> List[str]: s3_client = aws_stack.connect_to_resource("s3") bucket = s3_client.Bucket(bucket) if isinstance(bucket, str) else bucket keys = [key.key for key in bucket.objects.all()] return keys def map_all_s3_objects(to_json: bool = True, buckets: List[str] = None) -> Dict[str, Any]: s3_client = aws_stack.connect_to_resource("s3") result = {} buckets = ensure_list(buckets) buckets = [s3_client.Bucket(b) for b in buckets] if buckets else s3_client.buckets.all() for bucket in buckets: for key in bucket.objects.all(): value = download_s3_object(s3_client, key.bucket_name, key.key) try: if to_json: value = json.loads(value) key = "%s%s%s" % ( key.bucket_name, "" if key.key.startswith("/") else "/", key.key, ) result[key] = value except Exception: # skip non-JSON or binary objects pass return result def get_sample_arn(service, resource): return "arn:aws:%s:%s:%s:%s" % ( service, aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, resource, ) def send_describe_dynamodb_ttl_request(table_name): return send_dynamodb_request("", "DescribeTimeToLive", json.dumps({"TableName": table_name})) def send_update_dynamodb_ttl_request(table_name, ttl_status): return send_dynamodb_request( "", "UpdateTimeToLive", json.dumps( { "TableName": table_name, "TimeToLiveSpecification": { "AttributeName": "ExpireItem", "Enabled": ttl_status, }, } ), ) def send_dynamodb_request(path, action, request_body): headers = { "Host": "dynamodb.amazonaws.com", "x-amz-target": "DynamoDB_20120810.{}".format(action), "Authorization": aws_stack.mock_aws_request_headers("dynamodb")["Authorization"], } url = "{}/{}".format(os.getenv("TEST_DYNAMODB_URL"), path) return requests.put(url, data=request_body, headers=headers, verify=False) def create_sqs_queue(queue_name): """Utility method to create a new queue via SQS API""" client = aws_stack.connect_to_service("sqs") # create queue queue_url = client.create_queue(QueueName=queue_name)["QueueUrl"] # get the queue arn queue_arn = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["QueueArn"],)[ "Attributes" ]["QueueArn"] return { "QueueUrl": queue_url, "QueueArn": queue_arn, } def 
get_lambda_log_group_name(function_name): return "/aws/lambda/{}".format(function_name) def check_expected_lambda_log_events_length(expected_length, function_name, regex_filter=None): events = get_lambda_log_events(function_name, regex_filter=regex_filter) events = [line for line in events if line not in ["\x1b[0m", "\\x1b[0m"]] if len(events) != expected_length: print( "Invalid # of Lambda %s log events: %s / %s: %s" % ( function_name, len(events), expected_length, [ event if len(event) < 1000 else f"{event[:1000]}... (truncated)" for event in events ], ) ) assert len(events) == expected_length return events def get_lambda_log_events( function_name, delay_time=DEFAULT_GET_LOG_EVENTS_DELAY, regex_filter: Optional[str] = None ): def get_log_events(function_name, delay_time): time.sleep(delay_time) logs = aws_stack.connect_to_service("logs") log_group_name = get_lambda_log_group_name(function_name) return list_all_resources( lambda kwargs: logs.filter_log_events(logGroupName=log_group_name, **kwargs), last_token_attr_name="nextToken", list_attr_name="events", ) try: events = get_log_events(function_name, delay_time) except Exception as e: if "ResourceNotFoundException" in str(e): return [] raise rs = [] for event in events: raw_message = event["message"] if ( not raw_message or "START" in raw_message or "END" in raw_message or "REPORT" in raw_message or regex_filter and not re.search(regex_filter, raw_message) ): continue if raw_message in ["\x1b[0m", "\\x1b[0m"]: continue try: rs.append(json.loads(raw_message)) except Exception: rs.append(raw_message) return rs @contextmanager def http_server(handler, host="127.0.0.1", port=None) -> str: """ Create a temporary http server on a random port (or the specified port) with the given handler for the duration of the context manager. Example usage: def handler(request, data): print(request.method, request.path, data) with testutil.http_server(handler) as url: requests.post(url, json={"message": "hello"}) """ from localstack.utils.server.http2_server import run_server host = host port = port or get_free_tcp_port() thread = run_server(port, host, handler=handler, asynchronous=True) url = f"http://{host}:{port}" assert poll_condition( lambda: is_port_open(port), timeout=5 ), f"server on port {port} did not start" yield url thread.stop() @contextmanager def proxy_server(proxy_listener, host="127.0.0.1", port=None) -> str: """ Create a temporary proxy server on a random port (or the specified port) with the given proxy listener for the duration of the context manager. """ from localstack.services.generic_proxy import start_proxy_server host = host port = port or get_free_tcp_port() thread = start_proxy_server(port, bind_address=host, update_listener=proxy_listener) url = f"http://{host}:{port}" assert poll_condition( lambda: is_port_open(port), timeout=5 ), f"server on port {port} did not start" yield url thread.stop() def json_response(data, code=200, headers: Dict = None) -> requests.Response: r = requests.Response() r._content = json.dumps(data) r.status_code = code if headers: r.headers.update(headers) return r def list_all_resources( page_function: Callable[[dict], Any], last_token_attr_name: str, list_attr_name: str, next_token_attr_name: Optional[str] = None, ) -> list: """ List all available resources by loading all available pages using `page_function`. 
:type page_function: Callable :param page_function: callable function or lambda that accepts kwargs with next token and returns the next results page :type last_token_attr_name: str :param last_token_attr_name: where to look for the last evaluated token :type list_attr_name: str :param list_attr_name: where to look for the list of items :type next_token_attr_name: Optional[str] :param next_token_attr_name: name of kwarg with the next token, default is the same as `last_token_attr_name` Example usage: all_log_groups = list_all_resources( lambda kwargs: logs.describe_log_groups(**kwargs), last_token_attr_name="nextToken", list_attr_name="logGroups" ) all_records = list_all_resources( lambda kwargs: dynamodb.scan(**{**kwargs, **dynamodb_kwargs}), last_token_attr_name="LastEvaluatedKey", next_token_attr_name="ExclusiveStartKey", list_attr_name="Items" ) """ if next_token_attr_name is None: next_token_attr_name = last_token_attr_name result = None collected_items = [] last_evaluated_token = None while not result or last_evaluated_token: kwargs = {next_token_attr_name: last_evaluated_token} if last_evaluated_token else {} result = page_function(kwargs) last_evaluated_token = result.get(last_token_attr_name) collected_items += result.get(list_attr_name, []) return collected_items
1
13,861
nit: Wondering if we should simply filter on `"tail: unrecognized file system type"`, or is the type identifier `0x794c7630` always the same?
localstack-localstack
py
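If the reviewer's suggestion above were adopted, get_lambda_log_events would filter on the stable prefix "tail: unrecognized file system type" rather than the full string that includes the hex filesystem type id. The following is a minimal sketch of that variant, not the merged change; the helper name _is_noise and the constant TAIL_WARNING_PREFIX are hypothetical and do not exist in the localstack codebase.

import re
from typing import Optional

# Hypothetical constant: only the stable prefix of the tail warning, in case the
# filesystem type id (0x794c7630) varies across docker images.
TAIL_WARNING_PREFIX = "tail: unrecognized file system type"


def _is_noise(raw_message: str, regex_filter: Optional[str] = None) -> bool:
    """Return True for log lines that should be skipped when collecting Lambda logs."""
    if not raw_message:
        return True
    # Mirror the existing START/END/REPORT substring checks from the file above.
    if any(marker in raw_message for marker in ("START", "END", "REPORT")):
        return True
    # Reviewer's suggestion: match on the prefix only.
    if TAIL_WARNING_PREFIX in raw_message:
        return True
    if regex_filter and not re.search(regex_filter, raw_message):
        return True
    return False


# Example usage with messages resembling those the filter is meant to drop.
assert _is_noise("tail: unrecognized file system type 0x794c7630 for '/tmp/...'. reverting to polling")
assert _is_noise("START RequestId: 52fdfc07-2182-154f-163f-5f0f9a621d72 Version: $LATEST")
assert not _is_noise('{"message": "hello"}')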